
diff --git a/swh/search/elasticsearch.py b/swh/search/elasticsearch.py
index 5000a09..d80f203 100644
--- a/swh/search/elasticsearch.py
+++ b/swh/search/elasticsearch.py
@@ -1,327 +1,365 @@
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import base64
from textwrap import dedent
from typing import Any, Dict, Iterable, Iterator, List, Optional
from elasticsearch import Elasticsearch, helpers
import msgpack
from swh.indexer import codemeta
from swh.model import model
from swh.model.identifiers import origin_identifier
from swh.search.interface import MinimalOriginDict, OriginDict, PagedResult
from swh.search.metrics import send_metric, timed
INDEX_NAME_PARAM = "index"
READ_ALIAS_PARAM = "read_alias"
WRITE_ALIAS_PARAM = "write_alias"
ORIGIN_DEFAULT_CONFIG = {
INDEX_NAME_PARAM: "origin",
READ_ALIAS_PARAM: "origin-read",
WRITE_ALIAS_PARAM: "origin-write",
}
def _sanitize_origin(origin):
origin = origin.copy()
# Whitelist fields to be saved in Elasticsearch
res = {"url": origin.pop("url")}
for field_name in (
"blocklisted",
"has_visits",
"intrinsic_metadata",
"visit_types",
+ "nb_visits",
+ "last_visit_date",
):
if field_name in origin:
res[field_name] = origin.pop(field_name)
# Run the JSON-LD expansion algorithm
# <https://www.w3.org/TR/json-ld-api/#expansion>
# to normalize the Codemeta metadata.
# This is required as Elasticsearch needs each field to have a consistent
# type across documents to be searchable, and non-expanded JSON-LD documents
# can have various types in the same field. For example, all these are
# equivalent in JSON-LD:
# * {"author": "Jane Doe"}
# * {"author": ["Jane Doe"]}
# * {"author": {"@value": "Jane Doe"}}
# * {"author": [{"@value": "Jane Doe"}]}
# and JSON-LD expansion will convert them all to the last one.
if "intrinsic_metadata" in res:
res["intrinsic_metadata"] = codemeta.expand(res["intrinsic_metadata"])
return res
def token_encode(index_to_tokenize: Dict[bytes, Any]) -> str:
"""Tokenize as string an index page result from a search
"""
page_token = base64.b64encode(msgpack.dumps(index_to_tokenize))
return page_token.decode()
def token_decode(page_token: str) -> Dict[bytes, Any]:
"""Read the page_token
"""
return msgpack.loads(base64.b64decode(page_token.encode()), raw=True)
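# Illustrative round-trip of the two pagination-token helpers above; this
# snippet is not part of the change itself and the cursor content is a
# hypothetical example:
_example_cursor = {b"score": 1.0, b"sha1": b"abcdef"}
assert token_decode(token_encode(_example_cursor)) == _example_cursor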
class ElasticSearch:
def __init__(self, hosts: List[str], indexes: Dict[str, Dict[str, str]] = {}):
self._backend = Elasticsearch(hosts=hosts)
# Merge current configuration with default values
origin_config = indexes.get("origin", {})
self.origin_config = {**ORIGIN_DEFAULT_CONFIG, **origin_config}
def _get_origin_index(self) -> str:
return self.origin_config[INDEX_NAME_PARAM]
def _get_origin_read_alias(self) -> str:
return self.origin_config[READ_ALIAS_PARAM]
def _get_origin_write_alias(self) -> str:
return self.origin_config[WRITE_ALIAS_PARAM]
@timed
def check(self):
return self._backend.ping()
def deinitialize(self) -> None:
"""Removes all indices from the Elasticsearch backend"""
self._backend.indices.delete(index="*")
def initialize(self) -> None:
"""Declare Elasticsearch indices, aliases and mappings"""
if not self._backend.indices.exists(index=self._get_origin_index()):
self._backend.indices.create(index=self._get_origin_index())
if not self._backend.indices.exists_alias(self._get_origin_read_alias()):
self._backend.indices.put_alias(
index=self._get_origin_index(), name=self._get_origin_read_alias()
)
if not self._backend.indices.exists_alias(self._get_origin_write_alias()):
self._backend.indices.put_alias(
index=self._get_origin_index(), name=self._get_origin_write_alias()
)
self._backend.indices.put_mapping(
index=self._get_origin_index(),
body={
"date_detection": False,
"properties": {
# sha1 of the URL; used as the document id
"sha1": {"type": "keyword", "doc_values": True,},
# Used both to search URLs, and as the result to return
# as a response to queries
"url": {
"type": "text",
# To split URLs into tokens on any character
# that is not alphanumeric
"analyzer": "simple",
# 2-gram and partial-3-gram search (i.e. with the end of the
# third word potentially missing)
"fields": {
"as_you_type": {
"type": "search_as_you_type",
"analyzer": "simple",
}
},
},
"visit_types": {"type": "keyword"},
# used to filter out origins that were never visited
"has_visits": {"type": "boolean",},
+ "nb_visits": {"type": "integer"},
+ "last_visit_date": {"type": "date"},
"intrinsic_metadata": {
"type": "nested",
"properties": {
"@context": {
# don't bother indexing tokens in these URIs, as they
# are used as namespaces
"type": "keyword",
}
},
},
# Has this origin been taken down?
"blocklisted": {"type": "boolean",},
},
},
)
@timed
def flush(self) -> None:
self._backend.indices.refresh(index=self._get_origin_write_alias())
@timed
def origin_update(self, documents: Iterable[OriginDict]) -> None:
write_index = self._get_origin_write_alias()
documents = map(_sanitize_origin, documents)
documents_with_sha1 = (
(origin_identifier(document), document) for document in documents
)
# painless script that will be executed when updating an origin document
update_script = dedent(
"""
- // backup current visit_types field value
- List visit_types = ctx._source.getOrDefault("visit_types", []);
+ // backup current visit_types field value
+ List visit_types = ctx._source.getOrDefault("visit_types", []);
+ int nb_visits = ctx._source.getOrDefault("nb_visits", 0);
+ ZonedDateTime last_visit_date = ZonedDateTime.parse(ctx._source.getOrDefault("last_visit_date", "0001-01-01T00:00:00Z"));
+
+ // update origin document with new field values
+ ctx._source.putAll(params);
+
+ // restore previous visit types after visit_types field overriding
+ if (ctx._source.containsKey("visit_types")) {
+ for (int i = 0; i < visit_types.length; ++i) {
+ if (!ctx._source.visit_types.contains(visit_types[i])) {
+ ctx._source.visit_types.add(visit_types[i]);
+ }
+ }
+ }
- // update origin document with new field values
- ctx._source.putAll(params);
+ // Undo overwrite if incoming nb_visits is smaller
+ if (ctx._source.containsKey("nb_visits")) {
+ int incoming_nb_visits = ctx._source.getOrDefault("nb_visits", 0);
+ if(incoming_nb_visits < nb_visits){
+ ctx._source.nb_visits = nb_visits;
+ }
+ }
- // restore previous visit types after visit_types field overriding
- if (ctx._source.containsKey("visit_types")) {
- for (int i = 0; i < visit_types.length; ++i) {
- if (!ctx._source.visit_types.contains(visit_types[i])) {
- ctx._source.visit_types.add(visit_types[i]);
- }
- }
+ // Undo overwrite if incoming last_visit_date is older
+ if (ctx._source.containsKey("last_visit_date")) {
+ ZonedDateTime incoming_last_visit_date = ZonedDateTime.parse(ctx._source.getOrDefault("last_visit_date", "0001-01-01T00:00:00Z"));
+ int difference = incoming_last_visit_date.compareTo(last_visit_date); // negative, zero or positive
+ if(difference < 0){
+ ctx._source.last_visit_date = last_visit_date;
}
- """
+ }
+ """ # noqa
)
actions = [
{
"_op_type": "update",
"_id": sha1,
"_index": write_index,
"scripted_upsert": True,
"upsert": {**document, "sha1": sha1,},
"script": {
"source": update_script,
"lang": "painless",
"params": document,
},
}
for (sha1, document) in documents_with_sha1
]
indexed_count, errors = helpers.bulk(self._backend, actions, index=write_index)
assert isinstance(errors, List) # Make mypy happy
send_metric("document:index", count=indexed_count, method_name="origin_update")
send_metric(
"document:index_error", count=len(errors), method_name="origin_update"
)
def origin_dump(self) -> Iterator[model.Origin]:
results = helpers.scan(self._backend, index=self._get_origin_read_alias())
for hit in results:
yield self._backend.termvectors(
index=self._get_origin_read_alias(), id=hit["_id"], fields=["*"]
)
@timed
def origin_search(
self,
*,
url_pattern: Optional[str] = None,
metadata_pattern: Optional[str] = None,
with_visit: bool = False,
visit_types: Optional[List[str]] = None,
+ min_nb_visits: int = 0,
+ min_last_visit_date: str = "",
page_token: Optional[str] = None,
limit: int = 50,
) -> PagedResult[MinimalOriginDict]:
query_clauses: List[Dict[str, Any]] = []
if url_pattern:
query_clauses.append(
{
"multi_match": {
"query": url_pattern,
"type": "bool_prefix",
"operator": "and",
"fields": [
"url.as_you_type",
"url.as_you_type._2gram",
"url.as_you_type._3gram",
],
}
}
)
if metadata_pattern:
query_clauses.append(
{
"nested": {
"path": "intrinsic_metadata",
"query": {
"multi_match": {
"query": metadata_pattern,
# Makes it so that the "foo bar" query returns
# documents which contain "foo" in a field and "bar"
# in a different field
"type": "cross_fields",
# All keywords must be found in a document for it to
# be considered a match.
# TODO: allow missing keywords?
"operator": "and",
# Searches on all fields of the intrinsic_metadata dict,
# recursively.
"fields": ["intrinsic_metadata.*"],
}
},
}
}
)
if not query_clauses:
raise ValueError(
"At least one of url_pattern and metadata_pattern must be provided."
)
if with_visit:
query_clauses.append({"term": {"has_visits": True,}})
+ if min_nb_visits:
+ query_clauses.append({"range": {"nb_visits": {"gte": min_nb_visits,},}})
+ if min_last_visit_date:
+ query_clauses.append(
+ {
+ "range": {
+ "last_visit_date": {
+ "gte": min_last_visit_date.replace("Z", "+00:00"),
+ }
+ }
+ }
+ )
if visit_types is not None:
query_clauses.append({"terms": {"visit_types": visit_types}})
body = {
"query": {
"bool": {
"must": query_clauses,
"must_not": [{"term": {"blocklisted": True}}],
}
},
"sort": [{"_score": "desc"}, {"sha1": "asc"},],
}
+
if page_token:
# TODO: use ElasticSearch's scroll API?
page_token_content = token_decode(page_token)
body["search_after"] = [
page_token_content[b"score"],
page_token_content[b"sha1"].decode("ascii"),
]
res = self._backend.search(
index=self._get_origin_read_alias(), body=body, size=limit
)
hits = res["hits"]["hits"]
next_page_token: Optional[str] = None
if len(hits) == limit:
# There are more results after this page; return a pagination token
# to get them in a future query
last_hit = hits[-1]
next_page_token_content = {
b"score": last_hit["_score"],
b"sha1": last_hit["_source"]["sha1"],
}
next_page_token = token_encode(next_page_token_content)
assert len(hits) <= limit
return PagedResult(
results=[{"url": hit["_source"]["url"]} for hit in hits],
next_page_token=next_page_token,
)
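As a quick sanity check of the filters added above, a minimal usage sketch against this Elasticsearch backend could look as follows; it assumes a locally running Elasticsearch instance, and the host, origin URL, visit count and dates are illustrative, not part of this change:

    from swh.search.elasticsearch import ElasticSearch

    search = ElasticSearch(hosts=["localhost:9200"])  # hypothetical host
    search.initialize()
    search.origin_update(
        [
            {
                "url": "https://example.org/repo.git",  # hypothetical origin
                "has_visits": True,
                "nb_visits": 3,
                "last_visit_date": "2021-03-01T00:00:00+00:00",
            }
        ]
    )
    search.flush()
    # keep only origins visited at least twice, with a last visit in 2021 or later
    page = search.origin_search(
        url_pattern="example",
        min_nb_visits=2,
        min_last_visit_date="2021-01-01T00:00:00+00:00",
    )
    assert [r["url"] for r in page.results] == ["https://example.org/repo.git"]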
diff --git a/swh/search/in_memory.py b/swh/search/in_memory.py
index 58f498a..828e147 100644
--- a/swh/search/in_memory.py
+++ b/swh/search/in_memory.py
@@ -1,152 +1,176 @@
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import defaultdict
+from datetime import datetime
import itertools
import re
from typing import Any, Dict, Iterable, Iterator, List, Optional
from swh.model.identifiers import origin_identifier
from swh.search.interface import MinimalOriginDict, OriginDict, PagedResult
_words_regexp = re.compile(r"\w+")
def _dict_words_set(d):
"""Recursively extract set of words from dict content."""
values = set()
def extract(obj, words):
if isinstance(obj, dict):
for k, v in obj.items():
extract(v, words)
elif isinstance(obj, list):
for item in obj:
extract(item, words)
else:
words.update(_words_regexp.findall(str(obj).lower()))
return words
return extract(d, values)
class InMemorySearch:
def __init__(self):
pass
def check(self):
return True
def deinitialize(self) -> None:
if hasattr(self, "_origins"):
del self._origins
del self._origin_ids
def initialize(self) -> None:
self._origins: Dict[str, Dict[str, Any]] = defaultdict(dict)
self._origin_ids: List[str] = []
def flush(self) -> None:
pass
_url_splitter = re.compile(r"\W")
def origin_update(self, documents: Iterable[OriginDict]) -> None:
for source_document in documents:
document: Dict[str, Any] = dict(source_document)
id_ = origin_identifier(document)
if "url" in document:
document["_url_tokens"] = set(
self._url_splitter.split(source_document["url"])
)
if "visit_types" in document:
document["visit_types"] = set(source_document["visit_types"])
if "visit_types" in self._origins[id_]:
document["visit_types"].update(self._origins[id_]["visit_types"])
+ if "nb_visits" in document:
+ document["nb_visits"] = max(
+ document["nb_visits"], self._origins[id_].get("nb_visits", 0)
+ )
+ if "last_visit_date" in document:
+ document["last_visit_date"] = max(
+ datetime.fromisoformat(document["last_visit_date"]),
+ datetime.fromisoformat(
+ self._origins[id_]
+ .get("last_visit_date", "0001-01-01T00:00:00.000000Z",)
+ .replace("Z", "+00:00")
+ ),
+ ).isoformat()
self._origins[id_].update(document)
if id_ not in self._origin_ids:
self._origin_ids.append(id_)
def origin_search(
self,
*,
url_pattern: Optional[str] = None,
metadata_pattern: Optional[str] = None,
with_visit: bool = False,
visit_types: Optional[List[str]] = None,
page_token: Optional[str] = None,
+ min_nb_visits: int = 0,
+ min_last_visit_date: str = "",
limit: int = 50,
) -> PagedResult[MinimalOriginDict]:
hits: Iterator[Dict[str, Any]] = (
self._origins[id_]
for id_ in self._origin_ids
if not self._origins[id_].get("blocklisted")
)
if url_pattern:
tokens = set(self._url_splitter.split(url_pattern))
def predicate(match):
missing_tokens = tokens - match["_url_tokens"]
if len(missing_tokens) == 0:
return True
elif len(missing_tokens) > 1:
return False
else:
# There is one missing token, look up by prefix.
(missing_token,) = missing_tokens
return any(
token.startswith(missing_token)
for token in match["_url_tokens"]
)
hits = filter(predicate, hits)
if metadata_pattern:
metadata_pattern_words = set(
_words_regexp.findall(metadata_pattern.lower())
)
def predicate(match):
if "intrinsic_metadata" not in match:
return False
return metadata_pattern_words.issubset(
_dict_words_set(match["intrinsic_metadata"])
)
hits = filter(predicate, hits)
if not url_pattern and not metadata_pattern:
raise ValueError(
"At least one of url_pattern and metadata_pattern must be provided."
)
next_page_token: Optional[str] = None
if with_visit:
hits = filter(lambda o: o.get("has_visits"), hits)
+ if min_nb_visits:
+ hits = filter(lambda o: o.get("nb_visits", 0) >= min_nb_visits, hits)
+ if min_last_visit_date:
+ hits = filter(
+ lambda o: datetime.fromisoformat(o.get("last_visit_date", ""))
+ >= datetime.fromisoformat(min_last_visit_date),
+ hits,
+ )
if visit_types is not None:
visit_types_set = set(visit_types)
hits = filter(
lambda o: visit_types_set.intersection(o.get("visit_types", set())),
hits,
)
start_at_index = int(page_token) if page_token else 0
origins = [
{"url": hit["url"]}
for hit in itertools.islice(hits, start_at_index, start_at_index + limit)
]
if len(origins) == limit:
next_page_token = str(start_at_index + limit)
assert len(origins) <= limit
return PagedResult(results=origins, next_page_token=next_page_token,)
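The in-memory backend is expected to mirror the merge semantics of the Painless script above: repeated updates keep the highest nb_visits and the most recent last_visit_date, so a stale update cannot regress either field. A self-contained sketch, with illustrative values:

    from swh.search.in_memory import InMemorySearch

    search = InMemorySearch()
    search.initialize()
    search.origin_update(
        [
            {
                "url": "https://example.org/repo.git",  # hypothetical origin
                "nb_visits": 5,
                "last_visit_date": "2021-03-01T00:00:00+00:00",
            }
        ]
    )
    # a stale update with older values must not overwrite the newer ones
    search.origin_update(
        [
            {
                "url": "https://example.org/repo.git",
                "nb_visits": 2,
                "last_visit_date": "2021-01-01T00:00:00+00:00",
            }
        ]
    )
    page = search.origin_search(
        url_pattern="example",
        min_nb_visits=5,
        min_last_visit_date="2021-02-01T00:00:00+00:00",
    )
    assert [r["url"] for r in page.results] == ["https://example.org/repo.git"]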
diff --git a/swh/search/interface.py b/swh/search/interface.py
index a37b47a..95e36cb 100644
--- a/swh/search/interface.py
+++ b/swh/search/interface.py
@@ -1,80 +1,86 @@
# Copyright (C) 2020-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from typing import Iterable, List, Optional, TypeVar
from typing_extensions import TypedDict
from swh.core.api import remote_api_endpoint
from swh.core.api.classes import PagedResult as CorePagedResult
TResult = TypeVar("TResult")
PagedResult = CorePagedResult[TResult, str]
class MinimalOriginDict(TypedDict):
"""Mandatory keys of an :class:`OriginDict`"""
url: str
class OriginDict(MinimalOriginDict, total=False):
"""Argument passed to :meth:`SearchInterface.origin_update`."""
visit_types: List[str]
has_visits: bool
class SearchInterface:
@remote_api_endpoint("check")
def check(self):
"""Dedicated method to execute some specific check per implementation.
"""
...
@remote_api_endpoint("flush")
def flush(self) -> None:
"""Blocks until all previous calls to _update() are completely
applied.
"""
...
@remote_api_endpoint("origin/update")
def origin_update(self, documents: Iterable[OriginDict]) -> None:
"""Persist documents to the search backend.
"""
...
@remote_api_endpoint("origin/search")
def origin_search(
self,
*,
url_pattern: Optional[str] = None,
metadata_pattern: Optional[str] = None,
with_visit: bool = False,
visit_types: Optional[List[str]] = None,
page_token: Optional[str] = None,
+ min_nb_visits: int = 0,
+ min_last_visit_date: str = "",
limit: int = 50,
) -> PagedResult[MinimalOriginDict]:
"""Searches for origins matching the `url_pattern`.
Args:
url_pattern: Part of the URL to search for
with_visit: Whether origins with no visit are to be
filtered out
visit_types: Only origins having any of the provided visit types
(e.g. git, svn, pypi) will be returned
page_token: Opaque value used for pagination
+ min_nb_visits: Only origins with at least this number of visits
+ will be returned
+ min_last_visit_date: Only origins with a last visit on or after
+ this date (ISO format) will be returned
limit: number of results to return
Returns:
PagedResult of origin dicts matching the search criteria. If next_page_token
is None, there is no more data to retrieve.
"""
...
diff --git a/swh/search/journal_client.py b/swh/search/journal_client.py
index 27b01b6..6714d81 100644
--- a/swh/search/journal_client.py
+++ b/swh/search/journal_client.py
@@ -1,79 +1,84 @@
# Copyright (C) 2018-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import logging
EXPECTED_MESSAGE_TYPES = {
"origin",
"origin_visit",
"origin_visit_status",
"origin_intrinsic_metadata",
}
def process_journal_objects(messages, *, search):
"""Worker function for `JournalClient.process(worker_fn)`, after
currification of `scheduler` and `task_names`."""
assert set(messages) <= EXPECTED_MESSAGE_TYPES, set(messages)
if "origin" in messages:
process_origins(messages["origin"], search)
if "origin_visit" in messages:
process_origin_visits(messages["origin_visit"], search)
if "origin_visit_status" in messages:
process_origin_visit_statuses(messages["origin_visit_status"], search)
if "origin_intrinsic_metadata" in messages:
process_origin_intrinsic_metadata(messages["origin_intrinsic_metadata"], search)
def process_origins(origins, search):
logging.debug("processing origins %r", origins)
search.origin_update(origins)
def process_origin_visits(visits, search):
logging.debug("processing origin visits %r", visits)
search.origin_update(
[
{
"url": (
visit["origin"]
if isinstance(visit["origin"], str)
else visit["origin"]["url"]
),
"visit_types": [visit["type"]],
}
for visit in visits
]
)
def process_origin_visit_statuses(visit_statuses, search):
logging.debug("processing origin visit statuses %r", visit_statuses)
full_visit_status = [
- {"url": (visit_status["origin"]), "has_visits": True,}
+ {
+ "url": (visit_status["origin"]),
+ "has_visits": True,
+ "nb_visits": visit_status["visit"],
+ "last_visit_date": visit_status["date"].isoformat(),
+ }
for visit_status in visit_statuses
if visit_status["status"] == "full"
]
if full_visit_status:
search.origin_update(full_visit_status)
def process_origin_intrinsic_metadata(origin_metadata, search):
logging.debug("processing origin intrinsic_metadata %r", origin_metadata)
origin_metadata = [
{"url": item["id"], "intrinsic_metadata": item["metadata"],}
for item in origin_metadata
]
search.origin_update(origin_metadata)
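For clarity, this is the shape of the document that a "full" origin_visit_status message is turned into before being handed to search.origin_update; the origin URL, visit number and date are illustrative:

    from datetime import datetime, timezone

    visit_status = {
        "origin": "https://example.org/repo.git",
        "visit": 3,
        "status": "full",
        "date": datetime(2021, 3, 1, tzinfo=timezone.utc),
    }
    document = {
        "url": visit_status["origin"],
        "has_visits": True,
        # the counter of the latest full visit doubles as the visit count
        "nb_visits": visit_status["visit"],
        # serialized to an ISO 8601 string for the search document
        "last_visit_date": visit_status["date"].isoformat(),
    }
    assert document["last_visit_date"] == "2021-03-01T00:00:00+00:00"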
diff --git a/swh/search/tests/test_cli.py b/swh/search/tests/test_cli.py
index 1ef5787..51124a3 100644
--- a/swh/search/tests/test_cli.py
+++ b/swh/search/tests/test_cli.py
@@ -1,440 +1,442 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
+from datetime import datetime, timezone
import tempfile
from click.testing import CliRunner
from confluent_kafka import Producer
import pytest
import yaml
from swh.journal.serializers import value_to_kafka
from swh.model.hashutil import hash_to_bytes
from swh.search import get_search
from swh.search.cli import search_cli_group
CLI_CONFIG = """
search:
cls: elasticsearch
hosts:
- '%(elasticsearch_host)s'
indexes:
origin:
index: test
read_alias: test-read
write_alias: test-write
"""
JOURNAL_OBJECTS_CONFIG_TEMPLATE = """
journal:
brokers:
- {broker}
prefix: {prefix}
group_id: {group_id}
"""
def invoke(catch_exceptions, args, config="", *, elasticsearch_host):
runner = CliRunner()
with tempfile.NamedTemporaryFile("a", suffix=".yml") as config_fd:
config_fd.write(
(CLI_CONFIG + config) % {"elasticsearch_host": elasticsearch_host}
)
config_fd.seek(0)
result = runner.invoke(search_cli_group, ["-C" + config_fd.name] + args)
if not catch_exceptions and result.exception:
print(result.output)
raise result.exception
return result
def test__journal_client__origin(
swh_search, elasticsearch_host: str, kafka_prefix: str, kafka_server
):
"""Tests the re-indexing when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
producer = Producer(
{
"bootstrap.servers": kafka_server,
"client.id": "test search origin producer",
"acks": "all",
}
)
origin_foobar_baz = {
"url": "http://foobar.baz",
}
value = value_to_kafka(origin_foobar_baz)
topic = f"{kafka_prefix}.origin"
producer.produce(topic=topic, key=b"bogus-origin", value=value)
journal_objects_config = JOURNAL_OBJECTS_CONFIG_TEMPLATE.format(
broker=kafka_server, prefix=kafka_prefix, group_id="test-consumer"
)
result = invoke(
False,
[
"journal-client",
"objects",
"--stop-after-objects",
"1",
"--object-type",
"origin",
"--prefix",
kafka_prefix,
],
journal_objects_config,
elasticsearch_host=elasticsearch_host,
)
# Check the output
expected_output = "Processed 1 messages.\nDone.\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
swh_search.flush()
# searching origin without visit as requirement
actual_page = swh_search.origin_search(url_pattern="foobar")
# We find it
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar_baz]
# It's an origin with no visit, searching for it with visit
actual_page = swh_search.origin_search(url_pattern="foobar", with_visit=True)
# returns nothing
assert actual_page.next_page_token is None
assert actual_page.results == []
def test__journal_client__origin_visit(
swh_search, elasticsearch_host, kafka_prefix: str, kafka_server
):
"""Tests the re-indexing when origin_batch_size*task_batch_size is a
divisor of nb_origins."""
origin_foobar = {"url": "http://baz.foobar"}
producer = Producer(
{
"bootstrap.servers": kafka_server,
"client.id": "test search origin visit producer",
"acks": "all",
}
)
topic = f"{kafka_prefix}.origin_visit"
value = value_to_kafka({"origin": origin_foobar["url"], "type": "git"})
producer.produce(topic=topic, key=b"bogus-origin-visit", value=value)
journal_objects_config = JOURNAL_OBJECTS_CONFIG_TEMPLATE.format(
broker=kafka_server, prefix=kafka_prefix, group_id="test-consumer"
)
result = invoke(
False,
[
"journal-client",
"objects",
"--stop-after-objects",
"1",
"--object-type",
"origin_visit",
],
journal_objects_config,
elasticsearch_host=elasticsearch_host,
)
# Check the output
expected_output = "Processed 1 messages.\nDone.\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
swh_search.flush()
actual_page = swh_search.origin_search(url_pattern="foobar", with_visit=False)
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar]
# Not considered visited unless the visit is full
actual_page = swh_search.origin_search(url_pattern="foobar", with_visit=True)
assert actual_page.next_page_token is None
assert actual_page.results == []
def test__journal_client__origin_visit_status(
swh_search, elasticsearch_host, kafka_prefix: str, kafka_server
):
"""Subscribing to origin-visit-status should result in swh-search indexation
"""
origin_foobar = {"url": "http://baz.foobar"}
producer = Producer(
{
"bootstrap.servers": kafka_server,
"client.id": "test search origin visit status producer",
"acks": "all",
}
)
topic = f"{kafka_prefix}.origin_visit_status"
value = value_to_kafka(
{
"origin": origin_foobar["url"],
"visit": 1,
+ "date": datetime.now(tz=timezone.utc),
"snapshot": None,
"status": "full",
}
)
producer.produce(topic=topic, key=b"bogus-origin-visit-status", value=value)
journal_objects_config = JOURNAL_OBJECTS_CONFIG_TEMPLATE.format(
broker=kafka_server, prefix=kafka_prefix, group_id="test-consumer"
)
result = invoke(
False,
[
"journal-client",
"objects",
"--stop-after-objects",
"1",
"--prefix",
kafka_prefix,
"--object-type",
"origin_visit_status",
],
journal_objects_config,
elasticsearch_host=elasticsearch_host,
)
# Check the output
expected_output = "Processed 1 messages.\nDone.\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
swh_search.flush()
# Both searches return the origin
actual_page = swh_search.origin_search(url_pattern="foobar", with_visit=False)
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar]
actual_page = swh_search.origin_search(url_pattern="foobar", with_visit=True)
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar]
def test__journal_client__origin_intrinsic_metadata(
swh_search, elasticsearch_host, kafka_prefix: str, kafka_server
):
"""Subscribing to origin-intrinsic-metadata should result in swh-search indexation
"""
origin_foobar = {"url": "https://github.com/clojure/clojure"}
origin_intrinsic_metadata = {
"id": origin_foobar["url"],
"metadata": {
"name": "clojure",
"type": "SoftwareSourceCode",
"license": "http://opensource.org/licenses/eclipse-1.0.php",
"version": "1.10.2-master-SNAPSHOT",
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"identifier": "org.clojure",
"description": "Clojure core environment and runtime library.",
"codeRepository": "https://repo.maven.apache.org/maven2/org/clojure/clojure", # noqa
},
"indexer_configuration_id": 1,
"from_revision": hash_to_bytes("f47c139e20970ee0852166f48ee2a4626632b86e"),
"mappings": ["maven"],
}
producer = Producer(
{
"bootstrap.servers": kafka_server,
"client.id": "test search origin intrinsic metadata producer",
"acks": "all",
}
)
topic = f"{kafka_prefix}.origin_intrinsic_metadata"
value = value_to_kafka(origin_intrinsic_metadata)
producer.produce(topic=topic, key=b"bogus-origin-intrinsic-metadata", value=value)
journal_objects_config = JOURNAL_OBJECTS_CONFIG_TEMPLATE.format(
broker=kafka_server, prefix=kafka_prefix, group_id="test-consumer"
)
result = invoke(
False,
[
"journal-client",
"objects",
"--stop-after-objects",
"1",
"--object-type",
"origin_intrinsic_metadata",
],
journal_objects_config,
elasticsearch_host=elasticsearch_host,
)
# Check the output
expected_output = "Processed 1 messages.\nDone.\n"
assert result.exit_code == 0, result.output
assert result.output == expected_output
swh_search.flush()
# search without visit returns the metadata
actual_page = swh_search.origin_search(url_pattern="clojure", with_visit=False)
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar]
# no visit associated so it does not return anything
actual_page = swh_search.origin_search(url_pattern="clojure", with_visit=True)
assert actual_page.next_page_token is None
assert actual_page.results == []
def test__journal_client__missing_main_journal_config_key(elasticsearch_host):
"""Missing configuration on journal should raise"""
with pytest.raises(KeyError, match="journal"):
invoke(
catch_exceptions=False,
args=["journal-client", "objects", "--stop-after-objects", "1",],
config="", # missing config will make it raise
elasticsearch_host=elasticsearch_host,
)
def test__journal_client__missing_journal_config_keys(elasticsearch_host):
"""Missing configuration on mandatory journal keys should raise"""
kafka_prefix = "swh.journal.objects"
journal_objects_config = JOURNAL_OBJECTS_CONFIG_TEMPLATE.format(
broker="192.0.2.1", prefix=kafka_prefix, group_id="test-consumer"
)
journal_config = yaml.safe_load(journal_objects_config)
for key in journal_config["journal"].keys():
if key == "prefix": # optional
continue
cfg = copy.deepcopy(journal_config)
del cfg["journal"][key] # make config incomplete
yaml_cfg = yaml.dump(cfg)
with pytest.raises(TypeError, match=f"{key}"):
invoke(
catch_exceptions=False,
args=[
"journal-client",
"objects",
"--stop-after-objects",
"1",
"--prefix",
kafka_prefix,
"--object-type",
"origin_visit_status",
],
config=yaml_cfg, # incomplete config will make the cli raise
elasticsearch_host=elasticsearch_host,
)
def test__journal_client__missing_prefix_config_key(
swh_search, elasticsearch_host, kafka_server
):
"""Missing configuration on mandatory prefix key should raise"""
journal_cfg_template = """
journal:
brokers:
- {broker}
group_id: {group_id}
"""
journal_cfg = journal_cfg_template.format(
broker=kafka_server, group_id="test-consumer"
)
with pytest.raises(ValueError, match="prefix"):
invoke(
False,
# Missing --prefix (and no config key) will make the cli raise
[
"journal-client",
"objects",
"--stop-after-objects",
"1",
"--object-type",
"origin_visit_status",
],
journal_cfg,
elasticsearch_host=elasticsearch_host,
)
def test__journal_client__missing_object_types_config_key(
swh_search, elasticsearch_host, kafka_server
):
"""Missing configuration on mandatory object-types key should raise"""
journal_cfg_template = """
journal:
brokers:
- {broker}
prefix: swh.journal.objects
group_id: {group_id}
"""
journal_cfg = journal_cfg_template.format(
broker=kafka_server, group_id="test-consumer"
)
with pytest.raises(ValueError, match="object_types"):
invoke(
False,
# Missing --object-types (and no config key) will make the cli raise
["journal-client", "objects", "--stop-after-objects", "1"],
journal_cfg,
elasticsearch_host=elasticsearch_host,
)
def test__initialize__with_index_name(elasticsearch_host):
"""Initializing the index with an index name should create the right index"""
search = get_search(
"elasticsearch",
hosts=[elasticsearch_host],
indexes={"origin": {"index": "test"}},
)
assert search._get_origin_index() == "test"
assert search._get_origin_read_alias() == "origin-read"
assert search._get_origin_write_alias() == "origin-write"
def test__initialize__with_read_alias(elasticsearch_host):
"""Initializing the index with a search alias name should create
the right search alias"""
search = get_search(
"elasticsearch",
hosts=[elasticsearch_host],
indexes={"origin": {"read_alias": "test"}},
)
assert search._get_origin_index() == "origin"
assert search._get_origin_read_alias() == "test"
assert search._get_origin_write_alias() == "origin-write"
def test__initialize__with_write_alias(elasticsearch_host):
"""Initializing the index with an indexing alias name should create
the right indexing alias"""
search = get_search(
"elasticsearch",
hosts=[elasticsearch_host],
indexes={"origin": {"write_alias": "test"}},
)
assert search._get_origin_index() == "origin"
assert search._get_origin_read_alias() == "origin-read"
assert search._get_origin_write_alias() == "test"
diff --git a/swh/search/tests/test_journal_client.py b/swh/search/tests/test_journal_client.py
index 5f60722..c18f955 100644
--- a/swh/search/tests/test_journal_client.py
+++ b/swh/search/tests/test_journal_client.py
@@ -1,94 +1,117 @@
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
+from datetime import datetime, timezone
import functools
from unittest.mock import MagicMock
from swh.search.journal_client import process_journal_objects
def test_journal_client_origin_from_journal():
search_mock = MagicMock()
worker_fn = functools.partial(process_journal_objects, search=search_mock,)
worker_fn({"origin": [{"url": "http://foobar.baz"},]})
search_mock.origin_update.assert_called_once_with(
[{"url": "http://foobar.baz"},]
)
search_mock.reset_mock()
worker_fn({"origin": [{"url": "http://foobar.baz"}, {"url": "http://barbaz.qux"},]})
search_mock.origin_update.assert_called_once_with(
[{"url": "http://foobar.baz"}, {"url": "http://barbaz.qux"},]
)
def test_journal_client_origin_visit_from_journal():
search_mock = MagicMock()
worker_fn = functools.partial(process_journal_objects, search=search_mock,)
worker_fn({"origin_visit": [{"origin": "http://foobar.baz", "type": "git"},]})
search_mock.origin_update.assert_called_once_with(
[{"url": "http://foobar.baz", "visit_types": ["git"]},]
)
def test_journal_client_origin_visit_status_from_journal():
search_mock = MagicMock()
worker_fn = functools.partial(process_journal_objects, search=search_mock,)
+ current_datetime = datetime.now(tz=timezone.utc)
worker_fn(
{
"origin_visit_status": [
- {"origin": "http://foobar.baz", "status": "full"} # full visits ok
+ {
+ "origin": "http://foobar.baz",
+ "status": "full",
+ "visit": 5,
+ "date": current_datetime,
+ } # full visits ok
]
}
)
search_mock.origin_update.assert_called_once_with(
- [{"url": "http://foobar.baz", "has_visits": True},]
+ [
+ {
+ "url": "http://foobar.baz",
+ "has_visits": True,
+ "nb_visits": 5,
+ "last_visit_date": current_datetime.isoformat(),
+ },
+ ]
)
search_mock.reset_mock()
# non-full visits are filtered out
worker_fn(
- {"origin_visit_status": [{"origin": "http://foobar.baz", "status": "partial"}]}
+ {
+ "origin_visit_status": [
+ {
+ "origin": "http://foobar.baz",
+ "status": "partial",
+ "visit": 5,
+ "date": current_datetime,
+ }
+ ]
+ }
)
search_mock.origin_update.assert_not_called()
def test_journal_client_origin_metadata_from_journal():
search_mock = MagicMock()
worker_fn = functools.partial(process_journal_objects, search=search_mock,)
worker_fn(
{
"origin_intrinsic_metadata": [
{
"id": "http://foobar.baz",
"metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "foo bar",
},
},
]
}
)
search_mock.origin_update.assert_called_once_with(
[
{
"url": "http://foobar.baz",
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "foo bar",
},
},
]
)
diff --git a/swh/search/tests/test_search.py b/swh/search/tests/test_search.py
index 7a7bb36..b72372a 100644
--- a/swh/search/tests/test_search.py
+++ b/swh/search/tests/test_search.py
@@ -1,621 +1,695 @@
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
+from datetime import datetime, timedelta, timezone
+
from hypothesis import given, settings, strategies
+import pytest
from swh.core.api.classes import stream_results
class CommonSearchTest:
def test_origin_url_unique_word_prefix(self):
origin_foobar_baz = {"url": "http://foobar.baz"}
origin_barbaz_qux = {"url": "http://barbaz.qux"}
origin_qux_quux = {"url": "http://qux.quux"}
origins = [origin_foobar_baz, origin_barbaz_qux, origin_qux_quux]
self.search.origin_update(origins)
self.search.flush()
actual_page = self.search.origin_search(url_pattern="foobar")
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar_baz]
actual_page = self.search.origin_search(url_pattern="barb")
assert actual_page.next_page_token is None
assert actual_page.results == [origin_barbaz_qux]
# 'bar' is part of 'foobar', but is not the beginning of it
actual_page = self.search.origin_search(url_pattern="bar")
assert actual_page.next_page_token is None
assert actual_page.results == [origin_barbaz_qux]
actual_page = self.search.origin_search(url_pattern="barbaz")
assert actual_page.next_page_token is None
assert actual_page.results == [origin_barbaz_qux]
def test_origin_url_unique_word_prefix_multiple_results(self):
origin_foobar_baz = {"url": "http://foobar.baz"}
origin_barbaz_qux = {"url": "http://barbaz.qux"}
origin_qux_quux = {"url": "http://qux.quux"}
self.search.origin_update(
[origin_foobar_baz, origin_barbaz_qux, origin_qux_quux]
)
self.search.flush()
actual_page = self.search.origin_search(url_pattern="qu")
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [o["url"] for o in [origin_qux_quux, origin_barbaz_qux]]
assert sorted(results) == sorted(expected_results)
actual_page = self.search.origin_search(url_pattern="qux")
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [o["url"] for o in [origin_qux_quux, origin_barbaz_qux]]
assert sorted(results) == sorted(expected_results)
def test_origin_url_all_terms(self):
origin_foo_bar_baz = {"url": "http://foo.bar/baz"}
origin_foo_bar_foo_bar = {"url": "http://foo.bar/foo.bar"}
origins = [origin_foo_bar_baz, origin_foo_bar_foo_bar]
self.search.origin_update(origins)
self.search.flush()
# Only results containing all terms should be returned.
actual_page = self.search.origin_search(url_pattern="foo bar baz")
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foo_bar_baz]
def test_origin_with_visit(self):
origin_foobar_baz = {"url": "http://foobar/baz"}
self.search.origin_update(
[{**o, "has_visits": True} for o in [origin_foobar_baz]]
)
self.search.flush()
actual_page = self.search.origin_search(url_pattern="foobar", with_visit=True)
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar_baz]
def test_origin_with_visit_added(self):
origin_foobar_baz = {"url": "http://foobar.baz"}
self.search.origin_update([origin_foobar_baz])
self.search.flush()
actual_page = self.search.origin_search(url_pattern="foobar", with_visit=True)
assert actual_page.next_page_token is None
assert actual_page.results == []
self.search.origin_update(
[{**o, "has_visits": True} for o in [origin_foobar_baz]]
)
self.search.flush()
actual_page = self.search.origin_search(url_pattern="foobar", with_visit=True)
assert actual_page.next_page_token is None
assert actual_page.results == [origin_foobar_baz]
def test_origin_no_visit_types_search(self):
origins = [{"url": "http://foobar.baz"}]
self.search.origin_update(origins)
self.search.flush()
actual_page = self.search.origin_search(url_pattern="http", visit_types=["git"])
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = []
assert sorted(results) == sorted(expected_results)
actual_page = self.search.origin_search(url_pattern="http", visit_types=None)
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [origin["url"] for origin in origins]
assert sorted(results) == sorted(expected_results)
def test_origin_visit_types_search(self):
origins = [
{"url": "http://foobar.baz", "visit_types": ["git"]},
{"url": "http://barbaz.qux", "visit_types": ["svn"]},
{"url": "http://qux.quux", "visit_types": ["hg"]},
]
self.search.origin_update(origins)
self.search.flush()
for origin in origins:
actual_page = self.search.origin_search(
url_pattern="http", visit_types=origin["visit_types"]
)
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [origin["url"]]
assert sorted(results) == sorted(expected_results)
actual_page = self.search.origin_search(url_pattern="http", visit_types=None)
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [origin["url"] for origin in origins]
assert sorted(results) == sorted(expected_results)
def test_origin_visit_types_update_search(self):
origin_url = "http://foobar.baz"
self.search.origin_update([{"url": origin_url}])
self.search.flush()
def _add_visit_type(visit_type):
self.search.origin_update(
[{"url": origin_url, "visit_types": [visit_type]}]
)
self.search.flush()
def _check_visit_types(visit_types_list):
for visit_types in visit_types_list:
actual_page = self.search.origin_search(
url_pattern="http", visit_types=visit_types
)
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [origin_url]
assert sorted(results) == sorted(expected_results)
_add_visit_type("git")
_check_visit_types([["git"], ["git", "hg"]])
_add_visit_type("svn")
_check_visit_types([["git"], ["svn"], ["svn", "git"], ["git", "hg", "svn"]])
_add_visit_type("hg")
_check_visit_types(
[
["git"],
["svn"],
["hg"],
["svn", "git"],
["hg", "git"],
["hg", "svn"],
["git", "hg", "svn"],
]
)
+ def test_origin_nb_visits_update_search(self):
+ origin_url = "http://foobar.baz"
+ self.search.origin_update([{"url": origin_url}])
+ self.search.flush()
+
+ def _update_nb_visits(nb_visits):
+ self.search.origin_update([{"url": origin_url, "nb_visits": nb_visits}])
+ self.search.flush()
+
+ def _check_min_nb_visits(min_nb_visits):
+ actual_page = self.search.origin_search(
+ url_pattern=origin_url, min_nb_visits=min_nb_visits,
+ )
+ assert actual_page.next_page_token is None
+ results = [r["url"] for r in actual_page.results]
+ expected_results = [origin_url]
+ assert sorted(results) == sorted(expected_results)
+
+ _update_nb_visits(2)
+ _check_min_nb_visits(2) # Works for = 2
+ _check_min_nb_visits(1) # Works for < 2
+
+ with pytest.raises(AssertionError):
+ _check_min_nb_visits(
+ 5
+ ) # No results for nb_visits >= 5 (should throw error)
+
+ _update_nb_visits(5)
+ _check_min_nb_visits(5) # Works for = 5
+ _check_min_nb_visits(3) # Works for < 5
+
+ def test_origin_last_visit_date_update_search(self):
+ origin_url = "http://foobar.baz"
+ self.search.origin_update([{"url": origin_url}])
+ self.search.flush()
+
+ def _update_last_visit_date(last_visit_date):
+ self.search.origin_update(
+ [{"url": origin_url, "last_visit_date": last_visit_date}]
+ )
+ self.search.flush()
+
+ def _check_min_last_visit_date(min_last_visit_date):
+ actual_page = self.search.origin_search(
+ url_pattern=origin_url, min_last_visit_date=min_last_visit_date,
+ )
+ assert actual_page.next_page_token is None
+ results = [r["url"] for r in actual_page.results]
+ expected_results = [origin_url]
+ assert sorted(results) == sorted(expected_results)
+
+ now = datetime.now(tz=timezone.utc).isoformat()
+ now_minus_5_hours = (
+ datetime.now(tz=timezone.utc) - timedelta(hours=5)
+ ).isoformat()
+ now_plus_5_hours = (
+ datetime.now(tz=timezone.utc) + timedelta(hours=5)
+ ).isoformat()
+
+ _update_last_visit_date(now)
+
+ _check_min_last_visit_date(now) # Works for =
+ _check_min_last_visit_date(now_minus_5_hours) # Works for <
+ with pytest.raises(AssertionError):
+ _check_min_last_visit_date(now_plus_5_hours) # Fails for >
+
+ _update_last_visit_date(now_plus_5_hours)
+
+ _check_min_last_visit_date(now_plus_5_hours) # Works for =
+ _check_min_last_visit_date(now) # Works for <
+
def test_origin_update_with_no_visit_types(self):
"""
Update an origin with visit types first, then with no visit types, and
check that the origin can still be searched by visit types afterwards.
"""
origin_url = "http://foobar.baz"
self.search.origin_update([{"url": origin_url, "visit_types": ["git"]}])
self.search.flush()
self.search.origin_update([{"url": origin_url}])
self.search.flush()
actual_page = self.search.origin_search(url_pattern="http", visit_types=["git"])
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [origin_url]
assert results == expected_results
def test_origin_intrinsic_metadata_description(self):
origin1_nothin = {"url": "http://origin1"}
origin2_foobar = {"url": "http://origin2"}
origin3_barbaz = {"url": "http://origin3"}
self.search.origin_update(
[
{**origin1_nothin, "intrinsic_metadata": {},},
{
**origin2_foobar,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "foo bar",
},
},
{
**origin3_barbaz,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "bar baz",
},
},
]
)
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="foo")
assert actual_page.next_page_token is None
assert actual_page.results == [origin2_foobar]
actual_page = self.search.origin_search(metadata_pattern="foo bar")
assert actual_page.next_page_token is None
assert actual_page.results == [origin2_foobar]
actual_page = self.search.origin_search(metadata_pattern="bar baz")
assert actual_page.next_page_token is None
assert actual_page.results == [origin3_barbaz]
def test_origin_intrinsic_metadata_all_terms(self):
origin1_foobarfoobar = {"url": "http://origin1"}
origin3_foobarbaz = {"url": "http://origin2"}
self.search.origin_update(
[
{
**origin1_foobarfoobar,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "foo bar foo bar",
},
},
{
**origin3_foobarbaz,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "foo bar baz",
},
},
]
)
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="foo bar baz")
assert actual_page.next_page_token is None
assert actual_page.results == [origin3_foobarbaz]
def test_origin_intrinsic_metadata_long_description(self):
"""Checks ElasticSearch does not try to store large values untokenize,
which would be inefficient and crash it with:
Document contains at least one immense term in field="intrinsic_metadata.http://schema.org/description.@value" (whose UTF8 encoding is longer than the max length 32766), all of which were skipped.
""" # noqa
origin1 = {"url": "http://origin1"}
self.search.origin_update(
[
{
**origin1,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": " ".join(f"foo{i}" for i in range(100000)),
},
},
]
)
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="foo42")
assert actual_page.next_page_token is None
assert actual_page.results == [origin1]
def test_origin_intrinsic_metadata_matches_cross_fields(self):
"""Checks the backend finds results even if the two words in the query are
each in a different field."""
origin1 = {"url": "http://origin1"}
self.search.origin_update(
[
{
**origin1,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"description": "foo bar",
"author": "John Doe",
},
},
]
)
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="foo John")
assert actual_page.next_page_token is None
assert actual_page.results == [origin1]
def test_origin_intrinsic_metadata_nested(self):
origin1_nothin = {"url": "http://origin1"}
origin2_foobar = {"url": "http://origin2"}
origin3_barbaz = {"url": "http://origin3"}
self.search.origin_update(
[
{**origin1_nothin, "intrinsic_metadata": {},},
{
**origin2_foobar,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"keywords": ["foo", "bar"],
},
},
{
**origin3_barbaz,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"keywords": ["bar", "baz"],
},
},
]
)
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="foo")
assert actual_page.next_page_token is None
assert actual_page.results == [origin2_foobar]
actual_page = self.search.origin_search(metadata_pattern="foo bar")
assert actual_page.next_page_token is None
assert actual_page.results == [origin2_foobar]
actual_page = self.search.origin_search(metadata_pattern="bar baz")
assert actual_page.next_page_token is None
assert actual_page.results == [origin3_barbaz]
def test_origin_intrinsic_metadata_inconsistent_type(self):
"""Checks the same field can have a concrete value, an object, or an array
in different documents."""
origin1_foobar = {"url": "http://origin1"}
origin2_barbaz = {"url": "http://origin2"}
origin3_bazqux = {"url": "http://origin3"}
self.search.origin_update(
[
{
**origin1_foobar,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"author": {"familyName": "Foo", "givenName": "Bar",},
},
},
]
)
self.search.flush()
self.search.origin_update(
[
{
**origin2_barbaz,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"author": "Bar Baz",
},
},
{
**origin3_bazqux,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"author": ["Baz", "Qux"],
},
},
]
)
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="bar")
assert actual_page.next_page_token is None
results = [r["url"] for r in actual_page.results]
expected_results = [o["url"] for o in [origin2_barbaz, origin1_foobar]]
assert sorted(results) == sorted(expected_results)
actual_page = self.search.origin_search(metadata_pattern="baz")
assert actual_page.next_page_token is None
assert actual_page.results == [origin2_barbaz, origin3_bazqux]
actual_page = self.search.origin_search(metadata_pattern="foo")
assert actual_page.next_page_token is None
assert actual_page.results == [origin1_foobar]
actual_page = self.search.origin_search(metadata_pattern="bar baz")
assert actual_page.next_page_token is None
assert actual_page.results == [origin2_barbaz]
actual_page = self.search.origin_search(metadata_pattern="qux")
assert actual_page.next_page_token is None
assert actual_page.results == [origin3_bazqux]
actual_page = self.search.origin_search(metadata_pattern="baz qux")
assert actual_page.next_page_token is None
assert actual_page.results == [origin3_bazqux]
actual_page = self.search.origin_search(metadata_pattern="foo bar")
assert actual_page.next_page_token is None
assert actual_page.results == [origin1_foobar]
def test_origin_intrinsic_metadata_date(self):
"""Checks inserting a date-like in a field does not update the mapping to
require every document uses a date in that field; or that search queries
use a date either.
Likewise for numeric fields."""
origin1 = {"url": "http://origin1"}
origin2 = {"url": "http://origin2"}
self.search.origin_update(
[
{
**origin1,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"dateCreated": "2021-02-18T10:16:52",
"version": "1.0",
},
}
]
)
self.search.flush()
self.search.origin_update(
[
{
**origin2,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"dateCreated": "a long time ago",
"address": "in a galaxy far, far away",
"version": "a new hope",
},
},
]
)
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="2021")
assert actual_page.next_page_token is None
assert actual_page.results == [origin1]
actual_page = self.search.origin_search(metadata_pattern="long time ago")
assert actual_page.next_page_token is None
assert actual_page.results == [origin2]
def test_origin_intrinsic_metadata_update(self):
origin = {"url": "http://origin1"}
origin_data = {
**origin,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"author": "John Doe",
},
}
self.search.origin_update([origin_data])
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="John")
assert actual_page.next_page_token is None
assert actual_page.results == [origin]
origin_data["intrinsic_metadata"]["author"] = "Jane Doe"
self.search.origin_update([origin_data])
self.search.flush()
actual_page = self.search.origin_search(metadata_pattern="Jane")
assert actual_page.next_page_token is None
assert actual_page.results == [origin]
# TODO: add more tests with more codemeta terms
# TODO: add more tests with edge cases
@settings(deadline=None)
@given(strategies.integers(min_value=1, max_value=4))
def test_origin_url_paging(self, limit):
# TODO: no hypothesis
origin1_foo = {"url": "http://origin1/foo"}
origin2_foobar = {"url": "http://origin2/foo/bar"}
origin3_foobarbaz = {"url": "http://origin3/foo/bar/baz"}
self.reset()
self.search.origin_update([origin1_foo, origin2_foobar, origin3_foobarbaz])
self.search.flush()
results = stream_results(
self.search.origin_search, url_pattern="foo bar baz", limit=limit
)
results = [res["url"] for res in results]
expected_results = [o["url"] for o in [origin3_foobarbaz]]
assert sorted(results[0 : len(expected_results)]) == sorted(expected_results)
results = stream_results(
self.search.origin_search, url_pattern="foo bar", limit=limit
)
results = [res["url"] for res in results]
expected_results = [o["url"] for o in [origin2_foobar, origin3_foobarbaz]]
assert sorted(results[0 : len(expected_results)]) == sorted(expected_results)
results = stream_results(
self.search.origin_search, url_pattern="foo", limit=limit
)
results = [res["url"] for res in results]
expected_results = [
o["url"] for o in [origin1_foo, origin2_foobar, origin3_foobarbaz]
]
assert sorted(results[0 : len(expected_results)]) == sorted(expected_results)
@settings(deadline=None)
@given(strategies.integers(min_value=1, max_value=4))
def test_origin_intrinsic_metadata_paging(self, limit):
# TODO: no hypothesis
origin1_foo = {"url": "http://origin1"}
origin2_foobar = {"url": "http://origin2"}
origin3_foobarbaz = {"url": "http://origin3"}
self.reset()
self.search.origin_update(
[
{
**origin1_foo,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"keywords": ["foo"],
},
},
{
**origin2_foobar,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"keywords": ["foo", "bar"],
},
},
{
**origin3_foobarbaz,
"intrinsic_metadata": {
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"keywords": ["foo", "bar", "baz"],
},
},
]
)
self.search.flush()
results = stream_results(
self.search.origin_search, metadata_pattern="foo bar baz", limit=limit
)
assert list(results) == [origin3_foobarbaz]
results = stream_results(
self.search.origin_search, metadata_pattern="foo bar", limit=limit
)
assert list(results) == [origin2_foobar, origin3_foobarbaz]
results = stream_results(
self.search.origin_search, metadata_pattern="foo", limit=limit
)
assert list(results) == [origin1_foo, origin2_foobar, origin3_foobarbaz]
def test_search_blocklisted_results(self):
origin1 = {"url": "http://origin1"}
origin2 = {"url": "http://origin2", "blocklisted": True}
self.search.origin_update([origin1, origin2])
self.search.flush()
actual_page = self.search.origin_search(url_pattern="origin")
assert actual_page.next_page_token is None
assert actual_page.results == [origin1]
def test_search_blocklisted_update(self):
origin1 = {"url": "http://origin1"}
self.search.origin_update([origin1])
self.search.flush()
result_page = self.search.origin_search(url_pattern="origin")
assert result_page.next_page_token is None
assert result_page.results == [origin1]
self.search.origin_update([{**origin1, "blocklisted": True}])
self.search.flush()
result_page = self.search.origin_search(url_pattern="origin")
assert result_page.next_page_token is None
assert result_page.results == []
self.search.origin_update(
[{**origin1, "has_visits": True, "visit_types": ["git"]}]
)
self.search.flush()
result_page = self.search.origin_search(url_pattern="origin")
assert result_page.next_page_token is None
assert result_page.results == []
