diff --git a/swh/search/elasticsearch.py b/swh/search/elasticsearch.py
index 0057c96..f640593 100644
--- a/swh/search/elasticsearch.py
+++ b/swh/search/elasticsearch.py
@@ -1,492 +1,545 @@
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import base64
from textwrap import dedent
from typing import Any, Dict, Iterable, Iterator, List, Optional

from elasticsearch import Elasticsearch, helpers
import msgpack

from swh.indexer import codemeta
from swh.model import model
from swh.model.identifiers import origin_identifier
from swh.search.interface import (
    SORT_BY_OPTIONS,
    MinimalOriginDict,
    OriginDict,
    PagedResult,
)
from swh.search.metrics import send_metric, timed

INDEX_NAME_PARAM = "index"
READ_ALIAS_PARAM = "read_alias"
WRITE_ALIAS_PARAM = "write_alias"

ORIGIN_DEFAULT_CONFIG = {
    INDEX_NAME_PARAM: "origin",
    READ_ALIAS_PARAM: "origin-read",
    WRITE_ALIAS_PARAM: "origin-write",
}


def _sanitize_origin(origin):
    origin = origin.copy()

    # Whitelist fields to be saved in Elasticsearch
    res = {"url": origin.pop("url")}
    for field_name in (
        "blocklisted",
        "has_visits",
        "intrinsic_metadata",
        "visit_types",
        "nb_visits",
        "snapshot_id",
        "last_visit_date",
        "last_eventful_visit_date",
        "last_revision_date",
        "last_release_date",
    ):
        if field_name in origin:
            res[field_name] = origin.pop(field_name)

    # Run the JSON-LD expansion algorithm
    #
    # to normalize the Codemeta metadata.
    # This is required as Elasticsearch needs each field to have a consistent
    # type across documents to be searchable; and non-expanded JSON-LD documents
    # can have various types in the same field. For example, all these are
    # equivalent in JSON-LD:
    # * {"author": "Jane Doe"}
    # * {"author": ["Jane Doe"]}
    # * {"author": {"@value": "Jane Doe"}}
    # * {"author": [{"@value": "Jane Doe"}]}
    # and JSON-LD expansion will convert them all to the last one.
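    # Illustration (editor's note, mirroring the expansion exercised in the
    # tests below): expanding
    #     {"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
    #      "license": "https://spdx.org/licenses/LGPL-2.0-only"}
    # yields
    #     [{"http://schema.org/license":
    #         [{"@id": "https://spdx.org/licenses/LGPL-2.0-only"}]}]
    # i.e. a single-element list of dicts keyed by absolute schema.org IRIs.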
if "intrinsic_metadata" in res: res["intrinsic_metadata"] = codemeta.expand(res["intrinsic_metadata"]) return res def token_encode(index_to_tokenize: Dict[bytes, Any]) -> str: """Tokenize as string an index page result from a search """ page_token = base64.b64encode(msgpack.dumps(index_to_tokenize)) return page_token.decode() def token_decode(page_token: str) -> Dict[bytes, Any]: """Read the page_token """ return msgpack.loads(base64.b64decode(page_token.encode()), raw=True) class ElasticSearch: def __init__(self, hosts: List[str], indexes: Dict[str, Dict[str, str]] = {}): self._backend = Elasticsearch(hosts=hosts) # Merge current configuration with default values origin_config = indexes.get("origin", {}) self.origin_config = {**ORIGIN_DEFAULT_CONFIG, **origin_config} def _get_origin_index(self) -> str: return self.origin_config[INDEX_NAME_PARAM] def _get_origin_read_alias(self) -> str: return self.origin_config[READ_ALIAS_PARAM] def _get_origin_write_alias(self) -> str: return self.origin_config[WRITE_ALIAS_PARAM] @timed def check(self): return self._backend.ping() def deinitialize(self) -> None: """Removes all indices from the Elasticsearch backend""" self._backend.indices.delete(index="*") def initialize(self) -> None: """Declare Elasticsearch indices, aliases and mappings""" if not self._backend.indices.exists(index=self._get_origin_index()): self._backend.indices.create(index=self._get_origin_index()) if not self._backend.indices.exists_alias(self._get_origin_read_alias()): self._backend.indices.put_alias( index=self._get_origin_index(), name=self._get_origin_read_alias() ) if not self._backend.indices.exists_alias(self._get_origin_write_alias()): self._backend.indices.put_alias( index=self._get_origin_index(), name=self._get_origin_write_alias() ) self._backend.indices.put_mapping( index=self._get_origin_index(), body={ "dynamic_templates": [ { "booleans_as_string": { # All fields stored as string in the metadata # even the booleans "match_mapping_type": "boolean", "path_match": "intrinsic_metadata.*", "mapping": {"type": "keyword"}, } } ], "date_detection": False, "properties": { # sha1 of the URL; used as the document id "sha1": {"type": "keyword", "doc_values": True,}, # Used both to search URLs, and as the result to return # as a response to queries "url": { "type": "text", # To split URLs into token on any character # that is not alphanumerical "analyzer": "simple", # 2-gram and partial-3-gram search (ie. with the end of the # third word potentially missing) "fields": { "as_you_type": { "type": "search_as_you_type", "analyzer": "simple", } }, }, "visit_types": {"type": "keyword"}, # used to filter out origins that were never visited "has_visits": {"type": "boolean",}, "nb_visits": {"type": "integer"}, "snapshot_id": {"type": "keyword"}, "last_visit_date": {"type": "date"}, "last_eventful_visit_date": {"type": "date"}, "last_release_date": {"type": "date"}, "last_revision_date": {"type": "date"}, "intrinsic_metadata": { "type": "nested", "properties": { "@context": { # don't bother indexing tokens in these URIs, as the # are used as namespaces "type": "keyword", } }, }, # Has this origin been taken down? 
"blocklisted": {"type": "boolean",}, }, }, ) @timed def flush(self) -> None: self._backend.indices.refresh(index=self._get_origin_write_alias()) @timed def origin_update(self, documents: Iterable[OriginDict]) -> None: write_index = self._get_origin_write_alias() documents = map(_sanitize_origin, documents) documents_with_sha1 = ( (origin_identifier(document), document) for document in documents ) # painless script that will be executed when updating an origin document update_script = dedent( """ // utility function to get and parse date ZonedDateTime getDate(def ctx, String date_field) { String default_date = "0001-01-01T00:00:00Z"; String date = ctx._source.getOrDefault(date_field, default_date); return ZonedDateTime.parse(date); } // backup current visit_types field value List visit_types = ctx._source.getOrDefault("visit_types", []); int nb_visits = ctx._source.getOrDefault("nb_visits", 0); ZonedDateTime last_visit_date = getDate(ctx, "last_visit_date"); String snapshot_id = ctx._source.getOrDefault("snapshot_id", ""); ZonedDateTime last_eventful_visit_date = getDate(ctx, "last_eventful_visit_date"); ZonedDateTime last_revision_date = getDate(ctx, "last_revision_date"); ZonedDateTime last_release_date = getDate(ctx, "last_release_date"); // update origin document with new field values ctx._source.putAll(params); // restore previous visit types after visit_types field overriding if (ctx._source.containsKey("visit_types")) { for (int i = 0; i < visit_types.length; ++i) { if (!ctx._source.visit_types.contains(visit_types[i])) { ctx._source.visit_types.add(visit_types[i]); } } } // Undo overwrite if incoming nb_visits is smaller if (ctx._source.containsKey("nb_visits")) { int incoming_nb_visits = ctx._source.getOrDefault("nb_visits", 0); if(incoming_nb_visits < nb_visits){ ctx._source.nb_visits = nb_visits; } } // Undo overwrite if incoming last_visit_date is older if (ctx._source.containsKey("last_visit_date")) { ZonedDateTime incoming_last_visit_date = getDate(ctx, "last_visit_date"); int difference = // returns -1, 0 or 1 incoming_last_visit_date.compareTo(last_visit_date); if(difference < 0){ ctx._source.last_visit_date = last_visit_date; } } // Undo update of last_eventful_date and snapshot_id if // snapshot_id hasn't changed OR incoming_last_eventful_visit_date is older if (ctx._source.containsKey("snapshot_id")) { String incoming_snapshot_id = ctx._source.getOrDefault("snapshot_id", ""); ZonedDateTime incoming_last_eventful_visit_date = getDate(ctx, "last_eventful_visit_date"); int difference = // returns -1, 0 or 1 incoming_last_eventful_visit_date.compareTo(last_eventful_visit_date); if(snapshot_id == incoming_snapshot_id || difference < 0){ ctx._source.snapshot_id = snapshot_id; ctx._source.last_eventful_visit_date = last_eventful_visit_date; } } // Undo overwrite if incoming last_revision_date is older if (ctx._source.containsKey("last_revision_date")) { ZonedDateTime incoming_last_revision_date = getDate(ctx, "last_revision_date"); int difference = // returns -1, 0 or 1 incoming_last_revision_date.compareTo(last_revision_date); if(difference < 0){ ctx._source.last_revision_date = last_revision_date; } } // Undo overwrite if incoming last_release_date is older if (ctx._source.containsKey("last_release_date")) { ZonedDateTime incoming_last_release_date = getDate(ctx, "last_release_date"); // returns -1, 0 or 1 int difference = incoming_last_release_date.compareTo(last_release_date); if(difference < 0){ ctx._source.last_release_date = last_release_date; } } """ # noqa ) actions = [ 
{ "_op_type": "update", "_id": sha1, "_index": write_index, "scripted_upsert": True, "upsert": {**document, "sha1": sha1,}, "script": { "source": update_script, "lang": "painless", "params": document, }, } for (sha1, document) in documents_with_sha1 ] indexed_count, errors = helpers.bulk(self._backend, actions, index=write_index) assert isinstance(errors, List) # Make mypy happy send_metric("document:index", count=indexed_count, method_name="origin_update") send_metric( "document:index_error", count=len(errors), method_name="origin_update" ) def origin_dump(self) -> Iterator[model.Origin]: results = helpers.scan(self._backend, index=self._get_origin_read_alias()) for hit in results: yield self._backend.termvectors( index=self._get_origin_read_alias(), id=hit["_id"], fields=["*"] ) @timed def origin_search( self, *, url_pattern: Optional[str] = None, metadata_pattern: Optional[str] = None, with_visit: bool = False, visit_types: Optional[List[str]] = None, min_nb_visits: int = 0, min_last_visit_date: str = "", min_last_eventful_visit_date: str = "", min_last_revision_date: str = "", min_last_release_date: str = "", + programming_languages: List[str] = [], + licenses: List[str] = [], page_token: Optional[str] = None, sort_by: List[str] = [], limit: int = 50, ) -> PagedResult[MinimalOriginDict]: query_clauses: List[Dict[str, Any]] = [] if url_pattern: query_clauses.append( { "multi_match": { "query": url_pattern, "type": "bool_prefix", "operator": "and", "fields": [ "url.as_you_type", "url.as_you_type._2gram", "url.as_you_type._3gram", ], } } ) if metadata_pattern: query_clauses.append( { "nested": { "path": "intrinsic_metadata", "query": { "multi_match": { "query": metadata_pattern, # Makes it so that the "foo bar" query returns # documents which contain "foo" in a field and "bar" # in a different field "type": "cross_fields", # All keywords must be found in a document for it to # be considered a match. # TODO: allow missing keywords? "operator": "and", # Searches on all fields of the intrinsic_metadata dict, # recursively. "fields": ["intrinsic_metadata.*"], } }, } } ) if not query_clauses: raise ValueError( "At least one of url_pattern and metadata_pattern must be provided." 
) if with_visit: query_clauses.append({"term": {"has_visits": True,}}) if min_nb_visits: query_clauses.append({"range": {"nb_visits": {"gte": min_nb_visits,},}}) if min_last_visit_date: query_clauses.append( { "range": { "last_visit_date": { "gte": min_last_visit_date.replace("Z", "+00:00"), } } } ) if min_last_eventful_visit_date: query_clauses.append( { "range": { "last_eventful_visit_date": { "gte": min_last_eventful_visit_date.replace("Z", "+00:00"), } } } ) if min_last_revision_date: query_clauses.append( { "range": { "last_revision_date": { "gte": min_last_revision_date.replace("Z", "+00:00"), } } } ) if min_last_release_date: query_clauses.append( { "range": { "last_release_date": { "gte": min_last_release_date.replace("Z", "+00:00"), } } } ) + if licenses or programming_languages: + + license_filters = [] + for license in licenses: + license_filters.append( + { + "match": { + ( + "intrinsic_metadata" ".http://schema.org/license" ".@id" + ): license + } + } + ) + + language_filters = [] + for language in programming_languages: + language_filters.append( + { + "match": { + ( + "intrinsic_metadata" + ".http://schema.org/programmingLanguage" + ".@value" + ): language + } + } + ) + + intrinsic_metadata_filters = [ + {"bool": {"should": license_filters}}, + {"bool": {"should": language_filters}}, + ] + + query_clauses.append( + { + "nested": { + "path": "intrinsic_metadata", + "query": {"bool": {"must": intrinsic_metadata_filters,}}, + # "must" is equivalent to "AND" + # "should" is equivalent to "OR" + # Resulting origins must return true for the following: + # (license_1 OR license_2 ..) AND (lang_1 OR lang_2 ..) + # This is equivalent to {"must": [ + # {"should": [license_1,license_2] }, + # {"should": [lang_1,lang_2]}] } + # ]} + # Note: Usage of "bool" has been omitted for readability + } + } + ) + if visit_types is not None: query_clauses.append({"terms": {"visit_types": visit_types}}) sorting_params = [] for field in sort_by: order = "asc" if field and field[0] == "-": field = field[1:] order = "desc" if field in SORT_BY_OPTIONS: sorting_params.append({field: order}) sorting_params.extend( [{"_score": "desc"}, {"sha1": "asc"},] ) body = { "query": { "bool": { "must": query_clauses, "must_not": [{"term": {"blocklisted": True}}], } }, "sort": sorting_params, } if page_token: # TODO: use ElasticSearch's scroll API? 
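            # Editor's note: the page token round-trips the (score, sha1) of the last
            # hit of the previous page (msgpack-encoded then base64, see token_encode
            # and token_decode above); passing it as "search_after" makes
            # Elasticsearch resume right after that hit.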
page_token_content = token_decode(page_token) body["search_after"] = [ page_token_content[b"score"], page_token_content[b"sha1"].decode("ascii"), ] res = self._backend.search( index=self._get_origin_read_alias(), body=body, size=limit ) hits = res["hits"]["hits"] next_page_token: Optional[str] = None if len(hits) == limit: # There are more results after this page; return a pagination token # to get them in a future query last_hit = hits[-1] next_page_token_content = { b"score": last_hit["_score"], b"sha1": last_hit["_source"]["sha1"], } next_page_token = token_encode(next_page_token_content) assert len(hits) <= limit return PagedResult( results=[{"url": hit["_source"]["url"]} for hit in hits], next_page_token=next_page_token, ) diff --git a/swh/search/in_memory.py b/swh/search/in_memory.py index bc75d0f..a810e4f 100644 --- a/swh/search/in_memory.py +++ b/swh/search/in_memory.py @@ -1,294 +1,413 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict from datetime import datetime, timezone import re from typing import Any, Dict, Iterable, Iterator, List, Optional +from swh.indexer import codemeta from swh.model.identifiers import origin_identifier from swh.search.interface import ( SORT_BY_OPTIONS, MinimalOriginDict, OriginDict, PagedResult, ) _words_regexp = re.compile(r"\w+") def _dict_words_set(d): """Recursively extract set of words from dict content.""" values = set() def extract(obj, words): if isinstance(obj, dict): for k, v in obj.items(): extract(v, words) elif isinstance(obj, list): for item in obj: extract(item, words) else: words.update(_words_regexp.findall(str(obj).lower())) return words return extract(d, values) +def _nested_get(nested_dict, nested_keys): + """Extracts values from deeply nested dictionary nested_dict + using the nested_keys and returns a list of all of the values + discovered in the process. + + + >>> nested_dict = [ + ... {"name": [{"@value": {"first": "f1", "last": "l1"}}], "address": "XYZ"}, + ... {"name": [{"@value": {"first": "f2", "last": "l2"}}], "address": "ABC"}, + ... ] + >>> _nested_get(nested_dict, ["name", "@value", "last"]) + ['l1', 'l2'] + >>> _nested_get(nested_dict, ["address"]) + ['XYZ', 'ABC'] + + It doesn't allow fetching intermediate values and returns "" for such cases + >>> _nested_get(nested_dict, ["name", "@value"]) + ['', ''] + """ + + def _nested_get_recursive(nested_dict, nested_keys): + try: + curr_obj = nested_dict + type_curr_obj = type(curr_obj) + for i, key in enumerate(nested_keys): + if key in curr_obj: + curr_obj = curr_obj[key] + type_curr_obj = type(curr_obj) + else: + if type_curr_obj == list: + curr_obj = [ + _nested_get_recursive(obj, nested_keys[i:]) + for obj in curr_obj + ] + # If value isn't a list or string or integer + elif type_curr_obj != str and type_curr_obj != int: + return "" + + # If only one element is present in the list, take it out + # This ensures a flat array every time + if type_curr_obj == list and len(curr_obj) == 1: + curr_obj = curr_obj[0] + + return curr_obj + except Exception: + return [] + + res = _nested_get_recursive(nested_dict, nested_keys) + if type(res) != list: + return [res] + + return res + + def _get_sorting_key(origin, field): """Get value of the field from an origin for sorting origins. Here field should be a member of SORT_BY_OPTIONS. 
If "-" is present at the start of field then invert the value in a way that it reverses the sorting order. """ reversed = False if field[0] == "-": field = field[1:] reversed = True datetime_max = datetime.max.replace(tzinfo=timezone.utc) if field in ["nb_visits"]: # unlike other options, nb_visits is of type integer if reversed: return -origin.get(field, 0) else: return origin.get(field, 0) elif field in SORT_BY_OPTIONS: if reversed: return datetime_max - datetime.fromisoformat( origin.get(field, "0001-01-01T00:00:00Z").replace("Z", "+00:00") ) else: return datetime.fromisoformat( origin.get(field, "0001-01-01T00:00:00Z").replace("Z", "+00:00") ) class InMemorySearch: def __init__(self): pass def check(self): return True def deinitialize(self) -> None: if hasattr(self, "_origins"): del self._origins del self._origin_ids def initialize(self) -> None: self._origins: Dict[str, Dict[str, Any]] = defaultdict(dict) self._origin_ids: List[str] = [] def flush(self) -> None: pass _url_splitter = re.compile(r"\W") def origin_update(self, documents: Iterable[OriginDict]) -> None: for source_document in documents: document: Dict[str, Any] = dict(source_document) id_ = origin_identifier(document) if "url" in document: document["_url_tokens"] = set( self._url_splitter.split(source_document["url"]) ) if "visit_types" in document: document["visit_types"] = set(source_document["visit_types"]) if "visit_types" in self._origins[id_]: document["visit_types"].update(self._origins[id_]["visit_types"]) if "nb_visits" in document: document["nb_visits"] = max( document["nb_visits"], self._origins[id_].get("nb_visits", 0) ) if "last_visit_date" in document: document["last_visit_date"] = max( datetime.fromisoformat(document["last_visit_date"]), datetime.fromisoformat( self._origins[id_] .get("last_visit_date", "0001-01-01T00:00:00.000000Z",) .replace("Z", "+00:00") ), ).isoformat() if "snapshot_id" in document and "last_eventful_visit_date" in document: incoming_date = datetime.fromisoformat( document["last_eventful_visit_date"] ) current_date = datetime.fromisoformat( self._origins[id_] .get("last_eventful_visit_date", "0001-01-01T00:00:00Z",) .replace("Z", "+00:00") ) incoming_snapshot_id = document["snapshot_id"] current_snapshot_id = self._origins[id_].get("snapshot_id", "") if ( incoming_snapshot_id == current_snapshot_id or incoming_date < current_date ): # update not required so override the incoming_values document["snapshot_id"] = current_snapshot_id document["last_eventful_visit_date"] = current_date.isoformat() if "last_revision_date" in document: document["last_revision_date"] = max( datetime.fromisoformat(document["last_revision_date"]), datetime.fromisoformat( self._origins[id_] .get("last_revision_date", "0001-01-01T00:00:00Z",) .replace("Z", "+00:00") ), ).isoformat() if "last_release_date" in document: document["last_release_date"] = max( datetime.fromisoformat(document["last_release_date"]), datetime.fromisoformat( self._origins[id_] .get("last_release_date", "0001-01-01T00:00:00Z",) .replace("Z", "+00:00") ), ).isoformat() + if "intrinsic_metadata" in document: + document["intrinsic_metadata"] = codemeta.expand( + document["intrinsic_metadata"] + ) + + if len(document["intrinsic_metadata"]) != 1: + continue + + metadata = document["intrinsic_metadata"][0] + if "http://schema.org/license" in metadata: + metadata["http://schema.org/license"] = [ + {"@id": license["@id"].lower()} + for license in metadata["http://schema.org/license"] + ] + if "http://schema.org/programmingLanguage" in metadata: + 
metadata["http://schema.org/programmingLanguage"] = [ + {"@value": license["@value"].lower()} + for license in metadata["http://schema.org/programmingLanguage"] + ] + self._origins[id_].update(document) if id_ not in self._origin_ids: self._origin_ids.append(id_) def origin_search( self, *, url_pattern: Optional[str] = None, metadata_pattern: Optional[str] = None, with_visit: bool = False, visit_types: Optional[List[str]] = None, page_token: Optional[str] = None, min_nb_visits: int = 0, min_last_visit_date: str = "", min_last_eventful_visit_date: str = "", min_last_revision_date: str = "", min_last_release_date: str = "", + programming_languages: List[str] = [], + licenses: List[str] = [], sort_by: List[str] = [], limit: int = 50, ) -> PagedResult[MinimalOriginDict]: hits: Iterator[Dict[str, Any]] = ( self._origins[id_] for id_ in self._origin_ids if not self._origins[id_].get("blocklisted") ) if url_pattern: tokens = set(self._url_splitter.split(url_pattern)) def predicate(match): missing_tokens = tokens - match["_url_tokens"] if len(missing_tokens) == 0: return True elif len(missing_tokens) > 1: return False else: # There is one missing token, look up by prefix. (missing_token,) = missing_tokens return any( token.startswith(missing_token) for token in match["_url_tokens"] ) hits = filter(predicate, hits) if metadata_pattern: metadata_pattern_words = set( _words_regexp.findall(metadata_pattern.lower()) ) def predicate(match): if "intrinsic_metadata" not in match: return False return metadata_pattern_words.issubset( _dict_words_set(match["intrinsic_metadata"]) ) hits = filter(predicate, hits) if not url_pattern and not metadata_pattern: raise ValueError( "At least one of url_pattern and metadata_pattern must be provided." ) next_page_token: Optional[str] = None if with_visit: hits = filter(lambda o: o.get("has_visits"), hits) if min_nb_visits: hits = filter(lambda o: o.get("nb_visits", 0) >= min_nb_visits, hits) if min_last_visit_date: hits = filter( lambda o: datetime.fromisoformat( o.get("last_visit_date", "0001-01-01T00:00:00Z").replace( "Z", "+00:00" ) ) >= datetime.fromisoformat(min_last_visit_date), hits, ) if min_last_eventful_visit_date: hits = filter( lambda o: datetime.fromisoformat( o.get("last_eventful_visit_date", "0001-01-01T00:00:00Z").replace( "Z", "+00:00" ) ) >= datetime.fromisoformat(min_last_eventful_visit_date), hits, ) if min_last_revision_date: hits = filter( lambda o: datetime.fromisoformat( o.get("last_revision_date", "0001-01-01T00:00:00Z").replace( "Z", "+00:00" ) ) >= datetime.fromisoformat(min_last_revision_date), hits, ) if min_last_release_date: hits = filter( lambda o: datetime.fromisoformat( o.get("last_release_date", "0001-01-01T00:00:00Z").replace( "Z", "+00:00" ) ) >= datetime.fromisoformat(min_last_release_date), hits, ) + if licenses: + METADATA_LICENSES = [ + "intrinsic_metadata", + "http://schema.org/license", + "@id", + ] + licenses = [license_keyword.lower() for license_keyword in licenses] + hits = filter( + lambda o: any( + # If any of the queried licenses are found, include the origin + any( + # returns True if queried_license_keyword is found + # in any of the licenses of the origin + queried_license_keyword in origin_license + for origin_license in _nested_get(o, METADATA_LICENSES) + ) + for queried_license_keyword in licenses + ), + hits, + ) + if programming_languages: + METADATA_PROGRAMMING_LANGS = [ + "intrinsic_metadata", + "http://schema.org/programmingLanguage", + "@value", + ] + programming_languages = [ + lang_keyword.lower() for 
lang_keyword in programming_languages + ] + hits = filter( + lambda o: any( + # If any of the queried languages are found, include the origin + any( + # returns True if queried_lang_keyword is found + # in any of the langs of the origin + queried_lang_keyword in origin_lang + for origin_lang in _nested_get(o, METADATA_PROGRAMMING_LANGS) + ) + for queried_lang_keyword in programming_languages + ), + hits, + ) if visit_types is not None: visit_types_set = set(visit_types) hits = filter( lambda o: visit_types_set.intersection(o.get("visit_types", set())), hits, ) hits_list = sorted( hits, key=lambda o: tuple(_get_sorting_key(o, field) for field in sort_by), ) start_at_index = int(page_token) if page_token else 0 origins = [ {"url": hit["url"]} for hit in hits_list[start_at_index : start_at_index + limit] ] if len(origins) == limit: next_page_token = str(start_at_index + limit) assert len(origins) <= limit return PagedResult(results=origins, next_page_token=next_page_token,) diff --git a/swh/search/interface.py b/swh/search/interface.py index 9c1c66d..58e27c0 100644 --- a/swh/search/interface.py +++ b/swh/search/interface.py @@ -1,110 +1,116 @@ # Copyright (C) 2020-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Iterable, List, Optional, TypeVar from typing_extensions import TypedDict from swh.core.api import remote_api_endpoint from swh.core.api.classes import PagedResult as CorePagedResult TResult = TypeVar("TResult") PagedResult = CorePagedResult[TResult, str] SORT_BY_OPTIONS = [ "nb_visits", "last_visit_date", "last_eventful_visit_date", "last_revision_date", "last_release_date", ] class MinimalOriginDict(TypedDict): """Mandatory keys of an :class:`OriginDict`""" url: str class OriginDict(MinimalOriginDict, total=False): """Argument passed to :meth:`SearchInterface.origin_update`.""" visit_types: List[str] has_visits: bool class SearchInterface: @remote_api_endpoint("check") def check(self): """Dedicated method to execute some specific check per implementation. """ ... @remote_api_endpoint("flush") def flush(self) -> None: """Blocks until all previous calls to _update() are completely applied. """ ... @remote_api_endpoint("origin/update") def origin_update(self, documents: Iterable[OriginDict]) -> None: """Persist documents to the search backend. """ ... @remote_api_endpoint("origin/search") def origin_search( self, *, url_pattern: Optional[str] = None, metadata_pattern: Optional[str] = None, with_visit: bool = False, visit_types: Optional[List[str]] = None, page_token: Optional[str] = None, min_nb_visits: int = 0, min_last_visit_date: str = "", min_last_eventful_visit_date: str = "", min_last_revision_date: str = "", min_last_release_date: str = "", + programming_languages: List[str] = [], + licenses: List[str] = [], sort_by: List[str] = [], limit: int = 50, ) -> PagedResult[MinimalOriginDict]: """Searches for origins matching the `url_pattern`. Args: url_pattern: Part of the URL to search for with_visit: Whether origins with no visit are to be filtered out visit_types: Only origins having any of the provided visit types (e.g. 
                git, svn, pypi) will be returned
            page_token: Opaque value used for pagination
            min_nb_visits: Filter origins that have number of visits >=
                the provided value
            min_last_visit_date: Filter origins that have last_visit_date
                on or after the provided date (ISO format)
            min_last_eventful_visit_date: Filter origins that have
                last_eventful_visit_date (eventful = snapshot_id changed)
                on or after the provided date (ISO format)
            min_last_revision_date: Filter origins that have last_revision_date
                on or after the provided date (ISO format)
            min_last_release_date: Filter origins that have last_release_date
                on or after the provided date (ISO format)
+            licenses: Filter origins with licenses present in the given list
+                (based on intrinsic_metadata)
+            programming_languages: Filter origins with programming languages
+                present in the given list (based on intrinsic_metadata)
            sort_by: Sort results based on a list of fields mentioned in
                SORT_BY_OPTIONS (nb_visits, last_visit_date,
                last_eventful_visit_date, last_revision_date,
                last_release_date). Return results in descending order if "-"
                is present at the beginning, otherwise in ascending order.
            limit: number of results to return

        Returns:
            PagedResult of origin dicts matching the search criteria. If
            next_page_token is None, there is no more data to retrieve.

        """
        ...
diff --git a/swh/search/tests/test_in_memory.py b/swh/search/tests/test_in_memory.py
index 6ee2c06..fbadd0d 100644
--- a/swh/search/tests/test_in_memory.py
+++ b/swh/search/tests/test_in_memory.py
@@ -1,25 +1,72 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import unittest

import pytest

+from swh.indexer import codemeta
from swh.search import get_search
+from swh.search.in_memory import _nested_get

from .test_search import CommonSearchTest


class InmemorySearchTest(unittest.TestCase, CommonSearchTest):
    @pytest.fixture(autouse=True)
    def _instantiate_search(self):
        self.search = get_search("memory")

    def setUp(self):
        self.reset()

    def reset(self):
        self.search.deinitialize()
        self.search.initialize()
+
+
+def test_nested_get_helper_function():
+
+    intrinsic_metadata = codemeta.expand(
+        {
+            "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
+            "keywords": ["bar", "baz"],
+            "description": "foo bar 3",
+            "programmingLanguage": "cpp",
+            "license": "https://spdx.org/licenses/LGPL-2.0-only",
+        }
+    )
+    expected_expansion = [
+        {
+            "http://schema.org/description": [{"@value": "foo bar 3"}],
+            "http://schema.org/license": [
+                {"@id": "https://spdx.org/licenses/LGPL-2.0-only"}
+            ],
+            "http://schema.org/keywords": [{"@value": "bar"}, {"@value": "baz"}],
+            "http://schema.org/programmingLanguage": [{"@value": "cpp"}],
+        }
+    ]
+    assert intrinsic_metadata == expected_expansion
+    assert _nested_get(intrinsic_metadata, ["http://schema.org/license", "@id"]) == [
+        "https://spdx.org/licenses/LGPL-2.0-only"
+    ]
+
+    new_field = [
+        {"name": [{"@value": {"first": "f1", "last": "l1"}}], "address": "XYZ"},
+        {"name": [{"@value": {"first": "f2", "last": "l2"}}], "address": "ABC"},
+        {"name": [{"@value": {"first": "f3"}}], "address": {}},
+        {"name": [{"@value": {"first": "f4"}}], "address": []},
+    ]
+    assert _nested_get(new_field, ["name", "@value", "last"]) == ["l1", "l2", "", ""]
+    assert _nested_get(new_field, ["name", "@value", "first"]) == [
+        "f1",
+        "f2",
+        "f3",
+        "f4",
+    ]
+
+    assert _nested_get(new_field, ["address"])
== ["XYZ", "ABC", {}, []] + # shouldn't allow fetching intermediate values + assert _nested_get(new_field, ["name", "@value"]) == ["", "", "", ""] diff --git a/swh/search/tests/test_journal_client.py b/swh/search/tests/test_journal_client.py index 5bbf804..fd353eb 100644 --- a/swh/search/tests/test_journal_client.py +++ b/swh/search/tests/test_journal_client.py @@ -1,272 +1,276 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import datetime, timezone import functools from unittest.mock import MagicMock from swh.model.model import ( ObjectType, Person, Release, Revision, RevisionType, Snapshot, SnapshotBranch, TargetType, Timestamp, TimestampWithTimezone, hash_to_bytes, ) from swh.search.journal_client import process_journal_objects from swh.storage import get_storage DATES = [ TimestampWithTimezone( timestamp=Timestamp(seconds=1234567891, microseconds=0,), offset=120, negative_utc=False, ), TimestampWithTimezone( timestamp=Timestamp(seconds=1234567892, microseconds=0,), offset=120, negative_utc=False, ), TimestampWithTimezone( timestamp=Timestamp(seconds=1234567893, microseconds=0,), offset=120, negative_utc=False, ), TimestampWithTimezone( timestamp=Timestamp(seconds=1234567894, microseconds=0,), offset=120, negative_utc=False, ), ] COMMITTERS = [ Person(fullname=b"foo", name=b"foo", email=b""), Person(fullname=b"bar", name=b"bar", email=b""), ] REVISIONS = [ Revision( message=b"revision_1_message", date=DATES[0], committer=COMMITTERS[0], author=COMMITTERS[0], committer_date=DATES[0], type=RevisionType.GIT, directory=b"\x01" * 20, synthetic=False, metadata=None, parents=( hash_to_bytes("9b918dd063cec85c2bc63cc7f167e29f5894dcbc"), hash_to_bytes("757f38bdcd8473aaa12df55357f5e2f1a318e672"), ), ), Revision( message=b"revision_2_message", date=DATES[1], committer=COMMITTERS[1], author=COMMITTERS[1], committer_date=DATES[1], type=RevisionType.MERCURIAL, directory=b"\x02" * 20, synthetic=False, metadata=None, parents=(), extra_headers=((b"foo", b"bar"),), ), Revision( message=b"revision_3_message", date=DATES[2], committer=COMMITTERS[0], author=COMMITTERS[0], committer_date=DATES[2], type=RevisionType.GIT, directory=b"\x03" * 20, synthetic=False, metadata=None, parents=(), ), ] RELEASES = [ Release( name=b"v0.0.1", date=DATES[1], author=COMMITTERS[0], target_type=ObjectType.REVISION, target=b"\x04" * 20, message=b"foo", synthetic=False, ), Release( name=b"v0.0.2", date=DATES[2], author=COMMITTERS[1], target_type=ObjectType.REVISION, target=b"\x05" * 20, message=b"bar", synthetic=False, ), Release( name=b"v0.0.3", date=DATES[3], author=COMMITTERS[1], target_type=ObjectType.REVISION, target=b"\x05" * 20, message=b"foobar", synthetic=False, ), ] SNAPSHOTS = [ Snapshot( branches={ b"target/revision1": SnapshotBranch( target_type=TargetType.REVISION, target=REVISIONS[0].id, ), b"target/revision2": SnapshotBranch( target_type=TargetType.REVISION, target=REVISIONS[1].id, ), b"target/revision3": SnapshotBranch( target_type=TargetType.REVISION, target=REVISIONS[2].id, ), b"target/release1": SnapshotBranch( target_type=TargetType.RELEASE, target=RELEASES[0].id ), b"target/release2": SnapshotBranch( target_type=TargetType.RELEASE, target=RELEASES[1].id ), b"target/release3": SnapshotBranch( target_type=TargetType.RELEASE, target=RELEASES[2].id ), b"target/alias": SnapshotBranch( 
target_type=TargetType.ALIAS, target=b"target/revision1" ), }, ), ] def test_journal_client_origin_from_journal(): search_mock = MagicMock() worker_fn = functools.partial(process_journal_objects, search=search_mock,) worker_fn({"origin": [{"url": "http://foobar.baz"},]}) search_mock.origin_update.assert_called_once_with( [{"url": "http://foobar.baz"},] ) search_mock.reset_mock() worker_fn({"origin": [{"url": "http://foobar.baz"}, {"url": "http://barbaz.qux"},]}) search_mock.origin_update.assert_called_once_with( [{"url": "http://foobar.baz"}, {"url": "http://barbaz.qux"},] ) def test_journal_client_origin_visit_from_journal(): search_mock = MagicMock() worker_fn = functools.partial(process_journal_objects, search=search_mock,) worker_fn({"origin_visit": [{"origin": "http://foobar.baz", "type": "git"},]}) search_mock.origin_update.assert_called_once_with( [{"url": "http://foobar.baz", "visit_types": ["git"]},] ) def test_journal_client_origin_visit_status_from_journal(): search_mock = MagicMock() storage = get_storage("memory") storage.revision_add(REVISIONS) storage.release_add(RELEASES) storage.snapshot_add(SNAPSHOTS) worker_fn = functools.partial( process_journal_objects, search=search_mock, storage=storage ) current_datetime = datetime.now(tz=timezone.utc) worker_fn( { "origin_visit_status": [ { "origin": "http://foobar.baz", "status": "full", "visit": 5, "date": current_datetime, "snapshot": SNAPSHOTS[0].id, } # full visits ok ] } ) search_mock.origin_update.assert_called_once_with( [ { "url": "http://foobar.baz", "has_visits": True, "nb_visits": 5, "snapshot_id": SNAPSHOTS[0].id, "last_visit_date": current_datetime.isoformat(), "last_eventful_visit_date": current_datetime.isoformat(), "last_revision_date": "2009-02-14T01:31:33+02:00", "last_release_date": "2009-02-14T01:31:34+02:00", }, ] ) search_mock.reset_mock() # non-full visits are filtered out worker_fn( { "origin_visit_status": [ { "origin": "http://foobar.baz", "status": "partial", "visit": 5, "date": current_datetime, } ] } ) search_mock.origin_update.assert_not_called() def test_journal_client_origin_metadata_from_journal(): search_mock = MagicMock() worker_fn = functools.partial(process_journal_objects, search=search_mock,) worker_fn( { "origin_intrinsic_metadata": [ { "id": "http://foobar.baz", "metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": "foo bar", + "programmingLanguage": "python", + "license": "MIT", }, }, ] } ) search_mock.origin_update.assert_called_once_with( [ { "url": "http://foobar.baz", "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": "foo bar", + "programmingLanguage": "python", + "license": "MIT", }, }, ] ) diff --git a/swh/search/tests/test_search.py b/swh/search/tests/test_search.py index 1bd3b4f..6a15ed7 100644 --- a/swh/search/tests/test_search.py +++ b/swh/search/tests/test_search.py @@ -1,895 +1,1018 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import datetime, timedelta, timezone from itertools import permutations from hypothesis import given, settings, strategies import pytest from swh.core.api.classes import stream_results class CommonSearchTest: def test_origin_url_unique_word_prefix(self): origin_foobar_baz = {"url": "http://foobar.baz"} origin_barbaz_qux = {"url": "http://barbaz.qux"} 
origin_qux_quux = {"url": "http://qux.quux"} origins = [origin_foobar_baz, origin_barbaz_qux, origin_qux_quux] self.search.origin_update(origins) self.search.flush() actual_page = self.search.origin_search(url_pattern="foobar") assert actual_page.next_page_token is None assert actual_page.results == [origin_foobar_baz] actual_page = self.search.origin_search(url_pattern="barb") assert actual_page.next_page_token is None assert actual_page.results == [origin_barbaz_qux] # 'bar' is part of 'foobar', but is not the beginning of it actual_page = self.search.origin_search(url_pattern="bar") assert actual_page.next_page_token is None assert actual_page.results == [origin_barbaz_qux] actual_page = self.search.origin_search(url_pattern="barbaz") assert actual_page.next_page_token is None assert actual_page.results == [origin_barbaz_qux] def test_origin_url_unique_word_prefix_multiple_results(self): origin_foobar_baz = {"url": "http://foobar.baz"} origin_barbaz_qux = {"url": "http://barbaz.qux"} origin_qux_quux = {"url": "http://qux.quux"} self.search.origin_update( [origin_foobar_baz, origin_barbaz_qux, origin_qux_quux] ) self.search.flush() actual_page = self.search.origin_search(url_pattern="qu") assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [o["url"] for o in [origin_qux_quux, origin_barbaz_qux]] assert sorted(results) == sorted(expected_results) actual_page = self.search.origin_search(url_pattern="qux") assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [o["url"] for o in [origin_qux_quux, origin_barbaz_qux]] assert sorted(results) == sorted(expected_results) def test_origin_url_all_terms(self): origin_foo_bar_baz = {"url": "http://foo.bar/baz"} origin_foo_bar_foo_bar = {"url": "http://foo.bar/foo.bar"} origins = [origin_foo_bar_baz, origin_foo_bar_foo_bar] self.search.origin_update(origins) self.search.flush() # Only results containing all terms should be returned. 
actual_page = self.search.origin_search(url_pattern="foo bar baz") assert actual_page.next_page_token is None assert actual_page.results == [origin_foo_bar_baz] def test_origin_with_visit(self): origin_foobar_baz = {"url": "http://foobar/baz"} self.search.origin_update( [{**o, "has_visits": True} for o in [origin_foobar_baz]] ) self.search.flush() actual_page = self.search.origin_search(url_pattern="foobar", with_visit=True) assert actual_page.next_page_token is None assert actual_page.results == [origin_foobar_baz] def test_origin_with_visit_added(self): origin_foobar_baz = {"url": "http://foobar.baz"} self.search.origin_update([origin_foobar_baz]) self.search.flush() actual_page = self.search.origin_search(url_pattern="foobar", with_visit=True) assert actual_page.next_page_token is None assert actual_page.results == [] self.search.origin_update( [{**o, "has_visits": True} for o in [origin_foobar_baz]] ) self.search.flush() actual_page = self.search.origin_search(url_pattern="foobar", with_visit=True) assert actual_page.next_page_token is None assert actual_page.results == [origin_foobar_baz] def test_origin_no_visit_types_search(self): origins = [{"url": "http://foobar.baz"}] self.search.origin_update(origins) self.search.flush() actual_page = self.search.origin_search(url_pattern="http", visit_types=["git"]) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [] assert sorted(results) == sorted(expected_results) actual_page = self.search.origin_search(url_pattern="http", visit_types=None) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin["url"] for origin in origins] assert sorted(results) == sorted(expected_results) def test_origin_visit_types_search(self): origins = [ {"url": "http://foobar.baz", "visit_types": ["git"]}, {"url": "http://barbaz.qux", "visit_types": ["svn"]}, {"url": "http://qux.quux", "visit_types": ["hg"]}, ] self.search.origin_update(origins) self.search.flush() for origin in origins: actual_page = self.search.origin_search( url_pattern="http", visit_types=origin["visit_types"] ) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin["url"]] assert sorted(results) == sorted(expected_results) actual_page = self.search.origin_search(url_pattern="http", visit_types=None) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin["url"] for origin in origins] assert sorted(results) == sorted(expected_results) def test_origin_visit_types_update_search(self): origin_url = "http://foobar.baz" self.search.origin_update([{"url": origin_url}]) self.search.flush() def _add_visit_type(visit_type): self.search.origin_update( [{"url": origin_url, "visit_types": [visit_type]}] ) self.search.flush() def _check_visit_types(visit_types_list): for visit_types in visit_types_list: actual_page = self.search.origin_search( url_pattern="http", visit_types=visit_types ) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin_url] assert sorted(results) == sorted(expected_results) _add_visit_type("git") _check_visit_types([["git"], ["git", "hg"]]) _add_visit_type("svn") _check_visit_types([["git"], ["svn"], ["svn", "git"], ["git", "hg", "svn"]]) _add_visit_type("hg") _check_visit_types( [ ["git"], ["svn"], ["hg"], ["svn", "git"], ["hg", "git"], ["hg", "svn"], ["git", "hg", 
"svn"], ] ) def test_origin_nb_visits_update_search(self): origin_url = "http://foobar.baz" self.search.origin_update([{"url": origin_url}]) self.search.flush() def _update_nb_visits(nb_visits): self.search.origin_update([{"url": origin_url, "nb_visits": nb_visits}]) self.search.flush() def _check_min_nb_visits(min_nb_visits): actual_page = self.search.origin_search( url_pattern=origin_url, min_nb_visits=min_nb_visits, ) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin_url] assert sorted(results) == sorted(expected_results) _update_nb_visits(2) _check_min_nb_visits(2) # Works for = 2 _check_min_nb_visits(1) # Works for < 2 with pytest.raises(AssertionError): _check_min_nb_visits( 5 ) # No results for nb_visits >= 5 (should throw error) _update_nb_visits(5) _check_min_nb_visits(5) # Works for = 5 _check_min_nb_visits(3) # Works for < 5 def test_origin_last_visit_date_update_search(self): origin_url = "http://foobar.baz" self.search.origin_update([{"url": origin_url}]) self.search.flush() def _update_last_visit_date(last_visit_date): self.search.origin_update( [{"url": origin_url, "last_visit_date": last_visit_date}] ) self.search.flush() def _check_min_last_visit_date(min_last_visit_date): actual_page = self.search.origin_search( url_pattern=origin_url, min_last_visit_date=min_last_visit_date, ) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin_url] assert sorted(results) == sorted(expected_results) now = datetime.now(tz=timezone.utc).isoformat() now_minus_5_hours = ( datetime.now(tz=timezone.utc) - timedelta(hours=5) ).isoformat() now_plus_5_hours = ( datetime.now(tz=timezone.utc) + timedelta(hours=5) ).isoformat() _update_last_visit_date(now) _check_min_last_visit_date(now) # Works for = _check_min_last_visit_date(now_minus_5_hours) # Works for < with pytest.raises(AssertionError): _check_min_last_visit_date(now_plus_5_hours) # Fails for > _update_last_visit_date(now_plus_5_hours) _check_min_last_visit_date(now_plus_5_hours) # Works for = _check_min_last_visit_date(now) # Works for < def test_journal_client_origin_visit_status_permutation(self): NOW = datetime.now(tz=timezone.utc).isoformat() NOW_MINUS_5_HOURS = ( datetime.now(tz=timezone.utc) - timedelta(hours=5) ).isoformat() NOW_PLUS_5_HOURS = ( datetime.now(tz=timezone.utc) + timedelta(hours=5) ).isoformat() VISIT_STATUSES = [ { "url": "http://foobar.baz", "snapshot_id": "SNAPSHOT_1", "last_eventful_visit_date": NOW, }, { "url": "http://foobar.baz", "snapshot_id": "SNAPSHOT_1", "last_eventful_visit_date": NOW_MINUS_5_HOURS, }, { "url": "http://foobar.baz", "snapshot_id": "SNAPSHOT_2", "last_eventful_visit_date": NOW_PLUS_5_HOURS, }, ] for visit_statuses in permutations(VISIT_STATUSES, len(VISIT_STATUSES)): self.search.origin_update(visit_statuses) self.search.flush() origin_url = "http://foobar.baz" actual_page = self.search.origin_search( url_pattern=origin_url, min_last_eventful_visit_date=NOW_PLUS_5_HOURS, ) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin_url] assert sorted(results) == sorted(expected_results) self.reset() def test_origin_last_eventful_visit_date_update_search(self): origin_url = "http://foobar.baz" self.search.origin_update([{"url": origin_url}]) self.search.flush() def _update_last_eventful_visit_date(snapshot_id, last_eventful_visit_date): self.search.origin_update( [ { "url": origin_url, "snapshot_id": 
snapshot_id, "last_eventful_visit_date": last_eventful_visit_date, } ] ) self.search.flush() def _check_min_last_eventful_visit_date(min_last_eventful_visit_date): actual_page = self.search.origin_search( url_pattern=origin_url, min_last_eventful_visit_date=min_last_eventful_visit_date, ) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin_url] assert sorted(results) == sorted(expected_results) now = datetime.now(tz=timezone.utc).isoformat() now_minus_5_hours = ( datetime.now(tz=timezone.utc) - timedelta(hours=5) ).isoformat() now_plus_5_hours = ( datetime.now(tz=timezone.utc) + timedelta(hours=5) ).isoformat() snapshot_1 = "SNAPSHOT_1" snapshot_2 = "SNAPSHOT_2" _update_last_eventful_visit_date(snapshot_1, now) _check_min_last_eventful_visit_date(now) # Works for = _check_min_last_eventful_visit_date(now_minus_5_hours) # Works for < with pytest.raises(AssertionError): _check_min_last_eventful_visit_date(now_plus_5_hours) # Fails for > _update_last_eventful_visit_date( snapshot_1, now_plus_5_hours ) # Revisit(not eventful) same origin _check_min_last_eventful_visit_date( now ) # Should remain the same because recent visit wasn't eventful with pytest.raises(AssertionError): _check_min_last_eventful_visit_date(now_plus_5_hours) _update_last_eventful_visit_date( snapshot_2, now_plus_5_hours ) # Revisit(eventful) same origin _check_min_last_eventful_visit_date(now_plus_5_hours) # Works for = _check_min_last_eventful_visit_date(now) # Works for < def _test_origin_last_revision_release_date_update_search(self, date_type): origin_url = "http://foobar.baz" self.search.origin_update([{"url": origin_url}]) self.search.flush() def _update_last_revision_release_date(date): self.search.origin_update([{"url": origin_url, date_type: date,}]) self.search.flush() def _check_min_last_revision_release_date(date): actual_page = self.search.origin_search( url_pattern=origin_url, **{f"min_{date_type}": date}, ) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin_url] assert sorted(results) == sorted(expected_results) now = datetime.now(tz=timezone.utc).isoformat() now_minus_5_hours = ( datetime.now(tz=timezone.utc) - timedelta(hours=5) ).isoformat() now_plus_5_hours = ( datetime.now(tz=timezone.utc) + timedelta(hours=5) ).isoformat() _update_last_revision_release_date(now) _check_min_last_revision_release_date(now) _check_min_last_revision_release_date(now_minus_5_hours) with pytest.raises(AssertionError): _check_min_last_revision_release_date(now_plus_5_hours) _update_last_revision_release_date(now_plus_5_hours) _check_min_last_revision_release_date(now_plus_5_hours) _check_min_last_revision_release_date(now) def test_origin_last_revision_date_update_search(self): self._test_origin_last_revision_release_date_update_search( date_type="last_revision_date" ) def test_origin_last_release_date_update_search(self): self._test_origin_last_revision_release_date_update_search( date_type="last_revision_date" ) def test_origin_sort_by_search(self): now = datetime.now(tz=timezone.utc).isoformat() now_minus_5_hours = ( datetime.now(tz=timezone.utc) - timedelta(hours=5) ).isoformat() now_plus_5_hours = ( datetime.now(tz=timezone.utc) + timedelta(hours=5) ).isoformat() ORIGINS = [ { "url": "http://foobar.1.com", "nb_visits": 1, "last_visit_date": now_minus_5_hours, }, {"url": "http://foobar.2.com", "nb_visits": 2, "last_visit_date": now,}, { "url": "http://foobar.3.com", "nb_visits": 3, 
"last_visit_date": now_plus_5_hours, }, ] self.search.origin_update(ORIGINS) self.search.flush() def _check_results(sort_by, origins): page = self.search.origin_search(url_pattern="foobar", sort_by=sort_by) results = [r["url"] for r in page.results] assert results == [origin["url"] for origin in origins] _check_results(["nb_visits"], ORIGINS) _check_results(["-nb_visits"], ORIGINS[::-1]) _check_results(["last_visit_date"], ORIGINS) _check_results(["-last_visit_date"], ORIGINS[::-1]) _check_results(["nb_visits", "-last_visit_date"], ORIGINS) _check_results(["-last_visit_date", "nb_visits"], ORIGINS[::-1]) + def test_origin_instrinsic_metadata_license_search(self): + ORIGINS = [ + { + "url": "http://foobar.1.com", + "intrinsic_metadata": { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "description": "foo bar", + "license": "https://spdx.org/licenses/MIT", + }, + }, + { + "url": "http://foobar.2.com", + "intrinsic_metadata": { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "description": "foo bar", + "license": "BSD-3-Clause", + }, + }, + ] + self.search.origin_update(ORIGINS) + self.search.flush() + + def _check_results(licenses, origin_indices): + page = self.search.origin_search(url_pattern="foobar", licenses=licenses) + results = [r["url"] for r in page.results] + assert sorted(results) == sorted( + [ORIGINS[i]["url"] for i in origin_indices] + ) + + _check_results(["MIT"], [0]) + _check_results(["bsd"], [1]) + _check_results(["mit", "3-Clause"], [0, 1]) + + def test_origin_instrinsic_metadata_programming_language_search(self): + ORIGINS = [ + { + "url": "http://foobar.1.com", + "intrinsic_metadata": { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "description": "foo bar", + "programmingLanguage": "python", + }, + }, + { + "url": "http://foobar.2.com", + "intrinsic_metadata": { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "description": "foo bar", + "programmingLanguage": "javascript", + }, + }, + ] + self.search.origin_update(ORIGINS) + self.search.flush() + + def _check_results(programming_languages, origin_indices): + page = self.search.origin_search( + url_pattern="foobar", programming_languages=programming_languages + ) + results = [r["url"] for r in page.results] + assert sorted(results) == sorted( + [ORIGINS[i]["url"] for i in origin_indices] + ) + + _check_results(["python"], [0]) + _check_results(["javascript"], [1]) + _check_results(["python", "javascript"], [0, 1]) + + def test_origin_instrinsic_metadata_multiple_field_search(self): + ORIGINS = [ + { + "url": "http://foobar.1.com", + "intrinsic_metadata": { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "description": "foo bar 1", + "programmingLanguage": "python", + "license": "https://spdx.org/licenses/MIT", + }, + }, + { + "url": "http://foobar.2.com", + "intrinsic_metadata": { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "description": "foo bar 2", + "programmingLanguage": ["javascript", "html", "css"], + "license": [ + "https://spdx.org/licenses/CC-BY-1.0", + "https://spdx.org/licenses/Apache-1.0", + ], + }, + }, + { + "url": "http://foobar.3.com", + "intrinsic_metadata": { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "description": "foo bar 3", + "programmingLanguage": ["Cpp", "c"], + "license": "https://spdx.org/licenses/LGPL-2.0-only", + }, + }, + ] + self.search.origin_update(ORIGINS) + self.search.flush() + + def _check_result(programming_languages, licenses, origin_indices): + page = 
self.search.origin_search( + url_pattern="foobar", + programming_languages=programming_languages, + licenses=licenses, + ) + results = [r["url"] for r in page.results] + assert sorted(results) == sorted( + [ORIGINS[i]["url"] for i in origin_indices] + ) + + _check_result(["javascript"], ["CC"], [1]) + _check_result(["css"], ["CC"], [1]) + _check_result(["css"], ["CC", "apache"], [1]) + + _check_result(["python", "javascript"], ["MIT"], [0]) + + _check_result(["c", "python"], ["LGPL", "mit"], [2, 0]) + def test_origin_update_with_no_visit_types(self): """ Update an origin with visit types first then with no visit types, check origin can still be searched with visit types afterwards. """ origin_url = "http://foobar.baz" self.search.origin_update([{"url": origin_url, "visit_types": ["git"]}]) self.search.flush() self.search.origin_update([{"url": origin_url}]) self.search.flush() actual_page = self.search.origin_search(url_pattern="http", visit_types=["git"]) assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [origin_url] assert results == expected_results def test_origin_intrinsic_metadata_description(self): origin1_nothin = {"url": "http://origin1"} origin2_foobar = {"url": "http://origin2"} origin3_barbaz = {"url": "http://origin3"} self.search.origin_update( [ {**origin1_nothin, "intrinsic_metadata": {},}, { **origin2_foobar, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": "foo bar", }, }, { **origin3_barbaz, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": "bar baz", }, }, ] ) self.search.flush() actual_page = self.search.origin_search(metadata_pattern="foo") assert actual_page.next_page_token is None assert actual_page.results == [origin2_foobar] actual_page = self.search.origin_search(metadata_pattern="foo bar") assert actual_page.next_page_token is None assert actual_page.results == [origin2_foobar] actual_page = self.search.origin_search(metadata_pattern="bar baz") assert actual_page.next_page_token is None assert actual_page.results == [origin3_barbaz] def test_origin_intrinsic_metadata_all_terms(self): origin1_foobarfoobar = {"url": "http://origin1"} origin3_foobarbaz = {"url": "http://origin2"} self.search.origin_update( [ { **origin1_foobarfoobar, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": "foo bar foo bar", }, }, { **origin3_foobarbaz, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": "foo bar baz", }, }, ] ) self.search.flush() actual_page = self.search.origin_search(metadata_pattern="foo bar baz") assert actual_page.next_page_token is None assert actual_page.results == [origin3_foobarbaz] def test_origin_intrinsic_metadata_long_description(self): """Checks ElasticSearch does not try to store large values untokenize, which would be inefficient and crash it with: Document contains at least one immense term in field="intrinsic_metadata.http://schema.org/description.@value" (whose UTF8 encoding is longer than the max length 32766), all of which were skipped. 
""" # noqa origin1 = {"url": "http://origin1"} self.search.origin_update( [ { **origin1, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": " ".join(f"foo{i}" for i in range(100000)), }, }, ] ) self.search.flush() actual_page = self.search.origin_search(metadata_pattern="foo42") assert actual_page.next_page_token is None assert actual_page.results == [origin1] def test_origin_intrinsic_metadata_matches_cross_fields(self): """Checks the backend finds results even if the two words in the query are each in a different field.""" origin1 = {"url": "http://origin1"} self.search.origin_update( [ { **origin1, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "description": "foo bar", "author": "John Doe", }, }, ] ) self.search.flush() actual_page = self.search.origin_search(metadata_pattern="foo John") assert actual_page.next_page_token is None assert actual_page.results == [origin1] def test_origin_intrinsic_metadata_nested(self): origin1_nothin = {"url": "http://origin1"} origin2_foobar = {"url": "http://origin2"} origin3_barbaz = {"url": "http://origin3"} self.search.origin_update( [ {**origin1_nothin, "intrinsic_metadata": {},}, { **origin2_foobar, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "keywords": ["foo", "bar"], }, }, { **origin3_barbaz, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "keywords": ["bar", "baz"], }, }, ] ) self.search.flush() actual_page = self.search.origin_search(metadata_pattern="foo") assert actual_page.next_page_token is None assert actual_page.results == [origin2_foobar] actual_page = self.search.origin_search(metadata_pattern="foo bar") assert actual_page.next_page_token is None assert actual_page.results == [origin2_foobar] actual_page = self.search.origin_search(metadata_pattern="bar baz") assert actual_page.next_page_token is None assert actual_page.results == [origin3_barbaz] def test_origin_intrinsic_metadata_inconsistent_type(self): """Checks the same field can have a concrete value, an object, or an array in different documents.""" origin1_foobar = {"url": "http://origin1"} origin2_barbaz = {"url": "http://origin2"} origin3_bazqux = {"url": "http://origin3"} self.search.origin_update( [ { **origin1_foobar, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "author": {"familyName": "Foo", "givenName": "Bar",}, }, }, ] ) self.search.flush() self.search.origin_update( [ { **origin2_barbaz, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "author": "Bar Baz", }, }, { **origin3_bazqux, "intrinsic_metadata": { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "author": ["Baz", "Qux"], }, }, ] ) self.search.flush() actual_page = self.search.origin_search(metadata_pattern="bar") assert actual_page.next_page_token is None results = [r["url"] for r in actual_page.results] expected_results = [o["url"] for o in [origin2_barbaz, origin1_foobar]] assert sorted(results) == sorted(expected_results) actual_page = self.search.origin_search(metadata_pattern="baz") assert actual_page.next_page_token is None assert actual_page.results == [origin2_barbaz, origin3_bazqux] actual_page = self.search.origin_search(metadata_pattern="foo") assert actual_page.next_page_token is None assert actual_page.results == [origin1_foobar] actual_page = self.search.origin_search(metadata_pattern="bar baz") assert actual_page.next_page_token is None assert 
         assert actual_page.results == [origin2_barbaz]
 
         actual_page = self.search.origin_search(metadata_pattern="qux")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin3_bazqux]
 
         actual_page = self.search.origin_search(metadata_pattern="baz qux")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin3_bazqux]
 
         actual_page = self.search.origin_search(metadata_pattern="foo bar")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin1_foobar]
 
     def test_origin_intrinsic_metadata_string_mapping(self):
         """Checks inserting a date-like value in a field does not update the
         mapping to require every document to use a date in that field, or that
         search queries use a date either. Likewise for numeric and boolean
         fields."""
         origin1 = {"url": "http://origin1"}
         origin2 = {"url": "http://origin2"}
 
         self.search.origin_update(
             [
                 {
                     **origin1,
                     "intrinsic_metadata": {
                         "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
                         "dateCreated": "2021-02-18T10:16:52",
                         "version": "1.0",
                         "isAccessibleForFree": True,
                     },
                 }
             ]
         )
         self.search.flush()
         self.search.origin_update(
             [
                 {
                     **origin2,
                     "intrinsic_metadata": {
                         "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
                         "dateCreated": "a long time ago",
                         "address": "in a galaxy far, far away",
                         "version": "a new hope",
                         "isAccessibleForFree": "it depends",
                     },
                 },
             ]
         )
         self.search.flush()
 
         actual_page = self.search.origin_search(metadata_pattern="2021")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin1]
 
         actual_page = self.search.origin_search(metadata_pattern="long time ago")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin2]
 
         actual_page = self.search.origin_search(metadata_pattern="true")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin1]
 
         actual_page = self.search.origin_search(metadata_pattern="it depends")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin2]
 
     def test_origin_intrinsic_metadata_update(self):
         origin = {"url": "http://origin1"}
         origin_data = {
             **origin,
             "intrinsic_metadata": {
                 "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
                 "author": "John Doe",
             },
         }
         self.search.origin_update([origin_data])
         self.search.flush()
 
         actual_page = self.search.origin_search(metadata_pattern="John")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin]
 
         origin_data["intrinsic_metadata"]["author"] = "Jane Doe"
         self.search.origin_update([origin_data])
         self.search.flush()
 
         actual_page = self.search.origin_search(metadata_pattern="Jane")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin]
 
     # TODO: add more tests with more codemeta terms
 
     # TODO: add more tests with edge cases
 
     @settings(deadline=None)
     @given(strategies.integers(min_value=1, max_value=4))
     def test_origin_url_paging(self, limit):
         # TODO: no hypothesis
         origin1_foo = {"url": "http://origin1/foo"}
         origin2_foobar = {"url": "http://origin2/foo/bar"}
         origin3_foobarbaz = {"url": "http://origin3/foo/bar/baz"}
 
         self.reset()
         self.search.origin_update([origin1_foo, origin2_foobar, origin3_foobarbaz])
         self.search.flush()
 
         results = stream_results(
             self.search.origin_search, url_pattern="foo bar baz", limit=limit
         )
         results = [res["url"] for res in results]
         expected_results = [o["url"] for o in [origin3_foobarbaz]]
         assert sorted(results[0 : len(expected_results)]) == sorted(expected_results)
 
         results = stream_results(
             self.search.origin_search, url_pattern="foo bar", limit=limit
         )
         results = [res["url"] for res in results]
         expected_results = [o["url"] for o in [origin2_foobar, origin3_foobarbaz]]
         assert sorted(results[0 : len(expected_results)]) == sorted(expected_results)
 
         results = stream_results(
             self.search.origin_search, url_pattern="foo", limit=limit
         )
         results = [res["url"] for res in results]
         expected_results = [
             o["url"] for o in [origin1_foo, origin2_foobar, origin3_foobarbaz]
         ]
         assert sorted(results[0 : len(expected_results)]) == sorted(expected_results)
 
     @settings(deadline=None)
     @given(strategies.integers(min_value=1, max_value=4))
     def test_origin_intrinsic_metadata_paging(self, limit):
         # TODO: no hypothesis
         origin1_foo = {"url": "http://origin1"}
         origin2_foobar = {"url": "http://origin2"}
         origin3_foobarbaz = {"url": "http://origin3"}
 
         self.reset()
         self.search.origin_update(
             [
                 {
                     **origin1_foo,
                     "intrinsic_metadata": {
                         "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
                         "keywords": ["foo"],
                     },
                 },
                 {
                     **origin2_foobar,
                     "intrinsic_metadata": {
                         "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
                         "keywords": ["foo", "bar"],
                     },
                 },
                 {
                     **origin3_foobarbaz,
                     "intrinsic_metadata": {
                         "@context": "https://doi.org/10.5063/schema/codemeta-2.0",
                         "keywords": ["foo", "bar", "baz"],
                     },
                 },
             ]
         )
         self.search.flush()
 
         results = stream_results(
             self.search.origin_search, metadata_pattern="foo bar baz", limit=limit
         )
         assert list(results) == [origin3_foobarbaz]
 
         results = stream_results(
             self.search.origin_search, metadata_pattern="foo bar", limit=limit
         )
         assert list(results) == [origin2_foobar, origin3_foobarbaz]
 
         results = stream_results(
             self.search.origin_search, metadata_pattern="foo", limit=limit
         )
         assert list(results) == [origin1_foo, origin2_foobar, origin3_foobarbaz]
 
     def test_search_blocklisted_results(self):
         origin1 = {"url": "http://origin1"}
         origin2 = {"url": "http://origin2", "blocklisted": True}
 
         self.search.origin_update([origin1, origin2])
         self.search.flush()
 
         actual_page = self.search.origin_search(url_pattern="origin")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin1]
 
     def test_search_blocklisted_update(self):
         origin1 = {"url": "http://origin1"}
 
         self.search.origin_update([origin1])
         self.search.flush()
         result_page = self.search.origin_search(url_pattern="origin")
         assert result_page.next_page_token is None
         assert result_page.results == [origin1]
 
         self.search.origin_update([{**origin1, "blocklisted": True}])
         self.search.flush()
         result_page = self.search.origin_search(url_pattern="origin")
         assert result_page.next_page_token is None
         assert result_page.results == []
 
         self.search.origin_update(
             [{**origin1, "has_visits": True, "visit_types": ["git"]}]
         )
         self.search.flush()
         result_page = self.search.origin_search(url_pattern="origin")
         assert result_page.next_page_token is None
         assert result_page.results == []
diff --git a/tox.ini b/tox.ini
index d71d9af..aef0583 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,72 +1,73 @@
 [tox]
 envlist=black,flake8,mypy,py3
 
 [testenv]
 extras =
   testing
 deps =
   pytest-cov
 commands =
-  pytest --cov={envsitepackagesdir}/swh/search \
+  pytest --doctest-modules \
          {envsitepackagesdir}/swh/search \
+         --cov={envsitepackagesdir}/swh/search \
          --cov-branch {posargs}
 
 [testenv:black]
 skip_install = true
 deps =
   black==19.10b0
 commands =
   {envpython} -m black --check swh
 
 [testenv:flake8]
 skip_install = true
 deps =
   flake8
 commands =
   {envpython} -m flake8
 
 [testenv:mypy]
 extras =
   testing
 deps =
   mypy
 commands =
   mypy swh
 
 # build documentation outside swh-environment using the current
 # git HEAD of swh-docs, is executed on CI for each diff to prevent
 # breaking doc build
 [testenv:sphinx]
 whitelist_externals = make
 usedevelop = true
 extras =
   testing
 deps =
   # fetch and install swh-docs in develop mode
   -e git+https://forge.softwareheritage.org/source/swh-docs#egg=swh.docs
 setenv =
   SWH_PACKAGE_DOC_TOX_BUILD = 1
   # turn warnings into errors
   SPHINXOPTS = -W
 commands =
   make -I ../.tox/sphinx/src/swh-docs/swh/ -C docs
 
 # build documentation only inside swh-environment using local state
 # of swh-docs package
 [testenv:sphinx-dev]
 whitelist_externals = make
 usedevelop = true
 extras =
   testing
 deps =
   # install swh-docs in develop mode
   -e ../swh-docs
 setenv =
   SWH_PACKAGE_DOC_TOX_BUILD = 1
   # turn warnings into errors
   SPHINXOPTS = -W
 commands =
   make -I ../.tox/sphinx-dev/src/swh-docs/swh/ -C docs