diff --git a/swh/indexer/ctags.py b/swh/indexer/ctags.py deleted file mode 100644 index d56204e..0000000 --- a/swh/indexer/ctags.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (C) 2015-2020 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import json -import subprocess -from typing import Any, Dict, Iterator, List, Optional - -from swh.core.config import merge_configs -from swh.indexer.storage import Sha1 -from swh.indexer.storage.model import ContentCtagsRow -from swh.model import hashutil - -from .indexer import ContentIndexer, write_to_temp - -# Options used to compute tags -__FLAGS = [ - "--fields=+lnz", # +l: language - # +n: line number of tag definition - # +z: include the symbol's kind (function, variable, ...) - "--sort=no", # sort output on tag name - "--links=no", # do not follow symlinks - "--output-format=json", # outputs in json -] - - -def compute_language(content, log=None): - raise NotImplementedError( - "Language detection was unreliable, so it is currently disabled. " - "See https://forge.softwareheritage.org/D1455" - ) - - -def run_ctags(path, lang=None, ctags_command="ctags") -> Iterator[Dict[str, Any]]: - """Run ctags on file path with optional language. - - Args: - path: path to the file - lang: language for that path (optional) - - Yields: - dict: ctags' output - - """ - optional = [] - if lang: - optional = ["--language-force=%s" % lang] - - cmd = [ctags_command] + __FLAGS + optional + [path] - output = subprocess.check_output(cmd, universal_newlines=True) - - for symbol in output.split("\n"): - if not symbol: - continue - js_symbol = json.loads(symbol) - yield { - "name": js_symbol["name"], - "kind": js_symbol["kind"], - "line": js_symbol["line"], - "lang": js_symbol["language"], - } - - -DEFAULT_CONFIG: Dict[str, Any] = { - "workdir": "/tmp/swh/indexer.ctags", - "tools": { - "name": "universal-ctags", - "version": "~git7859817b", - "configuration": { - "command_line": """ctags --fields=+lnz --sort=no --links=no """ - """--output-format=json """ - }, - }, - "languages": {}, -} - - -class CtagsIndexer(ContentIndexer[ContentCtagsRow]): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.config = merge_configs(DEFAULT_CONFIG, self.config) - self.working_directory = self.config["workdir"] - self.language_map = self.config["languages"] - - def filter(self, ids): - """Filter out known sha1s and return only missing ones.""" - yield from self.idx_storage.content_ctags_missing( - ( - { - "id": sha1, - "indexer_configuration_id": self.tool["id"], - } - for sha1 in ids - ) - ) - - def index( - self, id: Sha1, data: Optional[bytes] = None, **kwargs - ) -> List[ContentCtagsRow]: - """Index sha1s' content and store result. 
- - Args: - id (bytes): content's identifier - data (bytes): raw content in bytes - - Returns: - dict: a dict representing a content_mimetype with keys: - - - **id** (bytes): content's identifier (sha1) - - **ctags** ([dict]): ctags list of symbols - - """ - assert isinstance(id, bytes) - assert data is not None - - lang = compute_language(data, log=self.log)["lang"] - - if not lang: - return [] - - ctags_lang = self.language_map.get(lang) - - if not ctags_lang: - return [] - - ctags = [] - - filename = hashutil.hash_to_hex(id) - with write_to_temp( - filename=filename, data=data, working_directory=self.working_directory - ) as content_path: - for ctag_kwargs in run_ctags(content_path, lang=ctags_lang): - ctags.append( - ContentCtagsRow( - id=id, - indexer_configuration_id=self.tool["id"], - **ctag_kwargs, - ) - ) - - return ctags - - def persist_index_computations( - self, results: List[ContentCtagsRow] - ) -> Dict[str, int]: - """Persist the results in storage. - - Args: - results: list of ctags returned by index() - - """ - return self.idx_storage.content_ctags_add(results) diff --git a/swh/indexer/storage/__init__.py b/swh/indexer/storage/__init__.py index ab5315c..a93ce23 100644 --- a/swh/indexer/storage/__init__.py +++ b/swh/indexer/storage/__init__.py @@ -1,802 +1,741 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import Counter from importlib import import_module import json from typing import Dict, Iterable, List, Optional, Tuple, Union import warnings import psycopg2 import psycopg2.pool from swh.core.db.common import db_transaction from swh.indexer.storage.interface import IndexerStorageInterface from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.model.model import SHA1_SIZE from swh.storage.exc import StorageDBError from swh.storage.utils import get_partition_bounds_bytes from . import converters from .db import Db from .exc import DuplicateId, IndexerStorageArgumentException from .interface import PagedResult, Sha1 from .metrics import process_metrics, send_metric, timed from .model import ( - ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from .writer import JournalWriter INDEXER_CFG_KEY = "indexer_storage" MAPPING_NAMES = ["cff", "codemeta", "gemspec", "maven", "npm", "pkg-info"] SERVER_IMPLEMENTATIONS: Dict[str, str] = { "postgresql": ".IndexerStorage", "remote": ".api.client.RemoteStorage", "memory": ".in_memory.IndexerStorage", # deprecated "local": ".IndexerStorage", } def get_indexer_storage(cls: str, **kwargs) -> IndexerStorageInterface: """Instantiate an indexer storage implementation of class `cls` with arguments `kwargs`. Args: cls: indexer storage class (local, remote or memory) kwargs: dictionary of arguments passed to the indexer storage class constructor Returns: an instance of swh.indexer.storage Raises: ValueError if passed an unknown storage class. """ if "args" in kwargs: warnings.warn( 'Explicit "args" key is deprecated, use keys directly instead.', DeprecationWarning, ) kwargs = kwargs["args"] class_path = SERVER_IMPLEMENTATIONS.get(cls) if class_path is None: raise ValueError( f"Unknown indexer storage class `{cls}`. 
" f"Supported: {', '.join(SERVER_IMPLEMENTATIONS)}" ) (module_path, class_name) = class_path.rsplit(".", 1) module = import_module(module_path if module_path else ".", package=__package__) BackendClass = getattr(module, class_name) check_config = kwargs.pop("check_config", {}) idx_storage = BackendClass(**kwargs) if check_config: if not idx_storage.check_config(**check_config): raise EnvironmentError("Indexer storage check config failed") return idx_storage def check_id_duplicates(data): """ If any two row models in `data` have the same unique key, raises a `ValueError`. Values associated to the key must be hashable. Args: data (List[dict]): List of dictionaries to be inserted >>> check_id_duplicates([ ... ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"), ... ContentLanguageRow(id=b'foo', indexer_configuration_id=32, lang="python"), ... ]) >>> check_id_duplicates([ ... ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"), ... ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"), ... ]) Traceback (most recent call last): ... swh.indexer.storage.exc.DuplicateId: [{'id': b'foo', 'indexer_configuration_id': 42}] """ # noqa counter = Counter(tuple(sorted(item.unique_key().items())) for item in data) duplicates = [id_ for (id_, count) in counter.items() if count >= 2] if duplicates: raise DuplicateId(list(map(dict, duplicates))) class IndexerStorage: """SWH Indexer Storage Datastore""" current_version = 135 def __init__(self, db, min_pool_conns=1, max_pool_conns=10, journal_writer=None): """ Args: db: either a libpq connection string, or a psycopg2 connection journal_writer: configuration passed to `swh.journal.writer.get_journal_writer` """ self.journal_writer = JournalWriter(self._tool_get_from_id, journal_writer) try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) def get_db(self): if self._db: return self._db return Db.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() @timed @db_transaction() def check_config(self, *, check_write, db=None, cur=None): # Check permissions on one of the tables if check_write: check = "INSERT" else: check = "SELECT" cur.execute( "select has_table_privilege(current_user, 'content_mimetype', %s)", # noqa (check,), ) return cur.fetchone()[0] @timed @db_transaction() def content_mimetype_missing( self, mimetypes: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [obj[0] for obj in db.content_mimetype_missing_from_list(mimetypes, cur)] @timed @db_transaction() def get_partition( self, indexer_type: str, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, with_textual_data=False, db=None, cur=None, ) -> PagedResult[Sha1]: """Retrieve ids of content with `indexer_type` within within partition partition_id bound by limit. Args: **indexer_type**: Type of data content to index (mimetype, language, etc...) 
**indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) **with_textual_data** (bool): Deal with only textual content (True) or all content (all contents by defaults, False) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. If next_page_token is None, there is no more data to fetch """ if limit is None: raise IndexerStorageArgumentException("limit should not be None") if indexer_type not in db.content_indexer_names: err = f"Wrong type. Should be one of [{','.join(db.content_indexer_names)}]" raise IndexerStorageArgumentException(err) start, end = get_partition_bounds_bytes(partition_id, nb_partitions, SHA1_SIZE) if page_token is not None: start = hash_to_bytes(page_token) if end is None: end = b"\xff" * SHA1_SIZE next_page_token: Optional[str] = None ids = [ row[0] for row in db.content_get_range( indexer_type, start, end, indexer_configuration_id, limit=limit + 1, with_textual_data=with_textual_data, cur=cur, ) ] if len(ids) >= limit: next_page_token = hash_to_hex(ids[-1]) ids = ids[:limit] assert len(ids) <= limit return PagedResult(results=ids, next_page_token=next_page_token) @timed @db_transaction() def content_mimetype_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[Sha1]: return self.get_partition( "mimetype", indexer_configuration_id, partition_id, nb_partitions, page_token=page_token, limit=limit, db=db, cur=cur, ) @timed @process_metrics @db_transaction() def content_mimetype_add( self, mimetypes: List[ContentMimetypeRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(mimetypes) mimetypes.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_mimetype", mimetypes) db.mktemp_content_mimetype(cur) db.copy_to( [m.to_dict() for m in mimetypes], "tmp_content_mimetype", ["id", "mimetype", "encoding", "indexer_configuration_id"], cur, ) count = db.content_mimetype_add_from_temp(cur) return {"content_mimetype:add": count} @timed @db_transaction() def content_mimetype_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentMimetypeRow]: return [ ContentMimetypeRow.from_dict( converters.db_to_mimetype(dict(zip(db.content_mimetype_cols, c))) ) for c in db.content_mimetype_get_from_list(ids, cur) ] @timed @db_transaction() def content_language_missing( self, languages: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [obj[0] for obj in db.content_language_missing_from_list(languages, cur)] @timed @db_transaction() def content_language_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentLanguageRow]: return [ ContentLanguageRow.from_dict( converters.db_to_language(dict(zip(db.content_language_cols, c))) ) for c in db.content_language_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def content_language_add( self, languages: List[ContentLanguageRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(languages) languages.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_language", languages) db.mktemp_content_language(cur) # empty language is mapped to 'unknown' db.copy_to( ( { "id": lang.id, "lang": lang.lang or "unknown", "indexer_configuration_id": 
lang.indexer_configuration_id, } for lang in languages ), "tmp_content_language", ["id", "lang", "indexer_configuration_id"], cur, ) count = db.content_language_add_from_temp(cur) return {"content_language:add": count} - @timed - @db_transaction() - def content_ctags_missing( - self, ctags: Iterable[Dict], db=None, cur=None - ) -> List[Tuple[Sha1, int]]: - return [obj[0] for obj in db.content_ctags_missing_from_list(ctags, cur)] - - @timed - @db_transaction() - def content_ctags_get( - self, ids: Iterable[Sha1], db=None, cur=None - ) -> List[ContentCtagsRow]: - return [ - ContentCtagsRow.from_dict( - converters.db_to_ctags(dict(zip(db.content_ctags_cols, c))) - ) - for c in db.content_ctags_get_from_list(ids, cur) - ] - - @timed - @process_metrics - @db_transaction() - def content_ctags_add( - self, - ctags: List[ContentCtagsRow], - db=None, - cur=None, - ) -> Dict[str, int]: - check_id_duplicates(ctags) - ctags.sort(key=lambda m: m.id) - self.journal_writer.write_additions("content_ctags", ctags) - - db.mktemp_content_ctags(cur) - db.copy_to( - [ctag.to_dict() for ctag in ctags], - tblname="tmp_content_ctags", - columns=["id", "name", "kind", "line", "lang", "indexer_configuration_id"], - cur=cur, - ) - - count = db.content_ctags_add_from_temp(cur) - return {"content_ctags:add": count} - - @timed - @db_transaction() - def content_ctags_search( - self, - expression: str, - limit: int = 10, - last_sha1: Optional[Sha1] = None, - db=None, - cur=None, - ) -> List[ContentCtagsRow]: - return [ - ContentCtagsRow.from_dict( - converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj))) - ) - for obj in db.content_ctags_search(expression, last_sha1, limit, cur=cur) - ] - @timed @db_transaction() def content_fossology_license_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentLicenseRow]: return [ ContentLicenseRow.from_dict( converters.db_to_fossology_license( dict(zip(db.content_fossology_license_cols, c)) ) ) for c in db.content_fossology_license_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def content_fossology_license_add( self, licenses: List[ContentLicenseRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(licenses) licenses.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_fossology_license", licenses) db.mktemp_content_fossology_license(cur) db.copy_to( [license.to_dict() for license in licenses], tblname="tmp_content_fossology_license", columns=["id", "license", "indexer_configuration_id"], cur=cur, ) count = db.content_fossology_license_add_from_temp(cur) return {"content_fossology_license:add": count} @timed @db_transaction() def content_fossology_license_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[Sha1]: return self.get_partition( "fossology_license", indexer_configuration_id, partition_id, nb_partitions, page_token=page_token, limit=limit, with_textual_data=True, db=db, cur=cur, ) @timed @db_transaction() def content_metadata_missing( self, metadata: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [obj[0] for obj in db.content_metadata_missing_from_list(metadata, cur)] @timed @db_transaction() def content_metadata_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentMetadataRow]: return [ ContentMetadataRow.from_dict( converters.db_to_metadata(dict(zip(db.content_metadata_cols, c))) ) for c in db.content_metadata_get_from_list(ids, cur) ] 
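Note: the ctags endpoints removed above followed the same pattern as the surviving content_* endpoints: a `*_missing` filter, a bulk `*_add`, and a `*_get` lookup. Below is a minimal round-trip sketch of that pattern, not part of this patch, run against the in-memory backend; the sha1 and the tool configuration are invented for illustration.

```python
# Illustrative sketch only (not part of this patch): the missing -> add -> get
# round-trip shared by the remaining content_* endpoints, using the in-memory
# backend so no PostgreSQL instance is needed.
from swh.indexer.storage import get_indexer_storage
from swh.indexer.storage.model import ContentMimetypeRow

storage = get_indexer_storage("memory")

# Register a tool; indexer_configuration_add returns rows carrying an "id".
tool = storage.indexer_configuration_add(
    [
        {
            "tool_name": "file",
            "tool_version": "5.41",
            "tool_configuration": {"command_line": "file --mime <filepath>"},
        }
    ]
)[0]

sha1 = bytes.fromhex("34973274ccef6ab4dfaaf86599792fa9c3fe4689")  # made up

# Nothing indexed yet, so this content is reported as missing for this tool.
missing = storage.content_mimetype_missing(
    [{"id": sha1, "indexer_configuration_id": tool["id"]}]
)
assert sha1 in missing

# Add one row (tool attribute left unset, only indexer_configuration_id),
# then read it back.
summary = storage.content_mimetype_add(
    [
        ContentMimetypeRow(
            id=sha1,
            indexer_configuration_id=tool["id"],
            mimetype="text/plain",
            encoding="us-ascii",
        )
    ]
)
assert summary == {"content_mimetype:add": 1}

rows = storage.content_mimetype_get([sha1])
assert rows[0].mimetype == "text/plain"
```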
@timed @process_metrics @db_transaction() def content_metadata_add( self, metadata: List[ContentMetadataRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(metadata) metadata.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_metadata", metadata) db.mktemp_content_metadata(cur) db.copy_to( [m.to_dict() for m in metadata], "tmp_content_metadata", ["id", "metadata", "indexer_configuration_id"], cur, ) count = db.content_metadata_add_from_temp(cur) return { "content_metadata:add": count, } @timed @db_transaction() def directory_intrinsic_metadata_missing( self, metadata: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [ obj[0] for obj in db.directory_intrinsic_metadata_missing_from_list(metadata, cur) ] @timed @db_transaction() def directory_intrinsic_metadata_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[DirectoryIntrinsicMetadataRow]: return [ DirectoryIntrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.directory_intrinsic_metadata_cols, c)) ) ) for c in db.directory_intrinsic_metadata_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def directory_intrinsic_metadata_add( self, metadata: List[DirectoryIntrinsicMetadataRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(metadata) metadata.sort(key=lambda m: m.id) self.journal_writer.write_additions("directory_intrinsic_metadata", metadata) db.mktemp_directory_intrinsic_metadata(cur) db.copy_to( [m.to_dict() for m in metadata], "tmp_directory_intrinsic_metadata", ["id", "metadata", "mappings", "indexer_configuration_id"], cur, ) count = db.directory_intrinsic_metadata_add_from_temp(cur) return { "directory_intrinsic_metadata:add": count, } @timed @db_transaction() def origin_intrinsic_metadata_get( self, urls: Iterable[str], db=None, cur=None ) -> List[OriginIntrinsicMetadataRow]: return [ OriginIntrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c)) ) ) for c in db.origin_intrinsic_metadata_get_from_list(urls, cur) ] @timed @process_metrics @db_transaction() def origin_intrinsic_metadata_add( self, metadata: List[OriginIntrinsicMetadataRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(metadata) metadata.sort(key=lambda m: m.id) self.journal_writer.write_additions("origin_intrinsic_metadata", metadata) db.mktemp_origin_intrinsic_metadata(cur) db.copy_to( [m.to_dict() for m in metadata], "tmp_origin_intrinsic_metadata", [ "id", "metadata", "indexer_configuration_id", "from_directory", "mappings", ], cur, ) count = db.origin_intrinsic_metadata_add_from_temp(cur) return { "origin_intrinsic_metadata:add": count, } @timed @db_transaction() def origin_intrinsic_metadata_search_fulltext( self, conjunction: List[str], limit: int = 100, db=None, cur=None ) -> List[OriginIntrinsicMetadataRow]: return [ OriginIntrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c)) ) ) for c in db.origin_intrinsic_metadata_search_fulltext( conjunction, limit=limit, cur=cur ) ] @timed @db_transaction() def origin_intrinsic_metadata_search_by_producer( self, page_token: str = "", limit: int = 100, ids_only: bool = False, mappings: Optional[List[str]] = None, tool_ids: Optional[List[int]] = None, db=None, cur=None, ) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]: assert isinstance(page_token, str) # we go to limit+1 to check whether we should add next_page_token in # the response rows = 
db.origin_intrinsic_metadata_search_by_producer( page_token, limit + 1, ids_only, mappings, tool_ids, cur ) next_page_token = None if ids_only: results = [origin for (origin,) in rows] if len(results) > limit: results[limit:] = [] next_page_token = results[-1] else: results = [ OriginIntrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, row)) ) ) for row in rows ] if len(results) > limit: results[limit:] = [] next_page_token = results[-1].id return PagedResult( results=results, next_page_token=next_page_token, ) @timed @db_transaction() def origin_intrinsic_metadata_stats(self, db=None, cur=None): mapping_names = [m for m in MAPPING_NAMES] select_parts = [] # Count rows for each mapping for mapping_name in mapping_names: select_parts.append( ( "sum(case when (mappings @> ARRAY['%s']) " " then 1 else 0 end)" ) % mapping_name ) # Total select_parts.append("sum(1)") # Rows whose metadata has at least one key that is not '@context' select_parts.append( "sum(case when ('{}'::jsonb @> (metadata - '@context')) " " then 0 else 1 end)" ) cur.execute( "select " + ", ".join(select_parts) + " from origin_intrinsic_metadata" ) results = dict(zip(mapping_names + ["total", "non_empty"], cur.fetchone())) return { "total": results.pop("total"), "non_empty": results.pop("non_empty"), "per_mapping": results, } @timed @db_transaction() def origin_extrinsic_metadata_get( self, urls: Iterable[str], db=None, cur=None ) -> List[OriginExtrinsicMetadataRow]: return [ OriginExtrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.origin_extrinsic_metadata_cols, c)) ) ) for c in db.origin_extrinsic_metadata_get_from_list(urls, cur) ] @timed @process_metrics @db_transaction() def origin_extrinsic_metadata_add( self, metadata: List[OriginExtrinsicMetadataRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(metadata) metadata.sort(key=lambda m: m.id) self.journal_writer.write_additions("origin_extrinsic_metadata", metadata) db.mktemp_origin_extrinsic_metadata(cur) db.copy_to( [m.to_dict() for m in metadata], "tmp_origin_extrinsic_metadata", [ "id", "metadata", "indexer_configuration_id", "from_remd_id", "mappings", ], cur, ) count = db.origin_extrinsic_metadata_add_from_temp(cur) return { "origin_extrinsic_metadata:add": count, } @timed @db_transaction() def indexer_configuration_add(self, tools, db=None, cur=None): db.mktemp_indexer_configuration(cur) db.copy_to( tools, "tmp_indexer_configuration", ["tool_name", "tool_version", "tool_configuration"], cur, ) tools = db.indexer_configuration_add_from_temp(cur) results = [dict(zip(db.indexer_configuration_cols, line)) for line in tools] send_metric( "indexer_configuration:add", len(results), method_name="indexer_configuration_add", ) return results @timed @db_transaction() def indexer_configuration_get(self, tool, db=None, cur=None): tool_conf = tool["tool_configuration"] if isinstance(tool_conf, dict): tool_conf = json.dumps(tool_conf) idx = db.indexer_configuration_get( tool["tool_name"], tool["tool_version"], tool_conf ) if not idx: return None return dict(zip(db.indexer_configuration_cols, idx)) @db_transaction() def _tool_get_from_id(self, id_, db, cur): tool = dict( zip( db.indexer_configuration_cols, db.indexer_configuration_get_from_id(id_, cur), ) ) return { "id": tool["id"], "name": tool["tool_name"], "version": tool["tool_version"], "configuration": tool["tool_configuration"], } diff --git a/swh/indexer/storage/converters.py b/swh/indexer/storage/converters.py index c74bb68..aff22f0 
100644 --- a/swh/indexer/storage/converters.py +++ b/swh/indexer/storage/converters.py @@ -1,135 +1,59 @@ -# Copyright (C) 2015-2017 The Software Heritage developers +# Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -def ctags_to_db(ctags): - """Convert a ctags entry into a ready ctags entry. - - Args: - ctags (dict): ctags entry with the following keys: - - - id (bytes): content's identifier - - tool_id (int): tool id used to compute ctags - - ctags ([dict]): List of dictionary with the following keys: - - - name (str): symbol's name - - kind (str): symbol's kind - - line (int): symbol's line in the content - - language (str): language - - Returns: - list: list of ctags entries as dicts with the following keys: - - - id (bytes): content's identifier - - name (str): symbol's name - - kind (str): symbol's kind - - language (str): language for that content - - tool_id (int): tool id used to compute ctags - - """ - id = ctags["id"] - tool_id = ctags["indexer_configuration_id"] - for ctag in ctags["ctags"]: - yield { - "id": id, - "name": ctag["name"], - "kind": ctag["kind"], - "line": ctag["line"], - "lang": ctag["lang"], - "indexer_configuration_id": tool_id, - } - - -def db_to_ctags(ctag): - """Convert a ctags entry into a ready ctags entry. - - Args: - ctags (dict): ctags entry with the following keys: - - - id (bytes): content's identifier - - ctags ([dict]): List of dictionary with the following keys: - - name (str): symbol's name - - kind (str): symbol's kind - - line (int): symbol's line in the content - - language (str): language - - Returns: - list: list of ctags ready entry (dict with the following keys): - - - id (bytes): content's identifier - - name (str): symbol's name - - kind (str): symbol's kind - - language (str): language for that content - - tool (dict): tool used to compute the ctags - - """ - return { - "id": ctag["id"], - "name": ctag["name"], - "kind": ctag["kind"], - "line": ctag["line"], - "lang": ctag["lang"], - "tool": { - "id": ctag["tool_id"], - "name": ctag["tool_name"], - "version": ctag["tool_version"], - "configuration": ctag["tool_configuration"], - }, - } - - def db_to_mimetype(mimetype): - """Convert a ctags entry into a ready ctags output.""" + """Convert a mimetype entry into a ready mimetype output.""" return { "id": mimetype["id"], "encoding": mimetype["encoding"], "mimetype": mimetype["mimetype"], "tool": { "id": mimetype["tool_id"], "name": mimetype["tool_name"], "version": mimetype["tool_version"], "configuration": mimetype["tool_configuration"], }, } def db_to_language(language): """Convert a language entry into a ready language output.""" return { "id": language["id"], "lang": language["lang"], "tool": { "id": language["tool_id"], "name": language["tool_name"], "version": language["tool_version"], "configuration": language["tool_configuration"], }, } def db_to_metadata(metadata): """Convert a metadata entry into a ready metadata output.""" metadata["tool"] = { "id": metadata["tool_id"], "name": metadata["tool_name"], "version": metadata["tool_version"], "configuration": metadata["tool_configuration"], } del metadata["tool_id"], metadata["tool_configuration"] del metadata["tool_version"], metadata["tool_name"] return metadata def db_to_fossology_license(license): return { "id": license["id"], "license": license["license"], "tool": { "id": 
license["tool_id"], "name": license["tool_name"], "version": license["tool_version"], "configuration": license["tool_configuration"], }, } diff --git a/swh/indexer/storage/db.py b/swh/indexer/storage/db.py index bd8391d..a7121f3 100644 --- a/swh/indexer/storage/db.py +++ b/swh/indexer/storage/db.py @@ -1,563 +1,493 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Dict, Iterable, Iterator, List from swh.core.db import BaseDb from swh.core.db.db_utils import execute_values_generator, stored_procedure -from swh.model import hashutil from .interface import Sha1 class Db(BaseDb): """Proxy to the SWH Indexer DB, with wrappers around stored procedures""" content_mimetype_hash_keys = ["id", "indexer_configuration_id"] def _missing_from_list( self, table: str, data: Iterable[Dict], hash_keys: List[str], cur=None ): """Read from table the data with hash_keys that are missing. Args: table: Table name (e.g content_mimetype, content_language, etc...) data: Dict of data to read from hash_keys: List of keys to read in the data dict. Yields: The data which is missing from the db. """ cur = self._cursor(cur) keys = ", ".join(hash_keys) equality = " AND ".join(("t.%s = c.%s" % (key, key)) for key in hash_keys) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(%s) where not exists ( select 1 from %s c where %s ) """ % (keys, keys, table, equality), (tuple(m[k] for k in hash_keys) for m in data), ) def content_mimetype_missing_from_list( self, mimetypes: Iterable[Dict], cur=None ) -> Iterator[Sha1]: """List missing mimetypes.""" yield from self._missing_from_list( "content_mimetype", mimetypes, self.content_mimetype_hash_keys, cur=cur ) content_mimetype_cols = [ "id", "mimetype", "encoding", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_content_mimetype") def mktemp_content_mimetype(self, cur=None): pass def content_mimetype_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_content_mimetype_add()") return cur.fetchone()[0] def _convert_key(self, key, main_table="c"): """Convert keys according to specific use in the module. Args: key (str): Key expression to change according to the alias used in the query main_table (str): Alias to use for the main table. Default to c for content_{something}. Expected: Tables content_{something} being aliased as 'c' (something in {language, mimetype, ...}), table indexer_configuration being aliased as 'i'. """ if key == "id": return "%s.id" % main_table elif key == "tool_id": return "i.id as tool_id" elif key == "license": return ( """ ( select name from fossology_license where id = %s.license_id ) as licenses""" % main_table ) return key def _get_from_list(self, table, ids, cols, cur=None, id_col="id"): """Fetches entries from the `table` such that their `id` field (or whatever is given to `id_col`) is in `ids`. Returns the columns `cols`. The `cur` parameter is used to connect to the database. 
""" cur = self._cursor(cur) keys = map(self._convert_key, cols) query = """ select {keys} from (values %s) as t(id) inner join {table} c on c.{id_col}=t.id inner join indexer_configuration i on c.indexer_configuration_id=i.id; """.format( keys=", ".join(keys), id_col=id_col, table=table ) yield from execute_values_generator(cur, query, ((_id,) for _id in ids)) content_indexer_names = { "mimetype": "content_mimetype", "fossology_license": "content_fossology_license", } def content_get_range( self, content_type, start, end, indexer_configuration_id, limit=1000, with_textual_data=False, cur=None, ): """Retrieve contents with content_type, within range [start, end] bound by limit and associated to the given indexer configuration id. When asking to work on textual content, that filters on the mimetype table with any mimetype that is not binary. """ cur = self._cursor(cur) table = self.content_indexer_names[content_type] if with_textual_data: extra = """inner join content_mimetype cm on (t.id=cm.id and cm.mimetype like 'text/%%' and %(start)s <= cm.id and cm.id <= %(end)s) """ else: extra = "" query = f"""select t.id from {table} t {extra} where t.indexer_configuration_id=%(tool_id)s and %(start)s <= t.id and t.id <= %(end)s order by t.indexer_configuration_id, t.id limit %(limit)s""" cur.execute( query, { "start": start, "end": end, "tool_id": indexer_configuration_id, "limit": limit, }, ) yield from cur def content_mimetype_get_from_list(self, ids, cur=None): yield from self._get_from_list( "content_mimetype", ids, self.content_mimetype_cols, cur=cur ) content_language_hash_keys = ["id", "indexer_configuration_id"] def content_language_missing_from_list(self, languages, cur=None): """List missing languages.""" yield from self._missing_from_list( "content_language", languages, self.content_language_hash_keys, cur=cur ) content_language_cols = [ "id", "lang", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_content_language") def mktemp_content_language(self, cur=None): pass def content_language_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_content_language_add()") return cur.fetchone()[0] def content_language_get_from_list(self, ids, cur=None): yield from self._get_from_list( "content_language", ids, self.content_language_cols, cur=cur ) - content_ctags_hash_keys = ["id", "indexer_configuration_id"] - - def content_ctags_missing_from_list(self, ctags, cur=None): - """List missing ctags.""" - yield from self._missing_from_list( - "content_ctags", ctags, self.content_ctags_hash_keys, cur=cur - ) - - content_ctags_cols = [ - "id", - "name", - "kind", - "line", - "lang", - "tool_id", - "tool_name", - "tool_version", - "tool_configuration", - ] - - @stored_procedure("swh_mktemp_content_ctags") - def mktemp_content_ctags(self, cur=None): - pass - - def content_ctags_add_from_temp(self, cur=None): - cur = self._cursor(cur) - cur.execute("select * from swh_content_ctags_add()") - return cur.fetchone()[0] - - def content_ctags_get_from_list(self, ids, cur=None): - cur = self._cursor(cur) - keys = map(self._convert_key, self.content_ctags_cols) - yield from execute_values_generator( - cur, - """ - select %s - from (values %%s) as t(id) - inner join content_ctags c - on c.id=t.id - inner join indexer_configuration i - on c.indexer_configuration_id=i.id - order by line - """ - % ", ".join(keys), - ((_id,) for _id in ids), - ) - - def content_ctags_search(self, expression, last_sha1, limit, cur=None): - cur = 
self._cursor(cur) - if not last_sha1: - query = """SELECT %s - FROM swh_content_ctags_search(%%s, %%s)""" % ( - ",".join(self.content_ctags_cols) - ) - cur.execute(query, (expression, limit)) - else: - if last_sha1 and isinstance(last_sha1, bytes): - last_sha1 = "\\x%s" % hashutil.hash_to_hex(last_sha1) - elif last_sha1: - last_sha1 = "\\x%s" % last_sha1 - - query = """SELECT %s - FROM swh_content_ctags_search(%%s, %%s, %%s)""" % ( - ",".join(self.content_ctags_cols) - ) - cur.execute(query, (expression, limit, last_sha1)) - - yield from cur - content_fossology_license_cols = [ "id", "tool_id", "tool_name", "tool_version", "tool_configuration", "license", ] @stored_procedure("swh_mktemp_content_fossology_license") def mktemp_content_fossology_license(self, cur=None): pass def content_fossology_license_add_from_temp(self, cur=None): """Add new licenses per content.""" cur = self._cursor(cur) cur.execute("select * from swh_content_fossology_license_add()") return cur.fetchone()[0] def content_fossology_license_get_from_list(self, ids, cur=None): """Retrieve licenses per id.""" cur = self._cursor(cur) keys = map(self._convert_key, self.content_fossology_license_cols) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(id) inner join content_fossology_license c on t.id=c.id inner join indexer_configuration i on i.id=c.indexer_configuration_id """ % ", ".join(keys), ((_id,) for _id in ids), ) content_metadata_hash_keys = ["id", "indexer_configuration_id"] def content_metadata_missing_from_list(self, metadata, cur=None): """List missing metadata.""" yield from self._missing_from_list( "content_metadata", metadata, self.content_metadata_hash_keys, cur=cur ) content_metadata_cols = [ "id", "metadata", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_content_metadata") def mktemp_content_metadata(self, cur=None): pass def content_metadata_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_content_metadata_add()") return cur.fetchone()[0] def content_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( "content_metadata", ids, self.content_metadata_cols, cur=cur ) directory_intrinsic_metadata_hash_keys = ["id", "indexer_configuration_id"] def directory_intrinsic_metadata_missing_from_list(self, metadata, cur=None): """List missing metadata.""" yield from self._missing_from_list( "directory_intrinsic_metadata", metadata, self.directory_intrinsic_metadata_hash_keys, cur=cur, ) directory_intrinsic_metadata_cols = [ "id", "metadata", "mappings", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_directory_intrinsic_metadata") def mktemp_directory_intrinsic_metadata(self, cur=None): pass def directory_intrinsic_metadata_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_directory_intrinsic_metadata_add()") return cur.fetchone()[0] def directory_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( "directory_intrinsic_metadata", ids, self.directory_intrinsic_metadata_cols, cur=cur, ) origin_intrinsic_metadata_cols = [ "id", "metadata", "from_directory", "mappings", "tool_id", "tool_name", "tool_version", "tool_configuration", ] origin_intrinsic_metadata_regconfig = "pg_catalog.simple" """The dictionary used to normalize 'metadata' and queries. 'pg_catalog.simple' provides no stopword, so it should be suitable for proper names and non-English content. 
When updating this value, make sure to add a new index on origin_intrinsic_metadata.metadata.""" @stored_procedure("swh_mktemp_origin_intrinsic_metadata") def mktemp_origin_intrinsic_metadata(self, cur=None): pass def origin_intrinsic_metadata_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_origin_intrinsic_metadata_add()") return cur.fetchone()[0] def origin_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( "origin_intrinsic_metadata", ids, self.origin_intrinsic_metadata_cols, cur=cur, id_col="id", ) def origin_intrinsic_metadata_search_fulltext(self, terms, *, limit, cur): regconfig = self.origin_intrinsic_metadata_regconfig tsquery_template = " && ".join( "plainto_tsquery('%s', %%s)" % regconfig for _ in terms ) tsquery_args = [(term,) for term in terms] keys = ( self._convert_key(col, "oim") for col in self.origin_intrinsic_metadata_cols ) query = ( "SELECT {keys} FROM origin_intrinsic_metadata AS oim " "INNER JOIN indexer_configuration AS i " "ON oim.indexer_configuration_id=i.id " "JOIN LATERAL (SELECT {tsquery_template}) AS s(tsq) ON true " "WHERE oim.metadata_tsvector @@ tsq " "ORDER BY ts_rank(oim.metadata_tsvector, tsq, 1) DESC " "LIMIT %s;" ).format(keys=", ".join(keys), tsquery_template=tsquery_template) cur.execute(query, tsquery_args + [limit]) yield from cur def origin_intrinsic_metadata_search_by_producer( self, last, limit, ids_only, mappings, tool_ids, cur ): if ids_only: keys = "oim.id" else: keys = ", ".join( ( self._convert_key(col, "oim") for col in self.origin_intrinsic_metadata_cols ) ) query_parts = [ "SELECT %s" % keys, "FROM origin_intrinsic_metadata AS oim", "INNER JOIN indexer_configuration AS i", "ON oim.indexer_configuration_id=i.id", ] args = [] where = [] if last: where.append("oim.id > %s") args.append(last) if mappings is not None: where.append("oim.mappings && %s") args.append(list(mappings)) if tool_ids is not None: where.append("oim.indexer_configuration_id = ANY(%s)") args.append(list(tool_ids)) if where: query_parts.append("WHERE") query_parts.append(" AND ".join(where)) if limit: query_parts.append("LIMIT %s") args.append(limit) cur.execute(" ".join(query_parts), args) yield from cur origin_extrinsic_metadata_cols = [ "id", "metadata", "from_remd_id", "mappings", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_origin_extrinsic_metadata") def mktemp_origin_extrinsic_metadata(self, cur=None): pass def origin_extrinsic_metadata_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_origin_extrinsic_metadata_add()") return cur.fetchone()[0] def origin_extrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( "origin_extrinsic_metadata", ids, self.origin_extrinsic_metadata_cols, cur=cur, id_col="id", ) indexer_configuration_cols = [ "id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_indexer_configuration") def mktemp_indexer_configuration(self, cur=None): pass def indexer_configuration_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute( "SELECT %s from swh_indexer_configuration_add()" % (",".join(self.indexer_configuration_cols),) ) yield from cur def indexer_configuration_get( self, tool_name, tool_version, tool_configuration, cur=None ): cur = self._cursor(cur) cur.execute( """select %s from indexer_configuration where tool_name=%%s and tool_version=%%s and tool_configuration=%%s""" % 
(",".join(self.indexer_configuration_cols)), (tool_name, tool_version, tool_configuration), ) return cur.fetchone() def indexer_configuration_get_from_id(self, id_, cur=None): cur = self._cursor(cur) cur.execute( """select %s from indexer_configuration where id=%%s""" % (",".join(self.indexer_configuration_cols)), (id_,), ) return cur.fetchone() diff --git a/swh/indexer/storage/in_memory.py b/swh/indexer/storage/in_memory.py index afe7f57..757f3c0 100644 --- a/swh/indexer/storage/in_memory.py +++ b/swh/indexer/storage/in_memory.py @@ -1,519 +1,483 @@ -# Copyright (C) 2018-2020 The Software Heritage developers +# Copyright (C) 2018-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import Counter, defaultdict import itertools import json import math import operator import re from typing import ( Any, Dict, Generic, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union, ) from swh.core.collections import SortedList from swh.model.hashutil import hash_to_bytes, hash_to_hex -from swh.model.model import SHA1_SIZE, Sha1Git +from swh.model.model import SHA1_SIZE from swh.storage.utils import get_partition_bounds_bytes from . import MAPPING_NAMES, check_id_duplicates from .exc import IndexerStorageArgumentException from .interface import PagedResult, Sha1 from .model import ( BaseRow, - ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from .writer import JournalWriter SHA1_DIGEST_SIZE = 160 ToolId = int def _transform_tool(tool): return { "id": tool["id"], "name": tool["tool_name"], "version": tool["tool_version"], "configuration": tool["tool_configuration"], } def check_id_types(data: List[Dict[str, Any]]): """Checks all elements of the list have an 'id' whose type is 'bytes'.""" if not all(isinstance(item.get("id"), bytes) for item in data): raise IndexerStorageArgumentException("identifiers must be bytes.") def _key_from_dict(d): return tuple(sorted(d.items())) TValue = TypeVar("TValue", bound=BaseRow) class SubStorage(Generic[TValue]): """Implements common missing/get/add logic for each indexer type.""" _data: Dict[Sha1, Dict[Tuple, Dict[str, Any]]] _tools_per_id: Dict[Sha1, Set[ToolId]] def __init__(self, row_class: Type[TValue], tools, journal_writer): self.row_class = row_class self._tools = tools self._sorted_ids = SortedList[bytes, Sha1]() self._data = defaultdict(dict) self._journal_writer = journal_writer self._tools_per_id = defaultdict(set) def _key_from_dict(self, d) -> Tuple: """Like the global _key_from_dict, but filters out dict keys that don't belong in the unique key.""" return _key_from_dict({k: d[k] for k in self.row_class.UNIQUE_KEY_FIELDS}) def missing(self, keys: Iterable[Dict]) -> List[Sha1]: """List data missing from storage. Args: data (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ results = [] for key in keys: tool_id = key["indexer_configuration_id"] id_ = key["id"] if tool_id not in self._tools_per_id.get(id_, set()): results.append(id_) return results def get(self, ids: Iterable[Sha1]) -> List[TValue]: """Retrieve data per id. 
Args: ids (iterable): sha1 checksums Yields: dict: dictionaries with the following keys: - **id** (bytes) - **tool** (dict): tool used to compute metadata - arbitrary data (as provided to `add`) """ results = [] for id_ in ids: for entry in self._data[id_].values(): entry = entry.copy() tool_id = entry.pop("indexer_configuration_id") results.append( self.row_class( id=id_, tool=_transform_tool(self._tools[tool_id]), **entry, ) ) return results def get_all(self) -> List[TValue]: return self.get(self._sorted_ids) def get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: """Retrieve ids of content with `indexer_type` within partition partition_id bound by limit. Args: **indexer_type**: Type of data content to index (mimetype, language, etc...) **indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) **with_textual_data** (bool): Deal with only textual content (True) or all content (all contents by defaults, False) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. If next_page_token is None, there is no more data to fetch """ if limit is None: raise IndexerStorageArgumentException("limit should not be None") (start, end) = get_partition_bounds_bytes( partition_id, nb_partitions, SHA1_SIZE ) if page_token: start = hash_to_bytes(page_token) if end is None: end = b"\xff" * SHA1_SIZE next_page_token: Optional[str] = None ids: List[Sha1] = [] sha1s = (sha1 for sha1 in self._sorted_ids.iter_from(start)) for counter, sha1 in enumerate(sha1s): if sha1 > end: break if counter >= limit: next_page_token = hash_to_hex(sha1) break ids.append(sha1) assert len(ids) <= limit return PagedResult(results=ids, next_page_token=next_page_token) def add(self, data: Iterable[TValue]) -> int: """Add data not present in storage. 
Args: data (iterable): dictionaries with keys: - **id**: sha1 - **indexer_configuration_id**: tool used to compute the results - arbitrary data """ data = list(data) check_id_duplicates(data) object_type = self.row_class.object_type # type: ignore self._journal_writer.write_additions(object_type, data) count = 0 for obj in data: item = obj.to_dict() id_ = item.pop("id") tool_id = item["indexer_configuration_id"] key = _key_from_dict(obj.unique_key()) self._data[id_][key] = item self._tools_per_id[id_].add(tool_id) count += 1 if id_ not in self._sorted_ids: self._sorted_ids.add(id_) return count class IndexerStorage: """In-memory SWH indexer storage.""" def __init__(self, journal_writer=None): self._tools = {} def tool_getter(id_): tool = self._tools[id_] return { "id": tool["id"], "name": tool["tool_name"], "version": tool["tool_version"], "configuration": tool["tool_configuration"], } self.journal_writer = JournalWriter(tool_getter, journal_writer) args = (self._tools, self.journal_writer) self._mimetypes = SubStorage(ContentMimetypeRow, *args) self._languages = SubStorage(ContentLanguageRow, *args) - self._content_ctags = SubStorage(ContentCtagsRow, *args) self._licenses = SubStorage(ContentLicenseRow, *args) self._content_metadata = SubStorage(ContentMetadataRow, *args) self._directory_intrinsic_metadata = SubStorage( DirectoryIntrinsicMetadataRow, *args ) self._origin_intrinsic_metadata = SubStorage(OriginIntrinsicMetadataRow, *args) self._origin_extrinsic_metadata = SubStorage(OriginExtrinsicMetadataRow, *args) def check_config(self, *, check_write): return True def content_mimetype_missing( self, mimetypes: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._mimetypes.missing(mimetypes) def content_mimetype_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: return self._mimetypes.get_partition( indexer_configuration_id, partition_id, nb_partitions, page_token, limit ) def content_mimetype_add( self, mimetypes: List[ContentMimetypeRow] ) -> Dict[str, int]: added = self._mimetypes.add(mimetypes) return {"content_mimetype:add": added} def content_mimetype_get(self, ids: Iterable[Sha1]) -> List[ContentMimetypeRow]: return self._mimetypes.get(ids) def content_language_missing( self, languages: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._languages.missing(languages) def content_language_get(self, ids: Iterable[Sha1]) -> List[ContentLanguageRow]: return self._languages.get(ids) def content_language_add( self, languages: List[ContentLanguageRow] ) -> Dict[str, int]: added = self._languages.add(languages) return {"content_language:add": added} - def content_ctags_missing(self, ctags: Iterable[Dict]) -> List[Tuple[Sha1, int]]: - return self._content_ctags.missing(ctags) - - def content_ctags_get(self, ids: Iterable[Sha1]) -> List[ContentCtagsRow]: - return self._content_ctags.get(ids) - - def content_ctags_add(self, ctags: List[ContentCtagsRow]) -> Dict[str, int]: - added = self._content_ctags.add(ctags) - return {"content_ctags:add": added} - - def content_ctags_search( - self, expression: str, limit: int = 10, last_sha1: Optional[Sha1] = None - ) -> List[ContentCtagsRow]: - nb_matches = 0 - items_per_id: Dict[Tuple[Sha1Git, ToolId], List[ContentCtagsRow]] = {} - for item in sorted(self._content_ctags.get_all()): - if item.id <= (last_sha1 or bytes(0 for _ in range(SHA1_DIGEST_SIZE))): - continue - items_per_id.setdefault( - (item.id, 
item.indexer_configuration_id), [] - ).append(item) - - results = [] - for items in items_per_id.values(): - for item in items: - if item.name != expression: - continue - nb_matches += 1 - if nb_matches > limit: - break - results.append(item) - - return results - def content_fossology_license_get( self, ids: Iterable[Sha1] ) -> List[ContentLicenseRow]: return self._licenses.get(ids) def content_fossology_license_add( self, licenses: List[ContentLicenseRow] ) -> Dict[str, int]: added = self._licenses.add(licenses) return {"content_fossology_license:add": added} def content_fossology_license_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: return self._licenses.get_partition( indexer_configuration_id, partition_id, nb_partitions, page_token, limit ) def content_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._content_metadata.missing(metadata) def content_metadata_get(self, ids: Iterable[Sha1]) -> List[ContentMetadataRow]: return self._content_metadata.get(ids) def content_metadata_add( self, metadata: List[ContentMetadataRow] ) -> Dict[str, int]: added = self._content_metadata.add(metadata) return {"content_metadata:add": added} def directory_intrinsic_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._directory_intrinsic_metadata.missing(metadata) def directory_intrinsic_metadata_get( self, ids: Iterable[Sha1] ) -> List[DirectoryIntrinsicMetadataRow]: return self._directory_intrinsic_metadata.get(ids) def directory_intrinsic_metadata_add( self, metadata: List[DirectoryIntrinsicMetadataRow] ) -> Dict[str, int]: added = self._directory_intrinsic_metadata.add(metadata) return {"directory_intrinsic_metadata:add": added} def origin_intrinsic_metadata_get( self, urls: Iterable[str] ) -> List[OriginIntrinsicMetadataRow]: return self._origin_intrinsic_metadata.get(urls) def origin_intrinsic_metadata_add( self, metadata: List[OriginIntrinsicMetadataRow] ) -> Dict[str, int]: added = self._origin_intrinsic_metadata.add(metadata) return {"origin_intrinsic_metadata:add": added} def origin_intrinsic_metadata_search_fulltext( self, conjunction: List[str], limit: int = 100 ) -> List[OriginIntrinsicMetadataRow]: # A very crude fulltext search implementation, but that's enough # to work on English metadata tokens_re = re.compile("[a-zA-Z0-9]+") search_tokens = list(itertools.chain(*map(tokens_re.findall, conjunction))) def rank(data): # Tokenize the metadata text = json.dumps(data.metadata) text_tokens = tokens_re.findall(text) text_token_occurences = Counter(text_tokens) # Count the number of occurrences of search tokens in the text score = 0 for search_token in search_tokens: if text_token_occurences[search_token] == 0: # Search token is not in the text. 
return 0 score += text_token_occurences[search_token] # Normalize according to the text's length return score / math.log(len(text_tokens)) results = [ (rank(data), data) for data in self._origin_intrinsic_metadata.get_all() ] results = [(rank_, data) for (rank_, data) in results if rank_ > 0] results.sort( key=operator.itemgetter(0), reverse=True # Don't try to order 'data' ) return [result for (rank_, result) in results[:limit]] def origin_intrinsic_metadata_search_by_producer( self, page_token: str = "", limit: int = 100, ids_only: bool = False, mappings: Optional[List[str]] = None, tool_ids: Optional[List[int]] = None, ) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]: assert isinstance(page_token, str) nb_results = 0 if mappings is not None: mapping_set = frozenset(mappings) if tool_ids is not None: tool_id_set = frozenset(tool_ids) rows = [] # we go to limit+1 to check whether we should add next_page_token in # the response for entry in self._origin_intrinsic_metadata.get_all(): if entry.id <= page_token: continue if nb_results >= (limit + 1): break if mappings and mapping_set.isdisjoint(entry.mappings): continue if tool_ids and entry.tool["id"] not in tool_id_set: continue rows.append(entry) nb_results += 1 if len(rows) > limit: rows = rows[:limit] next_page_token = rows[-1].id else: next_page_token = None if ids_only: rows = [row.id for row in rows] return PagedResult( results=rows, next_page_token=next_page_token, ) def origin_intrinsic_metadata_stats(self): mapping_count = {m: 0 for m in MAPPING_NAMES} total = non_empty = 0 for data in self._origin_intrinsic_metadata.get_all(): total += 1 if set(data.metadata) - {"@context"}: non_empty += 1 for mapping in data.mappings: mapping_count[mapping] += 1 return {"per_mapping": mapping_count, "total": total, "non_empty": non_empty} def origin_extrinsic_metadata_get( self, urls: Iterable[str] ) -> List[OriginExtrinsicMetadataRow]: return self._origin_extrinsic_metadata.get(urls) def origin_extrinsic_metadata_add( self, metadata: List[OriginExtrinsicMetadataRow] ) -> Dict[str, int]: added = self._origin_extrinsic_metadata.add(metadata) return {"origin_extrinsic_metadata:add": added} def indexer_configuration_add(self, tools): inserted = [] for tool in tools: tool = tool.copy() id_ = self._tool_key(tool) tool["id"] = id_ self._tools[id_] = tool inserted.append(tool) return inserted def indexer_configuration_get(self, tool): return self._tools.get(self._tool_key(tool)) def _tool_key(self, tool): return hash( ( tool["tool_name"], tool["tool_version"], json.dumps(tool["tool_configuration"], sort_keys=True), ) ) diff --git a/swh/indexer/storage/interface.py b/swh/indexer/storage/interface.py index 012c685..121fdc2 100644 --- a/swh/indexer/storage/interface.py +++ b/swh/indexer/storage/interface.py @@ -1,549 +1,482 @@ -# Copyright (C) 2015-2020 The Software Heritage developers +# Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Dict, Iterable, List, Optional, Tuple, TypeVar, Union from typing_extensions import Protocol, runtime_checkable from swh.core.api import remote_api_endpoint from swh.core.api.classes import PagedResult as CorePagedResult from swh.indexer.storage.model import ( - ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, 
OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) TResult = TypeVar("TResult") PagedResult = CorePagedResult[TResult, str] Sha1 = bytes @runtime_checkable class IndexerStorageInterface(Protocol): @remote_api_endpoint("check_config") def check_config(self, *, check_write): """Check that the storage is configured and ready to go.""" ... @remote_api_endpoint("content_mimetype/missing") def content_mimetype_missing( self, mimetypes: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """Generate mimetypes missing from storage. Args: mimetypes (iterable): iterable of dict with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Returns: list of tuple (id, indexer_configuration_id) missing """ ... @remote_api_endpoint("content_mimetype/range") def content_mimetype_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: """Retrieve mimetypes within partition partition_id bound by limit. Args: **indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. If next_page_token is None, there is no more data to fetch """ ... @remote_api_endpoint("content_mimetype/add") def content_mimetype_add( self, mimetypes: List[ContentMimetypeRow] ) -> Dict[str, int]: """Add mimetypes not present in storage. Args: mimetypes: mimetype rows to be added, with their `tool` attribute set to None. overwrite (``True``) or skip duplicates (``False``, the default) Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("content_mimetype") def content_mimetype_get(self, ids: Iterable[Sha1]) -> List[ContentMimetypeRow]: """Retrieve full content mimetype per ids. Args: ids: sha1 identifiers Returns: mimetype row objects """ ... @remote_api_endpoint("content_language/missing") def content_language_missing( self, languages: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """List languages missing from storage. Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Returns: list of tuple (id, indexer_configuration_id) missing """ ... @remote_api_endpoint("content_language") def content_language_get(self, ids: Iterable[Sha1]) -> List[ContentLanguageRow]: """Retrieve full content language per ids. Args: ids (iterable): sha1 identifier Returns: language row objects """ ... @remote_api_endpoint("content_language/add") def content_language_add( self, languages: List[ContentLanguageRow] ) -> Dict[str, int]: """Add languages not present in storage. Args: languages: language row objects Returns: Dict summary of number of rows added """ ... - @remote_api_endpoint("content/ctags/missing") - def content_ctags_missing(self, ctags: Iterable[Dict]) -> List[Tuple[Sha1, int]]: - """List ctags missing from storage. - - Args: - ctags (iterable): dicts with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Returns: - list of missing id for the tuple (id, - indexer_configuration_id) - - """ - ... 
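Note: the partition endpoints (`content_mimetype_get_partition` above, `content_fossology_license_get_partition` below) share the PagedResult contract described in their docstrings: keep passing back `next_page_token` until it comes back as None. A client-side paging sketch, assuming an already-configured `storage` and a hypothetical `tool_id`:

```python
# Illustrative sketch (not part of this patch): walking one partition of
# indexed sha1s by following PagedResult.next_page_token.
def iter_partition(storage, tool_id, partition_id, nb_partitions, limit=1000):
    page_token = None
    while True:
        page = storage.content_mimetype_get_partition(
            tool_id,
            partition_id,
            nb_partitions,
            page_token=page_token,
            limit=limit,
        )
        yield from page.results
        if page.next_page_token is None:
            break  # no more data in this partition
        page_token = page.next_page_token
```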
- - @remote_api_endpoint("content/ctags") - def content_ctags_get(self, ids: Iterable[Sha1]) -> List[ContentCtagsRow]: - """Retrieve ctags per id. - - Args: - ids (iterable): sha1 checksums - - Returns: - list of language rows - - - """ - ... - - @remote_api_endpoint("content/ctags/add") - def content_ctags_add(self, ctags: List[ContentCtagsRow]) -> Dict[str, int]: - """Add ctags not present in storage - - Args: - ctags (iterable): dictionaries with keys: - - - **id** (bytes): sha1 - - **ctags** ([list): List of dictionary with keys: name, kind, - line, lang - - Returns: - Dict summary of number of rows added - - """ - ... - - @remote_api_endpoint("content/ctags/search") - def content_ctags_search( - self, expression: str, limit: int = 10, last_sha1: Optional[Sha1] = None - ) -> List[ContentCtagsRow]: - """Search through content's raw ctags symbols. - - Args: - expression (str): Expression to search for - limit (int): Number of rows to return (default to 10). - last_sha1 (str): Offset from which retrieving data (default to ''). - - Returns: - rows of ctags including id, name, lang, kind, line, etc... - - """ - ... - @remote_api_endpoint("content/fossology_license") def content_fossology_license_get( self, ids: Iterable[Sha1] ) -> List[ContentLicenseRow]: """Retrieve licenses per id. Args: ids: sha1 identifiers Yields: license rows; possibly more than one per (sha1, tool_id) if there are multiple licenses. """ ... @remote_api_endpoint("content/fossology_license/add") def content_fossology_license_add( self, licenses: List[ContentLicenseRow] ) -> Dict[str, int]: """Add licenses not present in storage. Args: license: license rows to be added, with their `tool` attribute set to None. Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("content/fossology_license/range") def content_fossology_license_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: """Retrieve licenses within the partition partition_id bound by limit. Args: **indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. If next_page_token is None, there is no more data to fetch """ ... @remote_api_endpoint("content_metadata/missing") def content_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """List metadata missing from storage. Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ ... @remote_api_endpoint("content_metadata") def content_metadata_get(self, ids: Iterable[Sha1]) -> List[ContentMetadataRow]: """Retrieve metadata per id. Args: ids (iterable): sha1 checksums Yields: dictionaries with the following keys: id (bytes) metadata (str): associated metadata tool (dict): tool used to compute metadata """ ... @remote_api_endpoint("content_metadata/add") def content_metadata_add( self, metadata: List[ContentMetadataRow] ) -> Dict[str, int]: """Add metadata not present in storage. 
Args: metadata (iterable): dictionaries with keys: - **id**: sha1 - **metadata**: arbitrary dict Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("directory_intrinsic_metadata/missing") def directory_intrinsic_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """List metadata missing from storage. Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1_git directory identifier - **indexer_configuration_id** (int): tool used to compute the results Returns: missing ids """ ... @remote_api_endpoint("directory_intrinsic_metadata") def directory_intrinsic_metadata_get( self, ids: Iterable[Sha1] ) -> List[DirectoryIntrinsicMetadataRow]: """Retrieve directory metadata per id. Args: ids (iterable): sha1 checksums Returns: DirectoryIntrinsicMetadataRow objects """ ... @remote_api_endpoint("directory_intrinsic_metadata/add") def directory_intrinsic_metadata_add( self, metadata: List[DirectoryIntrinsicMetadataRow], ) -> Dict[str, int]: """Add metadata not present in storage. Args: metadata: DirectoryIntrinsicMetadataRow objects Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("origin_intrinsic_metadata") def origin_intrinsic_metadata_get( self, urls: Iterable[str] ) -> List[OriginIntrinsicMetadataRow]: """Retrieve origin metadata per id. Args: urls (iterable): origin URLs Returns: list of OriginIntrinsicMetadataRow """ ... @remote_api_endpoint("origin_intrinsic_metadata/add") def origin_intrinsic_metadata_add( self, metadata: List[OriginIntrinsicMetadataRow] ) -> Dict[str, int]: """Add origin metadata not present in storage. Args: metadata: list of OriginIntrinsicMetadataRow objects Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("origin_intrinsic_metadata/search/fulltext") def origin_intrinsic_metadata_search_fulltext( self, conjunction: List[str], limit: int = 100 ) -> List[OriginIntrinsicMetadataRow]: """Returns the list of origins whose metadata contain all the terms. Args: conjunction: List of terms to be searched for. limit: The maximum number of results to return Returns: list of OriginIntrinsicMetadataRow """ ... @remote_api_endpoint("origin_intrinsic_metadata/search/by_producer") def origin_intrinsic_metadata_search_by_producer( self, page_token: str = "", limit: int = 100, ids_only: bool = False, mappings: Optional[List[str]] = None, tool_ids: Optional[List[int]] = None, ) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]: """Returns the list of origins whose intrinsic metadata was generated by the given producers (any of the selected mappings and/or tools). Args: page_token (str): Opaque token used for pagination. limit (int): The maximum number of results to return ids_only (bool): Determines whether only origin urls are returned or the content as well mappings (List[str]): Returns origins whose intrinsic metadata were generated using at least one of these mappings. tool_ids (List[int]): Returns origins whose intrinsic metadata were generated using at least one of these tools. Returns: PagedResult of origin URLs (when `ids_only` is True) or OriginIntrinsicMetadataRow objects """ ... @remote_api_endpoint("origin_intrinsic_metadata/stats") def origin_intrinsic_metadata_stats(self): """Returns counts of indexed metadata per origin, broken down into metadata types. Returns: dict: dictionary with keys: - total (int): total number of origins that were indexed (possibly yielding an empty metadata dictionary) - non_empty (int): total number of origins that we extracted a non-empty metadata dictionary from - per_mapping (dict): a dictionary with mapping names as keys and number of origins whose indexing used this mapping. Note that indexing a given origin may use 0, 1, or many mappings. """ ...
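# Illustrative sketch only (not part of the interface definition): one way a
# client could paginate over origin_intrinsic_metadata_search_by_producer
# using the PagedResult token protocol described above. `storage` is assumed
# to be any IndexerStorageInterface implementation; the "npm" mapping name and
# the helper name below are chosen purely for the example.
def iter_npm_origin_urls(storage):
    page_token = ""
    while True:
        page = storage.origin_intrinsic_metadata_search_by_producer(
            page_token=page_token, limit=100, ids_only=True, mappings=["npm"]
        )
        # with ids_only=True, results are plain origin URLs
        yield from page.results
        if page.next_page_token is None:
            break
        page_token = page.next_page_token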
@remote_api_endpoint("origin_extrinsic_metadata") def origin_extrinsic_metadata_get( self, urls: Iterable[str] ) -> List[OriginExtrinsicMetadataRow]: """Retrieve origin metadata per id. Args: urls (iterable): origin URLs Returns: list of OriginExtrinsicMetadataRow """ ... @remote_api_endpoint("origin_extrinsic_metadata/add") def origin_extrinsic_metadata_add( self, metadata: List[OriginExtrinsicMetadataRow] ) -> Dict[str, int]: """Add origin metadata not present in storage. Args: metadata: list of OriginExtrinsicMetadataRow objects Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("indexer_configuration/add") def indexer_configuration_add(self, tools): """Add new tools to the storage. Args: tools ([dict]): List of dictionary representing tool to insert in the db. Dictionary with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: List of dict inserted in the db (holding the id key as well). The order of the list is not guaranteed to match the order of the initial list. """ ... @remote_api_endpoint("indexer_configuration/data") def indexer_configuration_get(self, tool): """Retrieve tool information. Args: tool (dict): Dictionary representing a tool with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: The same dictionary with an `id` key, None otherwise. """ ... diff --git a/swh/indexer/storage/model.py b/swh/indexer/storage/model.py index df8a897..6c1f6bf 100644 --- a/swh/indexer/storage/model.py +++ b/swh/indexer/storage/model.py @@ -1,150 +1,131 @@ -# Copyright (C) 2020 The Software Heritage developers +# Copyright (C) 2020-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Classes used internally by the in-memory idx-storage, and will be used for the interface of the idx-storage in the near future.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar import attr from typing_extensions import Final from swh.model.model import Sha1Git, dictify TSelf = TypeVar("TSelf") @attr.s class BaseRow: UNIQUE_KEY_FIELDS: Tuple = ("id", "indexer_configuration_id") id = attr.ib(type=Any) indexer_configuration_id = attr.ib(type=Optional[int], default=None, kw_only=True) tool = attr.ib(type=Optional[Dict], default=None, kw_only=True) def __attrs_post_init__(self): if self.indexer_configuration_id is None and self.tool is None: raise TypeError("Either indexer_configuration_id or tool must be not None.") if self.indexer_configuration_id is not None and self.tool is not None: raise TypeError( "indexer_configuration_id and tool are mutually exclusive; " "only one may be not None." 
) def anonymize(self: TSelf) -> Optional[TSelf]: # Needed to implement swh.journal.writer.ValueProtocol return None def to_dict(self) -> Dict[str, Any]: """Wrapper of `attr.asdict` that can be overridden by subclasses that have special handling of some of the fields.""" d = dictify(attr.asdict(self, recurse=False)) if d["indexer_configuration_id"] is None: del d["indexer_configuration_id"] if d["tool"] is None: del d["tool"] return d @classmethod def from_dict(cls: Type[TSelf], d) -> TSelf: return cls(**d) def unique_key(self) -> Dict: obj = self # tool["id"] and obj.indexer_configuration_id are the same value, but # only one of them is set for any given object if obj.indexer_configuration_id is None: assert obj.tool # constructors ensures tool XOR indexer_configuration_id obj = attr.evolve(obj, indexer_configuration_id=obj.tool["id"], tool=None) return {key: getattr(obj, key) for key in self.UNIQUE_KEY_FIELDS} @attr.s class ContentMimetypeRow(BaseRow): object_type: Final = "content_mimetype" id = attr.ib(type=Sha1Git) mimetype = attr.ib(type=str) encoding = attr.ib(type=str) @attr.s class ContentLanguageRow(BaseRow): object_type: Final = "content_language" id = attr.ib(type=Sha1Git) lang = attr.ib(type=str) -@attr.s -class ContentCtagsRow(BaseRow): - object_type: Final = "content_ctags" - UNIQUE_KEY_FIELDS = ( - "id", - "indexer_configuration_id", - "name", - "kind", - "line", - "lang", - ) - - id = attr.ib(type=Sha1Git) - name = attr.ib(type=str) - kind = attr.ib(type=str) - line = attr.ib(type=int) - lang = attr.ib(type=str) - - @attr.s class ContentLicenseRow(BaseRow): object_type: Final = "content_fossology_license" UNIQUE_KEY_FIELDS = ("id", "indexer_configuration_id", "license") id = attr.ib(type=Sha1Git) license = attr.ib(type=str) @attr.s class ContentMetadataRow(BaseRow): object_type: Final = "content_metadata" id = attr.ib(type=Sha1Git) metadata = attr.ib(type=Dict[str, Any]) @attr.s class DirectoryIntrinsicMetadataRow(BaseRow): object_type: Final = "directory_intrinsic_metadata" id = attr.ib(type=Sha1Git) metadata = attr.ib(type=Dict[str, Any]) mappings = attr.ib(type=List[str]) @attr.s class OriginIntrinsicMetadataRow(BaseRow): object_type: Final = "origin_intrinsic_metadata" id = attr.ib(type=str) metadata = attr.ib(type=Dict[str, Any]) from_directory = attr.ib(type=Sha1Git) mappings = attr.ib(type=List[str]) @attr.s class OriginExtrinsicMetadataRow(BaseRow): object_type: Final = "origin_extrinsic_metadata" id = attr.ib(type=str) """origin URL""" metadata = attr.ib(type=Dict[str, Any]) from_remd_id = attr.ib(type=Sha1Git) """id of the RawExtrinsicMetadata object used as source for indexed metadata""" mappings = attr.ib(type=List[str]) diff --git a/swh/indexer/tasks.py b/swh/indexer/tasks.py index bf20862..a3f41e7 100644 --- a/swh/indexer/tasks.py +++ b/swh/indexer/tasks.py @@ -1,48 +1,42 @@ -# Copyright (C) 2016-2020 The Software Heritage developers +# Copyright (C) 2016-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from celery import shared_task -from .ctags import CtagsIndexer from .fossology_license import FossologyLicenseIndexer, FossologyLicensePartitionIndexer from .metadata import OriginMetadataIndexer from .mimetype import MimetypeIndexer, MimetypePartitionIndexer from .rehash import RecomputeChecksums @shared_task(name=__name__ + ".OriginMetadata") def origin_metadata(*args, **kwargs): 
return OriginMetadataIndexer().run(*args, **kwargs) -@shared_task(name=__name__ + ".Ctags") -def ctags(*args, **kwargs): - return CtagsIndexer().run(*args, **kwargs) - - @shared_task(name=__name__ + ".ContentFossologyLicense") def fossology_license(*args, **kwargs): return FossologyLicenseIndexer().run(*args, **kwargs) @shared_task(name=__name__ + ".RecomputeChecksums") def recompute_checksums(*args, **kwargs): return RecomputeChecksums().run(*args, **kwargs) @shared_task(name=__name__ + ".ContentMimetype") def mimetype(*args, **kwargs): return MimetypeIndexer().run(*args, **kwargs) @shared_task(name=__name__ + ".ContentMimetypePartition") def mimetype_partition(*args, **kwargs): return MimetypePartitionIndexer().run(*args, **kwargs) @shared_task(name=__name__ + ".ContentFossologyLicensePartition") def license_partition(*args, **kwargs): return FossologyLicensePartitionIndexer().run(*args, **kwargs) diff --git a/swh/indexer/tests/storage/generate_data_test.py b/swh/indexer/tests/storage/generate_data_test.py index 9fa73fb..10fc8fa 100644 --- a/swh/indexer/tests/storage/generate_data_test.py +++ b/swh/indexer/tests/storage/generate_data_test.py @@ -1,212 +1,204 @@ -# Copyright (C) 2018-2019 The Software Heritage developers +# Copyright (C) 2018-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from uuid import uuid1 from hypothesis.strategies import composite, one_of, sampled_from, sets, tuples, uuids from swh.model.hashutil import MultiHash MIMETYPES = [ b"application/json", b"application/octet-stream", b"application/xml", b"text/plain", ] ENCODINGS = [ b"iso8859-1", b"iso8859-15", b"latin1", b"utf-8", ] def gen_mimetype(): """Generate one mimetype strategy.""" return one_of(sampled_from(MIMETYPES)) def gen_encoding(): """Generate one encoding strategy.""" return one_of(sampled_from(ENCODINGS)) def _init_content(uuid): """Given a uuid, initialize a content""" return { "id": MultiHash.from_data(uuid.bytes, {"sha1"}).digest()["sha1"], "indexer_configuration_id": 1, } @composite def gen_content_mimetypes(draw, *, min_size=0, max_size=100): """Generate valid and consistent content_mimetypes. Context: Test purposes Args: **draw** (callable): Used by hypothesis to generate data **min_size** (int): Minimal number of elements to generate (default: 0) **max_size** (int): Maximal number of elements to generate (default: 100) Returns: List of content_mimetypes as expected by the content_mimetype_add api endpoint. 
""" _ids = draw( sets( tuples(uuids(), gen_mimetype(), gen_encoding()), min_size=min_size, max_size=max_size, ) ) content_mimetypes = [] for uuid, mimetype, encoding in _ids: content_mimetypes.append( { **_init_content(uuid), "mimetype": mimetype, "encoding": encoding, } ) return content_mimetypes TOOLS = [ - { - "tool_name": "universal-ctags", - "tool_version": "~git7859817b", - "tool_configuration": { - "command_line": "ctags --fields=+lnz --sort=no --links=no " - "--output-format=json " - }, - }, { "tool_name": "swh-metadata-translator", "tool_version": "0.0.1", "tool_configuration": {"type": "local", "context": "NpmMapping"}, }, { "tool_name": "swh-metadata-detector", "tool_version": "0.0.1", "tool_configuration": { "type": "local", "context": ["NpmMapping", "CodemetaMapping"], }, }, { "tool_name": "swh-metadata-detector2", "tool_version": "0.0.1", "tool_configuration": { "type": "local", "context": ["NpmMapping", "CodemetaMapping"], }, }, { "tool_name": "file", "tool_version": "5.22", "tool_configuration": {"command_line": "file --mime "}, }, { "tool_name": "pygments", "tool_version": "2.0.1+dfsg-1.1+deb8u1", "tool_configuration": {"type": "library", "debian-package": "python3-pygments"}, }, { "tool_name": "pygments2", "tool_version": "2.0.1+dfsg-1.1+deb8u1", "tool_configuration": { "type": "library", "debian-package": "python3-pygments", "max_content_size": 10240, }, }, { "tool_name": "nomos", "tool_version": "3.1.0rc2-31-ga2cbb8c", "tool_configuration": {"command_line": "nomossa "}, }, ] MIMETYPE_OBJECTS = [ { "id": MultiHash.from_data(uuid1().bytes, {"sha1"}).digest()["sha1"], "mimetype": mt, "encoding": enc, # 'indexer_configuration_id' will be added after TOOLS get registered } for mt in MIMETYPES for enc in ENCODINGS ] LICENSES = [ b"3DFX", b"BSD", b"GPL", b"Apache2", b"MIT", ] FOSSOLOGY_LICENSES = [ { "id": MultiHash.from_data(uuid1().bytes, {"sha1"}).digest()["sha1"], "licenses": [ LICENSES[i % len(LICENSES)], ], # 'indexer_configuration_id' will be added after TOOLS get registered } for i in range(10) ] def gen_license(): return one_of(sampled_from(LICENSES)) @composite def gen_content_fossology_licenses(draw, *, min_size=0, max_size=100): """Generate valid and consistent content_fossology_licenses. Context: Test purposes Args: **draw** (callable): Used by hypothesis to generate data **min_size** (int): Minimal number of elements to generate (default: 0) **max_size** (int): Maximal number of elements to generate (default: 100) Returns: List of content_fossology_licenses as expected by the content_fossology_license_add api endpoint. 
""" _ids = draw( sets( tuples( uuids(), gen_license(), ), min_size=min_size, max_size=max_size, ) ) content_licenses = [] for uuid, license in _ids: content_licenses.append( { **_init_content(uuid), "licenses": [license], } ) return content_licenses diff --git a/swh/indexer/tests/storage/test_converters.py b/swh/indexer/tests/storage/test_converters.py index 4119293..3a3a2b5 100644 --- a/swh/indexer/tests/storage/test_converters.py +++ b/swh/indexer/tests/storage/test_converters.py @@ -1,191 +1,112 @@ -# Copyright (C) 2015-2020 The Software Heritage developers +# Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.indexer.storage import converters -def test_ctags_to_db() -> None: - input_ctag = { - "id": b"some-id", - "indexer_configuration_id": 100, - "ctags": [ - { - "name": "some-name", - "kind": "some-kind", - "line": 10, - "lang": "Yaml", - }, - { - "name": "main", - "kind": "function", - "line": 12, - "lang": "Yaml", - }, - ], - } - - expected_ctags = [ - { - "id": b"some-id", - "name": "some-name", - "kind": "some-kind", - "line": 10, - "lang": "Yaml", - "indexer_configuration_id": 100, - }, - { - "id": b"some-id", - "name": "main", - "kind": "function", - "line": 12, - "lang": "Yaml", - "indexer_configuration_id": 100, - }, - ] - - # when - actual_ctags = list(converters.ctags_to_db(input_ctag)) - - # then - assert actual_ctags == expected_ctags - - -def test_db_to_ctags() -> None: - input_ctags = { - "id": b"some-id", - "name": "some-name", - "kind": "some-kind", - "line": 10, - "lang": "Yaml", - "tool_id": 200, - "tool_name": "some-toolname", - "tool_version": "some-toolversion", - "tool_configuration": {}, - } - expected_ctags = { - "id": b"some-id", - "name": "some-name", - "kind": "some-kind", - "line": 10, - "lang": "Yaml", - "tool": { - "id": 200, - "name": "some-toolname", - "version": "some-toolversion", - "configuration": {}, - }, - } - - # when - actual_ctags = converters.db_to_ctags(input_ctags) - - # then - assert actual_ctags == expected_ctags - - def test_db_to_mimetype() -> None: input_mimetype = { "id": b"some-id", "tool_id": 10, "tool_name": "some-toolname", "tool_version": "some-toolversion", "tool_configuration": {}, "encoding": b"ascii", "mimetype": b"text/plain", } expected_mimetype = { "id": b"some-id", "encoding": b"ascii", "mimetype": b"text/plain", "tool": { "id": 10, "name": "some-toolname", "version": "some-toolversion", "configuration": {}, }, } actual_mimetype = converters.db_to_mimetype(input_mimetype) assert actual_mimetype == expected_mimetype def test_db_to_language() -> None: input_language = { "id": b"some-id", "tool_id": 20, "tool_name": "some-toolname", "tool_version": "some-toolversion", "tool_configuration": {}, "lang": b"css", } expected_language = { "id": b"some-id", "lang": b"css", "tool": { "id": 20, "name": "some-toolname", "version": "some-toolversion", "configuration": {}, }, } actual_language = converters.db_to_language(input_language) assert actual_language == expected_language def test_db_to_fossology_license() -> None: input_license = { "id": b"some-id", "tool_id": 20, "tool_name": "nomossa", "tool_version": "5.22", "tool_configuration": {}, "license": "GPL2.0", } expected_license = { "id": b"some-id", "license": "GPL2.0", "tool": { "id": 20, "name": "nomossa", "version": "5.22", "configuration": {}, }, } actual_license = 
converters.db_to_fossology_license(input_license) assert actual_license == expected_license def test_db_to_metadata() -> None: input_metadata = { "id": b"some-id", "tool_id": 20, "tool_name": "some-toolname", "tool_version": "some-toolversion", "tool_configuration": {}, "metadata": b"metadata", } expected_metadata = { "id": b"some-id", "metadata": b"metadata", "tool": { "id": 20, "name": "some-toolname", "version": "some-toolversion", "configuration": {}, }, } actual_metadata = converters.db_to_metadata(input_metadata) assert actual_metadata == expected_metadata diff --git a/swh/indexer/tests/storage/test_storage.py b/swh/indexer/tests/storage/test_storage.py index 7a135fb..c8ccd80 100644 --- a/swh/indexer/tests/storage/test_storage.py +++ b/swh/indexer/tests/storage/test_storage.py @@ -1,2088 +1,1825 @@ -# Copyright (C) 2015-2020 The Software Heritage developers +# Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import math import threading from typing import Any, Dict, List, Tuple, Type import attr import pytest from swh.indexer.storage.exc import DuplicateId, IndexerStorageArgumentException from swh.indexer.storage.interface import IndexerStorageInterface, PagedResult from swh.indexer.storage.model import ( BaseRow, - ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from swh.model.hashutil import hash_to_bytes def prepare_mimetypes_from_licenses( fossology_licenses: List[ContentLicenseRow], ) -> List[ContentMimetypeRow]: """Fossology license needs some consistent data in db to run.""" mimetypes = [] for c in fossology_licenses: mimetypes.append( ContentMimetypeRow( id=c.id, mimetype="text/plain", # for filtering on textual data to work encoding="utf-8", indexer_configuration_id=c.indexer_configuration_id, ) ) return mimetypes def endpoint_name(etype: str, ename: str) -> str: """Compute the storage's endpoint's name >>> endpoint_name('content_mimetype', 'add') 'content_mimetype_add' >>> endpoint_name('content_fosso_license', 'delete') 'content_fosso_license_delete' """ return f"{etype}_{ename}" def endpoint(storage, etype: str, ename: str): return getattr(storage, endpoint_name(etype, ename)) def expected_summary(count: int, etype: str, ename: str = "add") -> Dict[str, int]: """Compute the expected summary The key is determine according to etype and ename >>> expected_summary(10, 'content_mimetype', 'add') {'content_mimetype:add': 10} >>> expected_summary(9, 'origin_intrinsic_metadata', 'delete') {'origin_intrinsic_metadata:del': 9} """ pattern = ename[0:3] key = endpoint_name(etype, ename).replace(f"_{ename}", f":{pattern}") return {key: count} def test_check_config(swh_indexer_storage) -> None: assert swh_indexer_storage.check_config(check_write=True) assert swh_indexer_storage.check_config(check_write=False) class StorageETypeTester: """Base class for testing a series of common behaviour between a bunch of endpoint types supported by an IndexerStorage. This is supposed to be inherited with the following class attributes: - endpoint_type - tool_name - example_data See below for example usage. 
""" endpoint_type: str tool_name: str example_data: List[Dict] row_class: Type[BaseRow] def test_missing( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool_id = data.tools[self.tool_name]["id"] # given 2 (hopefully) unknown objects query = [ { "id": data.sha1_1, "indexer_configuration_id": tool_id, }, { "id": data.sha1_2, "indexer_configuration_id": tool_id, }, ] # we expect these are both returned by the xxx_missing endpoint actual_missing = endpoint(storage, etype, "missing")(query) assert list(actual_missing) == [ data.sha1_1, data.sha1_2, ] # now, when we add one of them summary = endpoint(storage, etype, "add")( [ self.row_class.from_dict( { "id": data.sha1_2, **self.example_data[0], "indexer_configuration_id": tool_id, } ) ] ) assert summary == expected_summary(1, etype) # we expect only the other one returned actual_missing = endpoint(storage, etype, "missing")(query) assert list(actual_missing) == [data.sha1_1] def test_add__update_in_place_duplicate( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] data_v1 = { "id": data.sha1_2, **self.example_data[0], "indexer_configuration_id": tool["id"], } # given summary = endpoint(storage, etype, "add")([self.row_class.from_dict(data_v1)]) assert summary == expected_summary(1, etype) # not added # when actual_data = list(endpoint(storage, etype, "get")([data.sha1_2])) expected_data_v1 = [ self.row_class.from_dict( {"id": data.sha1_2, **self.example_data[0], "tool": tool} ) ] # then assert actual_data == expected_data_v1 # given data_v2 = data_v1.copy() data_v2.update(self.example_data[1]) endpoint(storage, etype, "add")([self.row_class.from_dict(data_v2)]) assert summary == expected_summary(1, etype) # modified so counted actual_data = list(endpoint(storage, etype, "get")([data.sha1_2])) expected_data_v2 = [ self.row_class.from_dict( { "id": data.sha1_2, **self.example_data[1], "tool": tool, } ) ] # data did change as the v2 was used to overwrite v1 assert actual_data == expected_data_v2 def test_add_deadlock( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] hashes = [ hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4{:03d}".format(i)) for i in range(1000) ] data_v1 = [ self.row_class.from_dict( { "id": hash_, **self.example_data[0], "indexer_configuration_id": tool["id"], } ) for hash_ in hashes ] data_v2 = [ self.row_class.from_dict( { "id": hash_, **self.example_data[1], "indexer_configuration_id": tool["id"], } ) for hash_ in hashes ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. 
data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given endpoint(storage, etype, "add")(data_v1) # when actual_data = sorted( endpoint(storage, etype, "get")(hashes), key=lambda x: x.id, ) expected_data_v1 = [ self.row_class.from_dict( {"id": hash_, **self.example_data[0], "tool": tool} ) for hash_ in hashes ] # then assert actual_data == expected_data_v1 # given def f1() -> None: endpoint(storage, etype, "add")(data_v2a) def f2() -> None: endpoint(storage, etype, "add")(data_v2b) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = sorted( endpoint(storage, etype, "get")(hashes), key=lambda x: x.id, ) expected_data_v2 = [ self.row_class.from_dict( {"id": hash_, **self.example_data[1], "tool": tool} ) for hash_ in hashes ] assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2) for (item, expected_item_v1, expected_item_v2) in zip( actual_data, expected_data_v1, expected_data_v2 ): assert item in (expected_item_v1, expected_item_v2) def test_add__duplicate_twice( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] data_dir1 = self.row_class.from_dict( { "id": data.directory_id_2, **self.example_data[0], "indexer_configuration_id": tool["id"], } ) data_dir2 = self.row_class.from_dict( { "id": data.directory_id_2, **self.example_data[1], "indexer_configuration_id": tool["id"], } ) # when summary = endpoint(storage, etype, "add")([data_dir1]) assert summary == expected_summary(1, etype) with pytest.raises(DuplicateId): endpoint(storage, etype, "add")([data_dir2, data_dir2]) # then actual_data = list( endpoint(storage, etype, "get")([data.directory_id_2, data.directory_id_1]) ) expected_data = [ self.row_class.from_dict( {"id": data.directory_id_2, **self.example_data[0], "tool": tool} ) ] assert actual_data == expected_data def test_add( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] # conftest fills it with mimetypes storage.journal_writer.journal.objects = [] # type: ignore query = [data.sha1_2, data.sha1_1] data1 = self.row_class.from_dict( { "id": data.sha1_2, **self.example_data[0], "indexer_configuration_id": tool["id"], } ) # when summary = endpoint(storage, etype, "add")([data1]) assert summary == expected_summary(1, etype) # then actual_data = list(endpoint(storage, etype, "get")(query)) # then expected_data = [ self.row_class.from_dict( {"id": data.sha1_2, **self.example_data[0], "tool": tool} ) ] assert actual_data == expected_data journal_objects = storage.journal_writer.journal.objects # type: ignore actual_journal_data = [ obj for (obj_type, obj) in journal_objects if obj_type == self.endpoint_type ] assert list(sorted(actual_journal_data)) == list(sorted(expected_data)) class TestIndexerStorageContentMimetypes(StorageETypeTester): """Test Indexer Storage content_mimetype related methods""" endpoint_type = "content_mimetype" tool_name = "file" example_data = [ { "mimetype": "text/plain", "encoding": "utf-8", }, { "mimetype": "text/html", "encoding": "us-ascii", }, ] row_class = ContentMimetypeRow def test_generate_content_mimetype_get_partition_failure( self, swh_indexer_storage: IndexerStorageInterface ) -> None: """get_partition call with wrong limit input should fail""" storage = 
swh_indexer_storage indexer_configuration_id = 42 with pytest.raises( IndexerStorageArgumentException, match="limit should not be None" ): storage.content_mimetype_get_partition( indexer_configuration_id, 0, 3, limit=None # type: ignore ) def test_generate_content_mimetype_get_partition_no_limit( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition should return result""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id assert len(mimetypes) == 16 nb_partitions = 16 actual_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, partition_id, nb_partitions ) assert actual_result.next_page_token is None actual_ids.extend(actual_result.results) assert len(actual_ids) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_mimetype_get_partition_full( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition for a single partition should return available ids""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, 0, 1 ) assert actual_result.next_page_token is None actual_ids = actual_result.results assert len(actual_ids) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_mimetype_get_partition_empty( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition when at least one of the partitions is empty""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id # nb_partitions = smallest power of 2 such that at least one of # the partitions is empty nb_mimetypes = len(mimetypes) nb_partitions = 1 << math.floor(math.log2(nb_mimetypes) + 1) seen_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, partition_id, nb_partitions, limit=nb_mimetypes + 1, ) for actual_id in actual_result.results: seen_ids.append(actual_id) # Limit is higher than the max number of results assert actual_result.next_page_token is None assert set(seen_ids) == expected_ids def test_generate_content_mimetype_get_partition_with_pagination( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition should return ids provided with pagination""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id nb_partitions = 4 actual_ids = [] for partition_id in range(nb_partitions): next_page_token = None while True: actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, partition_id, nb_partitions, limit=2, page_token=next_page_token, ) actual_ids.extend(actual_result.results) next_page_token = actual_result.next_page_token if next_page_token is None: break assert len(set(actual_ids)) == len(set(expected_ids)) for actual_id in actual_ids: assert actual_id in expected_ids class 
TestIndexerStorageContentLanguage(StorageETypeTester): """Test Indexer Storage content_language related methods""" endpoint_type = "content_language" tool_name = "pygments" example_data = [ { "lang": "haskell", }, { "lang": "common-lisp", }, ] row_class = ContentLanguageRow -class TestIndexerStorageContentCTags(StorageETypeTester): - """Test Indexer Storage content_ctags related methods""" - - endpoint_type = "content_ctags" - tool_name = "universal-ctags" - example_data = [ - { - "name": "done", - "kind": "variable", - "line": 119, - "lang": "OCaml", - }, - { - "name": "done", - "kind": "variable", - "line": 100, - "lang": "Python", - }, - { - "name": "main", - "kind": "function", - "line": 119, - "lang": "Python", - }, - ] - row_class = ContentCtagsRow - - # the following tests are disabled because CTAGS behaves differently - @pytest.mark.skip - def test_add__update_in_place_duplicate(self): - pass - - @pytest.mark.skip - def test_add_deadlock(self): - pass - - def test_content_ctags_search( - self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] - ) -> None: - storage, data = swh_indexer_storage_with_data - # 1. given - tool = data.tools["universal-ctags"] - tool_id = tool["id"] - - ctags1 = [ - ContentCtagsRow( - id=data.sha1_1, - indexer_configuration_id=tool_id, - **kwargs, # type: ignore - ) - for kwargs in [ - { - "name": "hello", - "kind": "function", - "line": 133, - "lang": "Python", - }, - { - "name": "counter", - "kind": "variable", - "line": 119, - "lang": "Python", - }, - { - "name": "hello", - "kind": "variable", - "line": 210, - "lang": "Python", - }, - ] - ] - ctags1_with_tool = [ - attr.evolve(ctag, indexer_configuration_id=None, tool=tool) - for ctag in ctags1 - ] - - ctags2 = [ - ContentCtagsRow( - id=data.sha1_2, - indexer_configuration_id=tool_id, - **kwargs, # type: ignore - ) - for kwargs in [ - { - "name": "hello", - "kind": "variable", - "line": 100, - "lang": "C", - }, - { - "name": "result", - "kind": "variable", - "line": 120, - "lang": "C", - }, - ] - ] - ctags2_with_tool = [ - attr.evolve(ctag, indexer_configuration_id=None, tool=tool) - for ctag in ctags2 - ] - - storage.content_ctags_add(ctags1 + ctags2) - - # 1. when - actual_ctags = list(storage.content_ctags_search("hello", limit=1)) - - # 1. then - assert actual_ctags == [ctags1_with_tool[0]] - - # 2. when - actual_ctags = list( - storage.content_ctags_search("hello", limit=1, last_sha1=data.sha1_1) - ) - - # 2. then - assert actual_ctags == [ctags2_with_tool[0]] - - # 3. when - actual_ctags = list(storage.content_ctags_search("hello")) - - # 3. then - assert actual_ctags == [ - ctags1_with_tool[0], - ctags1_with_tool[2], - ctags2_with_tool[0], - ] - - # 4. when - actual_ctags = list(storage.content_ctags_search("counter")) - - # then - assert actual_ctags == [ctags1_with_tool[1]] - - # 5. 
when - actual_ctags = list(storage.content_ctags_search("result", limit=1)) - - # then - assert actual_ctags == [ctags2_with_tool[1]] - - def test_content_ctags_search_no_result( - self, swh_indexer_storage: IndexerStorageInterface - ) -> None: - storage = swh_indexer_storage - actual_ctags = list(storage.content_ctags_search("counter")) - - assert not actual_ctags - - def test_content_ctags_add__add_new_ctags_added( - self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] - ) -> None: - storage, data = swh_indexer_storage_with_data - - # given - tool = data.tools["universal-ctags"] - tool_id = tool["id"] - - ctag1 = ContentCtagsRow( - id=data.sha1_2, - indexer_configuration_id=tool_id, - name="done", - kind="variable", - line=100, - lang="Scheme", - ) - ctag1_with_tool = attr.evolve(ctag1, indexer_configuration_id=None, tool=tool) - - # given - storage.content_ctags_add([ctag1]) - storage.content_ctags_add([ctag1]) # conflict does nothing - - # when - actual_ctags = list(storage.content_ctags_get([data.sha1_2])) - - # then - assert actual_ctags == [ctag1_with_tool] - - # given - ctag2 = ContentCtagsRow( - id=data.sha1_2, - indexer_configuration_id=tool_id, - name="defn", - kind="function", - line=120, - lang="Scheme", - ) - ctag2_with_tool = attr.evolve(ctag2, indexer_configuration_id=None, tool=tool) - - storage.content_ctags_add([ctag2]) - - actual_ctags = list(storage.content_ctags_get([data.sha1_2])) - - assert actual_ctags == [ctag1_with_tool, ctag2_with_tool] - - def test_content_ctags_add__update_in_place( - self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] - ) -> None: - storage, data = swh_indexer_storage_with_data - # given - tool = data.tools["universal-ctags"] - tool_id = tool["id"] - - ctag1 = ContentCtagsRow( - id=data.sha1_2, - indexer_configuration_id=tool_id, - name="done", - kind="variable", - line=100, - lang="Scheme", - ) - ctag1_with_tool = attr.evolve(ctag1, indexer_configuration_id=None, tool=tool) - - # given - storage.content_ctags_add([ctag1]) - - # when - actual_ctags = list(storage.content_ctags_get([data.sha1_2])) - - # then - assert actual_ctags == [ctag1_with_tool] - - # given - ctag2 = ContentCtagsRow( - id=data.sha1_2, - indexer_configuration_id=tool_id, - name="defn", - kind="function", - line=120, - lang="Scheme", - ) - ctag2_with_tool = attr.evolve(ctag2, indexer_configuration_id=None, tool=tool) - - storage.content_ctags_add([ctag1, ctag2]) - - actual_ctags = list(storage.content_ctags_get([data.sha1_2])) - - assert actual_ctags == [ctag1_with_tool, ctag2_with_tool] - - def test_add_empty( - self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] - ) -> None: - (storage, data) = swh_indexer_storage_with_data - etype = self.endpoint_type - - summary = endpoint(storage, etype, "add")([]) - assert summary == {"content_ctags:add": 0} - - actual_ctags = list(endpoint(storage, etype, "get")([data.sha1_2])) - - assert actual_ctags == [] - - def test_get_unknown( - self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] - ) -> None: - (storage, data) = swh_indexer_storage_with_data - etype = self.endpoint_type - - actual_ctags = list(endpoint(storage, etype, "get")([data.sha1_2])) - - assert actual_ctags == [] - - class TestIndexerStorageContentMetadata(StorageETypeTester): """Test Indexer Storage content_metadata related methods""" tool_name = "swh-metadata-detector" endpoint_type = "content_metadata" example_data = [ { "metadata": { "other": {}, "codeRepository": { "type": "git", 
"url": "https://github.com/moranegg/metadata_test", }, "description": "Simple package.json test for indexer", "name": "test_metadata", "version": "0.0.1", }, }, { "metadata": {"other": {}, "name": "test_metadata", "version": "0.0.1"}, }, ] row_class = ContentMetadataRow class TestIndexerStorageDirectoryIntrinsicMetadata(StorageETypeTester): """Test Indexer Storage directory_intrinsic_metadata related methods""" tool_name = "swh-metadata-detector" endpoint_type = "directory_intrinsic_metadata" example_data = [ { "metadata": { "other": {}, "codeRepository": { "type": "git", "url": "https://github.com/moranegg/metadata_test", }, "description": "Simple package.json test for indexer", "name": "test_metadata", "version": "0.0.1", }, "mappings": ["mapping1"], }, { "metadata": {"other": {}, "name": "test_metadata", "version": "0.0.1"}, "mappings": ["mapping2"], }, ] row_class = DirectoryIntrinsicMetadataRow class TestIndexerStorageContentFossologyLicense(StorageETypeTester): endpoint_type = "content_fossology_license" tool_name = "nomos" example_data = [ {"license": "Apache-2.0"}, {"license": "BSD-2-Clause"}, ] row_class = ContentLicenseRow # the following tests are disabled because licenses behaves differently @pytest.mark.skip def test_add__update_in_place_duplicate(self): pass @pytest.mark.skip def test_add_deadlock(self): pass # content_fossology_license_missing does not exist @pytest.mark.skip def test_missing(self): pass def test_content_fossology_license_add__new_license_added( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool = data.tools["nomos"] tool_id = tool["id"] license1 = ContentLicenseRow( id=data.sha1_1, license="Apache-2.0", indexer_configuration_id=tool_id, ) # given storage.content_fossology_license_add([license1]) # conflict does nothing storage.content_fossology_license_add([license1]) # when actual_licenses = list(storage.content_fossology_license_get([data.sha1_1])) # then expected_licenses = [ ContentLicenseRow( id=data.sha1_1, license="Apache-2.0", tool=tool, ) ] assert actual_licenses == expected_licenses # given license2 = ContentLicenseRow( id=data.sha1_1, license="BSD-2-Clause", indexer_configuration_id=tool_id, ) storage.content_fossology_license_add([license2]) actual_licenses = list(storage.content_fossology_license_get([data.sha1_1])) expected_licenses.append( ContentLicenseRow( id=data.sha1_1, license="BSD-2-Clause", tool=tool, ) ) # first license was not removed when the second one was added assert sorted(actual_licenses) == sorted(expected_licenses) def test_generate_content_fossology_license_get_partition_failure( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition call with wrong limit input should fail""" storage, data = swh_indexer_storage_with_data indexer_configuration_id = 42 with pytest.raises( IndexerStorageArgumentException, match="limit should not be None" ): storage.content_fossology_license_get_partition( indexer_configuration_id, 0, 3, limit=None, # type: ignore ) def test_generate_content_fossology_license_get_partition_no_limit( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition should return results""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = 
fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = set([c.id for c in fossology_licenses]) assert len(fossology_licenses) == 10 assert len(mimetypes) == 10 nb_partitions = 4 actual_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, partition_id, nb_partitions ) assert actual_result.next_page_token is None actual_ids.extend(actual_result.results) assert len(set(actual_ids)) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_fossology_license_get_partition_full( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition for a single partition should return available ids""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = set([c.id for c in fossology_licenses]) actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, 0, 1 ) assert actual_result.next_page_token is None actual_ids = actual_result.results assert len(set(actual_ids)) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_fossology_license_get_partition_empty( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition when at least one of the partitions is empty""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = set([c.id for c in fossology_licenses]) # nb_partitions = smallest power of 2 such that at least one of # the partitions is empty nb_licenses = len(fossology_licenses) nb_partitions = 1 << math.floor(math.log2(nb_licenses) + 1) seen_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, partition_id, nb_partitions, limit=nb_licenses + 1, ) for actual_id in actual_result.results: seen_ids.append(actual_id) # Limit is higher than the max number of results assert actual_result.next_page_token is None assert set(seen_ids) == expected_ids def test_generate_content_fossology_license_get_partition_with_pagination( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition should return ids provided with paginationv""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage 
storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = [c.id for c in fossology_licenses] nb_partitions = 4 actual_ids = [] for partition_id in range(nb_partitions): next_page_token = None while True: actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, partition_id, nb_partitions, limit=2, page_token=next_page_token, ) actual_ids.extend(actual_result.results) next_page_token = actual_result.next_page_token if next_page_token is None: break assert len(set(actual_ids)) == len(set(expected_ids)) for actual_id in actual_ids: assert actual_id in expected_ids def test_add_empty( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: (storage, data) = swh_indexer_storage_with_data etype = self.endpoint_type summary = endpoint(storage, etype, "add")([]) assert summary == {"content_fossology_license:add": 0} actual_license = list(endpoint(storage, etype, "get")([data.sha1_2])) assert actual_license == [] def test_get_unknown( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: (storage, data) = swh_indexer_storage_with_data etype = self.endpoint_type actual_license = list(endpoint(storage, etype, "get")([data.sha1_2])) assert actual_license == [] class TestIndexerStorageOriginIntrinsicMetadata: def test_origin_intrinsic_metadata_add( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata = { "version": None, "name": None, } metadata_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata, mappings=["mapping1"], indexer_configuration_id=tool_id, ) metadata_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, indexer_configuration_id=tool_id, mappings=["mapping1"], from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata_dir]) storage.origin_intrinsic_metadata_add([metadata_origin]) # then actual_metadata = list( storage.origin_intrinsic_metadata_get([data.origin_url_1, "no://where"]) ) expected_metadata = [ OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, tool=data.tools["swh-metadata-detector"], from_directory=data.directory_id_2, mappings=["mapping1"], ) ] assert actual_metadata == expected_metadata journal_objects = storage.journal_writer.journal.objects # type: ignore actual_journal_metadata = [ obj for (obj_type, obj) in journal_objects if obj_type == "origin_intrinsic_metadata" ] assert list(sorted(actual_journal_metadata)) == list(sorted(expected_metadata)) def test_origin_intrinsic_metadata_add_update_in_place_duplicate( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata_v1: Dict[str, Any] = { "version": None, "name": None, } metadata_dir_v1 = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata_v1, mappings=[], indexer_configuration_id=tool_id, ) metadata_origin_v1 = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v1.copy(), indexer_configuration_id=tool_id, mappings=[], from_directory=data.directory_id_2, ) # given storage.directory_intrinsic_metadata_add([metadata_dir_v1]) storage.origin_intrinsic_metadata_add([metadata_origin_v1]) # when actual_metadata = list( storage.origin_intrinsic_metadata_get([data.origin_url_1]) 
) # then expected_metadata_v1 = [ OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v1, tool=data.tools["swh-metadata-detector"], from_directory=data.directory_id_2, mappings=[], ) ] assert actual_metadata == expected_metadata_v1 # given metadata_v2 = metadata_v1.copy() metadata_v2.update( { "name": "test_update_duplicated_metadata", "author": "MG", } ) metadata_dir_v2 = attr.evolve(metadata_dir_v1, metadata=metadata_v2) metadata_origin_v2 = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v2.copy(), indexer_configuration_id=tool_id, mappings=["npm"], from_directory=data.directory_id_1, ) storage.directory_intrinsic_metadata_add([metadata_dir_v2]) storage.origin_intrinsic_metadata_add([metadata_origin_v2]) actual_metadata = list( storage.origin_intrinsic_metadata_get([data.origin_url_1]) ) expected_metadata_v2 = [ OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v2, tool=data.tools["swh-metadata-detector"], from_directory=data.directory_id_1, mappings=["npm"], ) ] # metadata did change as the v2 was used to overwrite v1 assert actual_metadata == expected_metadata_v2 def test_origin_intrinsic_metadata_add__deadlock( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] origins = ["file:///tmp/origin{:02d}".format(i) for i in range(100)] example_data1: Dict[str, Any] = { "metadata": { "version": None, "name": None, }, "mappings": [], } example_data2: Dict[str, Any] = { "metadata": { "version": "v1.1.1", "name": "foo", }, "mappings": [], } metadata_dir_v1 = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata={ "version": None, "name": None, }, mappings=[], indexer_configuration_id=tool_id, ) data_v1 = [ OriginIntrinsicMetadataRow( id=origin, from_directory=data.directory_id_2, indexer_configuration_id=tool_id, **example_data1, ) for origin in origins ] data_v2 = [ OriginIntrinsicMetadataRow( id=origin, from_directory=data.directory_id_2, indexer_configuration_id=tool_id, **example_data2, ) for origin in origins ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. 
data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given storage.directory_intrinsic_metadata_add([metadata_dir_v1]) storage.origin_intrinsic_metadata_add(data_v1) # when actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v1 = [ OriginIntrinsicMetadataRow( id=origin, from_directory=data.directory_id_2, tool=data.tools["swh-metadata-detector"], **example_data1, ) for origin in origins ] # then assert actual_data == expected_data_v1 # given def f1() -> None: storage.origin_intrinsic_metadata_add(data_v2a) def f2() -> None: storage.origin_intrinsic_metadata_add(data_v2b) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v2 = [ OriginIntrinsicMetadataRow( id=origin, from_directory=data.directory_id_2, tool=data.tools["swh-metadata-detector"], **example_data2, ) for origin in origins ] actual_data.sort(key=lambda item: item.id) assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2) for (item, expected_item_v1, expected_item_v2) in zip( actual_data, expected_data_v1, expected_data_v2 ): assert item in (expected_item_v1, expected_item_v2) def test_origin_intrinsic_metadata_add__duplicate_twice( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata = { "developmentStatus": None, "name": None, } metadata_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata, mappings=["mapping1"], indexer_configuration_id=tool_id, ) metadata_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, indexer_configuration_id=tool_id, mappings=["mapping1"], from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata_dir]) with pytest.raises(DuplicateId): storage.origin_intrinsic_metadata_add([metadata_origin, metadata_origin]) def test_origin_intrinsic_metadata_search_fulltext( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata1 = { "author": "John Doe", } metadata1_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, ) metadata1_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_1, ) metadata2 = { "author": "Jane Doe", } metadata2_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, ) metadata2_origin = OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata1_dir]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.directory_intrinsic_metadata_add([metadata2_dir]) storage.origin_intrinsic_metadata_add([metadata2_origin]) # then search = storage.origin_intrinsic_metadata_search_fulltext assert set([res.id for res in search(["Doe"])]) == set( [data.origin_url_1, data.origin_url_2] ) assert [res.id for res in search(["John", "Doe"])] == [data.origin_url_1] assert [res.id for res in search(["John"])] == [data.origin_url_1] assert not 
list(search(["John", "Jane"])) def test_origin_intrinsic_metadata_search_fulltext_rank( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] # The following authors have "Random Person" to add some more content # to the JSON data, to work around normalization quirks when there # are few words (rank/(1+ln(nb_words)) is very sensitive to nb_words # for small values of nb_words). metadata1 = { "author": [ "Random Person", "John Doe", "Jane Doe", ] } metadata1_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, ) metadata1_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_1, ) metadata2 = { "author": [ "Random Person", "Jane Doe", ] } metadata2_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, ) metadata2_origin = OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata1_dir]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.directory_intrinsic_metadata_add([metadata2_dir]) storage.origin_intrinsic_metadata_add([metadata2_origin]) # then search = storage.origin_intrinsic_metadata_search_fulltext assert [res.id for res in search(["Doe"])] == [ data.origin_url_1, data.origin_url_2, ] assert [res.id for res in search(["Doe"], limit=1)] == [data.origin_url_1] assert [res.id for res in search(["John"])] == [data.origin_url_1] assert [res.id for res in search(["Jane"])] == [ data.origin_url_2, data.origin_url_1, ] assert [res.id for res in search(["John", "Jane"])] == [data.origin_url_1] def _fill_origin_intrinsic_metadata( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool1_id = data.tools["swh-metadata-detector"]["id"] tool2_id = data.tools["swh-metadata-detector2"]["id"] metadata1 = { "@context": "foo", "author": "John Doe", } metadata1_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_1, metadata=metadata1, mappings=["npm"], indexer_configuration_id=tool1_id, ) metadata1_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata1, mappings=["npm"], indexer_configuration_id=tool1_id, from_directory=data.directory_id_1, ) metadata2 = { "@context": "foo", "author": "Jane Doe", } metadata2_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata2, mappings=["npm", "gemspec"], indexer_configuration_id=tool2_id, ) metadata2_origin = OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata=metadata2, mappings=["npm", "gemspec"], indexer_configuration_id=tool2_id, from_directory=data.directory_id_2, ) metadata3 = { "@context": "foo", } metadata3_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_3, metadata=metadata3, mappings=["npm", "gemspec"], indexer_configuration_id=tool2_id, ) metadata3_origin = OriginIntrinsicMetadataRow( id=data.origin_url_3, metadata=metadata3, mappings=["pkg-info"], indexer_configuration_id=tool2_id, from_directory=data.directory_id_3, ) storage.directory_intrinsic_metadata_add([metadata1_dir]) storage.origin_intrinsic_metadata_add([metadata1_origin]) 
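        # Each OriginIntrinsicMetadataRow here points at its source directory row
        # via ``from_directory``, so every directory row is added just before the
        # origin row that references it; the same pairing is repeated below for
        # metadata2 and metadata3.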
storage.directory_intrinsic_metadata_add([metadata2_dir]) storage.origin_intrinsic_metadata_add([metadata2_origin]) storage.directory_intrinsic_metadata_add([metadata3_dir]) storage.origin_intrinsic_metadata_add([metadata3_origin]) def test_origin_intrinsic_metadata_search_by_producer( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data self._fill_origin_intrinsic_metadata(swh_indexer_storage_with_data) tool1 = data.tools["swh-metadata-detector"] tool2 = data.tools["swh-metadata-detector2"] endpoint = storage.origin_intrinsic_metadata_search_by_producer # test pagination # no 'page_token' param, return all origins result = endpoint(ids_only=True) assert result == PagedResult( results=[ data.origin_url_1, data.origin_url_2, data.origin_url_3, ], next_page_token=None, ) # 'page_token' is < than origin_1, return everything result = endpoint(page_token=data.origin_url_1[:-1], ids_only=True) assert result == PagedResult( results=[ data.origin_url_1, data.origin_url_2, data.origin_url_3, ], next_page_token=None, ) # 'page_token' is origin_3, return nothing result = endpoint(page_token=data.origin_url_3, ids_only=True) assert result == PagedResult(results=[], next_page_token=None) # test limit argument result = endpoint(page_token=data.origin_url_1[:-1], limit=2, ids_only=True) assert result == PagedResult( results=[data.origin_url_1, data.origin_url_2], next_page_token=data.origin_url_2, ) result = endpoint(page_token=data.origin_url_1, limit=2, ids_only=True) assert result == PagedResult( results=[data.origin_url_2, data.origin_url_3], next_page_token=None, ) result = endpoint(page_token=data.origin_url_2, limit=2, ids_only=True) assert result == PagedResult( results=[data.origin_url_3], next_page_token=None, ) # test mappings filtering result = endpoint(mappings=["npm"], ids_only=True) assert result == PagedResult( results=[data.origin_url_1, data.origin_url_2], next_page_token=None, ) result = endpoint(mappings=["npm", "gemspec"], ids_only=True) assert result == PagedResult( results=[data.origin_url_1, data.origin_url_2], next_page_token=None, ) result = endpoint(mappings=["gemspec"], ids_only=True) assert result == PagedResult( results=[data.origin_url_2], next_page_token=None, ) result = endpoint(mappings=["pkg-info"], ids_only=True) assert result == PagedResult( results=[data.origin_url_3], next_page_token=None, ) result = endpoint(mappings=["foobar"], ids_only=True) assert result == PagedResult( results=[], next_page_token=None, ) # test pagination + mappings result = endpoint(mappings=["npm"], limit=1, ids_only=True) assert result == PagedResult( results=[data.origin_url_1], next_page_token=data.origin_url_1, ) # test tool filtering result = endpoint(tool_ids=[tool1["id"]], ids_only=True) assert result == PagedResult( results=[data.origin_url_1], next_page_token=None, ) result = endpoint(tool_ids=[tool2["id"]], ids_only=True) assert sorted(result.results) == [data.origin_url_2, data.origin_url_3] assert result.next_page_token is None result = endpoint(tool_ids=[tool1["id"], tool2["id"]], ids_only=True) assert sorted(result.results) == [ data.origin_url_1, data.origin_url_2, data.origin_url_3, ] assert result.next_page_token is None # test ids_only=False assert endpoint(mappings=["gemspec"]) == PagedResult( results=[ OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata={ "@context": "foo", "author": "Jane Doe", }, mappings=["npm", "gemspec"], tool=tool2, from_directory=data.directory_id_2, ) ], 
next_page_token=None, ) def test_origin_intrinsic_metadata_stats( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data self._fill_origin_intrinsic_metadata(swh_indexer_storage_with_data) result = storage.origin_intrinsic_metadata_stats() assert result == { "per_mapping": { "cff": 0, "gemspec": 1, "npm": 2, "pkg-info": 1, "codemeta": 0, "maven": 0, }, "total": 3, "non_empty": 2, } class TestIndexerStorageOriginExtrinsicMetadata: def test_origin_extrinsic_metadata_add( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata = { "version": None, "name": None, } metadata_origin = OriginExtrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, indexer_configuration_id=tool_id, mappings=["mapping1"], from_remd_id=b"\x02" * 20, ) # when storage.origin_extrinsic_metadata_add([metadata_origin]) # then actual_metadata = list( storage.origin_extrinsic_metadata_get([data.origin_url_1, "no://where"]) ) expected_metadata = [ OriginExtrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, tool=data.tools["swh-metadata-detector"], from_remd_id=b"\x02" * 20, mappings=["mapping1"], ) ] assert actual_metadata == expected_metadata journal_objects = storage.journal_writer.journal.objects # type: ignore actual_journal_metadata = [ obj for (obj_type, obj) in journal_objects if obj_type == "origin_extrinsic_metadata" ] assert list(sorted(actual_journal_metadata)) == list(sorted(expected_metadata)) def test_origin_extrinsic_metadata_add_update_in_place_duplicate( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata_v1: Dict[str, Any] = { "version": None, "name": None, } metadata_origin_v1 = OriginExtrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v1.copy(), indexer_configuration_id=tool_id, mappings=[], from_remd_id=b"\x02" * 20, ) # given storage.origin_extrinsic_metadata_add([metadata_origin_v1]) # when actual_metadata = list( storage.origin_extrinsic_metadata_get([data.origin_url_1]) ) # then expected_metadata_v1 = [ OriginExtrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v1, tool=data.tools["swh-metadata-detector"], from_remd_id=b"\x02" * 20, mappings=[], ) ] assert actual_metadata == expected_metadata_v1 # given metadata_v2 = metadata_v1.copy() metadata_v2.update( { "name": "test_update_duplicated_metadata", "author": "MG", } ) metadata_origin_v2 = OriginExtrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v2.copy(), indexer_configuration_id=tool_id, mappings=["github"], from_remd_id=b"\x02" * 20, ) storage.origin_extrinsic_metadata_add([metadata_origin_v2]) actual_metadata = list( storage.origin_extrinsic_metadata_get([data.origin_url_1]) ) expected_metadata_v2 = [ OriginExtrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v2, tool=data.tools["swh-metadata-detector"], from_remd_id=b"\x02" * 20, mappings=["github"], ) ] # metadata did change as the v2 was used to overwrite v1 assert actual_metadata == expected_metadata_v2 def test_origin_extrinsic_metadata_add__deadlock( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] origins = 
["file:///tmp/origin{:02d}".format(i) for i in range(100)] example_data1: Dict[str, Any] = { "metadata": { "version": None, "name": None, }, "mappings": [], } example_data2: Dict[str, Any] = { "metadata": { "version": "v1.1.1", "name": "foo", }, "mappings": [], } data_v1 = [ OriginExtrinsicMetadataRow( id=origin, from_remd_id=b"\x02" * 20, indexer_configuration_id=tool_id, **example_data1, ) for origin in origins ] data_v2 = [ OriginExtrinsicMetadataRow( id=origin, from_remd_id=b"\x02" * 20, indexer_configuration_id=tool_id, **example_data2, ) for origin in origins ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given storage.origin_extrinsic_metadata_add(data_v1) # when actual_data = list(storage.origin_extrinsic_metadata_get(origins)) expected_data_v1 = [ OriginExtrinsicMetadataRow( id=origin, from_remd_id=b"\x02" * 20, tool=data.tools["swh-metadata-detector"], **example_data1, ) for origin in origins ] # then assert actual_data == expected_data_v1 # given def f1() -> None: storage.origin_extrinsic_metadata_add(data_v2a) def f2() -> None: storage.origin_extrinsic_metadata_add(data_v2b) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = list(storage.origin_extrinsic_metadata_get(origins)) expected_data_v2 = [ OriginExtrinsicMetadataRow( id=origin, from_remd_id=b"\x02" * 20, tool=data.tools["swh-metadata-detector"], **example_data2, ) for origin in origins ] actual_data.sort(key=lambda item: item.id) assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2) for (item, expected_item_v1, expected_item_v2) in zip( actual_data, expected_data_v1, expected_data_v2 ): assert item in (expected_item_v1, expected_item_v2) def test_origin_extrinsic_metadata_add__duplicate_twice( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata = { "developmentStatus": None, "name": None, } metadata_origin = OriginExtrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, indexer_configuration_id=tool_id, mappings=["mapping1"], from_remd_id=b"\x02" * 20, ) # when with pytest.raises(DuplicateId): storage.origin_extrinsic_metadata_add([metadata_origin, metadata_origin]) class TestIndexerStorageIndexerConfiguration: def test_indexer_configuration_add( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "some-unknown-tool", "tool_version": "some-version", "tool_configuration": {"debian-package": "some-package"}, } actual_tool = storage.indexer_configuration_get(tool) assert actual_tool is None # does not exist # add it actual_tools = list(storage.indexer_configuration_add([tool])) assert len(actual_tools) == 1 actual_tool = actual_tools[0] assert actual_tool is not None # now it exists new_id = actual_tool.pop("id") assert actual_tool == tool actual_tools2 = list(storage.indexer_configuration_add([tool])) actual_tool2 = actual_tools2[0] assert actual_tool2 is not None # now it exists new_id2 = actual_tool2.pop("id") assert new_id == new_id2 assert actual_tool == actual_tool2 def test_indexer_configuration_add_multiple( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": 
"some-unknown-tool", "tool_version": "some-version", "tool_configuration": {"debian-package": "some-package"}, } actual_tools = list(storage.indexer_configuration_add([tool])) assert len(actual_tools) == 1 new_tools = [ tool, { "tool_name": "yet-another-tool", "tool_version": "version", "tool_configuration": {}, }, ] actual_tools = list(storage.indexer_configuration_add(new_tools)) assert len(actual_tools) == 2 # order not guaranteed, so we iterate over results to check for tool in actual_tools: _id = tool.pop("id") assert _id is not None assert tool in new_tools def test_indexer_configuration_get_missing( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "unknown-tool", "tool_version": "3.1.0rc2-31-ga2cbb8c", "tool_configuration": {"command_line": "nomossa "}, } actual_tool = storage.indexer_configuration_get(tool) assert actual_tool is None def test_indexer_configuration_get( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "nomos", "tool_version": "3.1.0rc2-31-ga2cbb8c", "tool_configuration": {"command_line": "nomossa "}, } actual_tool = storage.indexer_configuration_get(tool) assert actual_tool expected_tool = tool.copy() del actual_tool["id"] assert expected_tool == actual_tool def test_indexer_configuration_metadata_get_missing_context( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "swh-metadata-translator", "tool_version": "0.0.1", "tool_configuration": {"context": "unknown-context"}, } actual_tool = storage.indexer_configuration_get(tool) assert actual_tool is None def test_indexer_configuration_metadata_get( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "swh-metadata-translator", "tool_version": "0.0.1", "tool_configuration": {"type": "local", "context": "NpmMapping"}, } storage.indexer_configuration_add([tool]) actual_tool = storage.indexer_configuration_get(tool) assert actual_tool expected_tool = tool.copy() expected_tool["id"] = actual_tool["id"] assert expected_tool == actual_tool diff --git a/swh/indexer/tests/test_ctags.py b/swh/indexer/tests/test_ctags.py deleted file mode 100644 index 02626ea..0000000 --- a/swh/indexer/tests/test_ctags.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (C) 2017-2022 The Software Heritage developers -# See the AUTHORS file at the top-level directory of this distribution -# License: GNU General Public License version 3, or any later version -# See top-level LICENSE file for more information - -import json -import unittest -from unittest.mock import patch - -import pytest - -import swh.indexer.ctags -from swh.indexer.ctags import CtagsIndexer, run_ctags -from swh.indexer.storage.model import ContentCtagsRow -from swh.indexer.tests.utils import ( - BASE_TEST_CONFIG, - OBJ_STORAGE_DATA, - RAW_CONTENT_IDS, - SHA1_TO_CTAGS, - CommonContentIndexerTest, - fill_obj_storage, - fill_storage, - filter_dict, -) -from swh.model.hashutil import hash_to_bytes - - -class BasicTest(unittest.TestCase): - @patch("swh.indexer.ctags.subprocess") - def test_run_ctags(self, mock_subprocess): - """Computing licenses from a raw content should return results""" - output0 = """ -{"name":"defun","kind":"function","line":1,"language":"scheme"} 
-{"name":"name","kind":"symbol","line":5,"language":"else"}""" - output1 = """ -{"name":"let","kind":"var","line":10,"language":"something"}""" - - expected_result0 = [ - {"name": "defun", "kind": "function", "line": 1, "lang": "scheme"}, - {"name": "name", "kind": "symbol", "line": 5, "lang": "else"}, - ] - - expected_result1 = [ - {"name": "let", "kind": "var", "line": 10, "lang": "something"} - ] - for path, lang, intermediary_result, expected_result in [ - (b"some/path", "lisp", output0, expected_result0), - (b"some/path/2", "markdown", output1, expected_result1), - ]: - mock_subprocess.check_output.return_value = intermediary_result - actual_result = list(run_ctags(path, lang=lang)) - self.assertEqual(actual_result, expected_result) - - -class InjectCtagsIndexer: - """Override ctags computations.""" - - def compute_ctags(self, path, lang): - """Inject fake ctags given path (sha1 identifier).""" - return {"lang": lang, **SHA1_TO_CTAGS.get(path)} - - -CONFIG = { - **BASE_TEST_CONFIG, - "tools": { - "name": "universal-ctags", - "version": "~git7859817b", - "configuration": { - "command_line": """ctags --fields=+lnz --sort=no """ - """ --links=no """, - "max_content_size": 1000, - }, - }, - "languages": { - "python": "python", - "haskell": "haskell", - "bar": "bar", - }, - "workdir": "/tmp", -} - - -class TestCtagsIndexer(CommonContentIndexerTest, unittest.TestCase): - """Ctags indexer test scenarios: - - - Known sha1s in the input list have their data indexed - - Unknown sha1 in the input list are not indexed - - """ - - def get_indexer_results(self, ids): - yield from self.idx_storage.content_ctags_get(ids) - - def setUp(self): - super().setUp() - self.indexer = CtagsIndexer(config=CONFIG) - self.indexer.catch_exceptions = False - self.idx_storage = self.indexer.idx_storage - fill_storage(self.indexer.storage) - fill_obj_storage(self.indexer.objstorage) - - # Prepare test input - self.id0, self.id1, self.id2 = RAW_CONTENT_IDS - - tool = {k.replace("tool_", ""): v for (k, v) in self.indexer.tool.items()} - - self.expected_results = [ - *[ - ContentCtagsRow( - id=self.id0, - tool=tool, - **kwargs, - ) - for kwargs in SHA1_TO_CTAGS[self.id0] - ], - *[ - ContentCtagsRow( - id=self.id1, - tool=tool, - **kwargs, - ) - for kwargs in SHA1_TO_CTAGS[self.id1] - ], - *[ - ContentCtagsRow( - id=self.id2, - tool=tool, - **kwargs, - ) - for kwargs in SHA1_TO_CTAGS[self.id2] - ], - ] - - self._set_mocks() - - def _set_mocks(self): - def find_ctags_for_content(raw_content): - for (sha1, ctags) in SHA1_TO_CTAGS.items(): - if OBJ_STORAGE_DATA[hash_to_bytes(sha1)] == raw_content: - return ctags - else: - raise ValueError( - ("%r not found in objstorage, can't mock its ctags.") % raw_content - ) - - def fake_language(raw_content, *args, **kwargs): - ctags = find_ctags_for_content(raw_content) - return {"lang": ctags[0]["lang"]} - - self._real_compute_language = swh.indexer.ctags.compute_language - swh.indexer.ctags.compute_language = fake_language - - def fake_check_output(cmd, *args, **kwargs): - id_ = cmd[-1].split("/")[-1] - return "\n".join( - json.dumps({"language": ctag["lang"], **ctag}) - for ctag in SHA1_TO_CTAGS[hash_to_bytes(id_)] - ) - - self._real_check_output = swh.indexer.ctags.subprocess.check_output - swh.indexer.ctags.subprocess.check_output = fake_check_output - - def tearDown(self): - swh.indexer.ctags.compute_language = self._real_compute_language - swh.indexer.ctags.subprocess.check_output = self._real_check_output - super().tearDown() - - -def test_ctags_w_no_tool(): - with 
pytest.raises(ValueError): - CtagsIndexer(config=filter_dict(CONFIG, "tools")) diff --git a/swh/indexer/tests/utils.py b/swh/indexer/tests/utils.py index c5586b3..db0ee95 100644 --- a/swh/indexer/tests/utils.py +++ b/swh/indexer/tests/utils.py @@ -1,802 +1,774 @@ # Copyright (C) 2017-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc import datetime import functools from typing import Any, Dict, List, Tuple import unittest from hypothesis import strategies from swh.core.api.classes import stream_results from swh.indexer.storage import INDEXER_CFG_KEY from swh.model.hashutil import hash_to_bytes from swh.model.model import ( Content, Directory, DirectoryEntry, ObjectType, Origin, OriginVisit, OriginVisitStatus, Person, Release, Revision, RevisionType, Snapshot, SnapshotBranch, TargetType, TimestampWithTimezone, ) from swh.storage.utils import now BASE_TEST_CONFIG: Dict[str, Dict[str, Any]] = { "storage": {"cls": "memory"}, "objstorage": {"cls": "memory"}, INDEXER_CFG_KEY: {"cls": "memory"}, } ORIGIN_VISITS = [ {"type": "git", "origin": "https://github.com/SoftwareHeritage/swh-storage"}, {"type": "ftp", "origin": "rsync://ftp.gnu.org/gnu/3dldf"}, { "type": "deposit", "origin": "https://forge.softwareheritage.org/source/jesuisgpl/", }, { "type": "pypi", "origin": "https://old-pypi.example.org/project/limnoria/", }, # with rev head {"type": "pypi", "origin": "https://pypi.org/project/limnoria/"}, # with rel head {"type": "svn", "origin": "http://0-512-md.googlecode.com/svn/"}, {"type": "git", "origin": "https://github.com/librariesio/yarn-parser"}, {"type": "git", "origin": "https://github.com/librariesio/yarn-parser.git"}, {"type": "git", "origin": "https://npm.example.org/yarn-parser"}, ] ORIGINS = [Origin(url=visit["origin"]) for visit in ORIGIN_VISITS] OBJ_STORAGE_RAW_CONTENT: Dict[str, bytes] = { "text:some": b"this is some text", "text:another": b"another text", "text:yet": b"yet another text", "python:code": b""" import unittest import logging from swh.indexer.mimetype import MimetypeIndexer from swh.indexer.tests.test_utils import MockObjStorage class MockStorage(): def content_mimetype_add(self, mimetypes): self.state = mimetypes def indexer_configuration_add(self, tools): return [{ 'id': 10, }] """, "c:struct": b""" #ifndef __AVL__ #define __AVL__ typedef struct _avl_tree avl_tree; typedef struct _data_t { int content; } data_t; """, "lisp:assertion": b""" (should 'pygments (recognize 'lisp 'easily)) """, "json:test-metadata-package.json": b""" { "name": "test_metadata", "version": "0.0.1", "description": "Simple package.json test for indexer", "repository": { "type": "git", "url": "https://github.com/moranegg/metadata_test" } } """, "json:npm-package.json": b""" { "version": "5.0.3", "name": "npm", "description": "a package manager for JavaScript", "keywords": [ "install", "modules", "package manager", "package.json" ], "preferGlobal": true, "config": { "publishtest": false }, "homepage": "https://docs.npmjs.com/", "author": "Isaac Z. 
Schlueter (http://blog.izs.me)", "repository": { "type": "git", "url": "https://github.com/npm/npm" }, "bugs": { "url": "https://github.com/npm/npm/issues" }, "dependencies": { "JSONStream": "~1.3.1", "abbrev": "~1.1.0", "ansi-regex": "~2.1.1", "ansicolors": "~0.3.2", "ansistyles": "~0.1.3" }, "devDependencies": { "tacks": "~1.2.6", "tap": "~10.3.2" }, "license": "Artistic-2.0" } """, "text:carriage-return": b""" """, "text:empty": b"", # was 626364 / b'bcd' "text:unimportant": b"unimportant content for bcd", # was 636465 / b'cde' now yarn-parser package.json "json:yarn-parser-package.json": b""" { "name": "yarn-parser", "version": "1.0.0", "description": "Tiny web service for parsing yarn.lock files", "main": "index.js", "scripts": { "start": "node index.js", "test": "mocha" }, "engines": { "node": "9.8.0" }, "repository": { "type": "git", "url": "git+https://github.com/librariesio/yarn-parser.git" }, "keywords": [ "yarn", "parse", "lock", "dependencies" ], "author": "Andrew Nesbitt", "license": "AGPL-3.0", "bugs": { "url": "https://github.com/librariesio/yarn-parser/issues" }, "homepage": "https://github.com/librariesio/yarn-parser#readme", "dependencies": { "@yarnpkg/lockfile": "^1.0.0", "body-parser": "^1.15.2", "express": "^4.14.0" }, "devDependencies": { "chai": "^4.1.2", "mocha": "^5.2.0", "request": "^2.87.0", "test": "^0.6.0" } } """, } MAPPING_DESCRIPTION_CONTENT_SHA1GIT: Dict[str, bytes] = {} MAPPING_DESCRIPTION_CONTENT_SHA1: Dict[str, bytes] = {} OBJ_STORAGE_DATA: Dict[bytes, bytes] = {} for key_description, data in OBJ_STORAGE_RAW_CONTENT.items(): content = Content.from_data(data) MAPPING_DESCRIPTION_CONTENT_SHA1GIT[key_description] = content.sha1_git MAPPING_DESCRIPTION_CONTENT_SHA1[key_description] = content.sha1 OBJ_STORAGE_DATA[content.sha1] = data RAW_CONTENT_METADATA = [ ( "du français".encode(), "text/plain", "utf-8", ), ( b"def __init__(self):", ("text/x-python", "text/x-script.python"), "us-ascii", ), ( b"\xff\xfe\x00\x00\x00\x00\xff\xfe\xff\xff", "application/octet-stream", "", ), ] RAW_CONTENTS: Dict[bytes, Tuple] = {} RAW_CONTENT_IDS: List[bytes] = [] for index, raw_content_d in enumerate(RAW_CONTENT_METADATA): raw_content = raw_content_d[0] content = Content.from_data(raw_content) RAW_CONTENTS[content.sha1] = raw_content_d RAW_CONTENT_IDS.append(content.sha1) # and write it to objstorage data so it's flushed in the objstorage OBJ_STORAGE_DATA[content.sha1] = raw_content SHA1_TO_LICENSES: Dict[bytes, List[str]] = { RAW_CONTENT_IDS[0]: ["GPL"], RAW_CONTENT_IDS[1]: ["AGPL"], RAW_CONTENT_IDS[2]: [], } -SHA1_TO_CTAGS: Dict[bytes, List[Dict[str, Any]]] = { - RAW_CONTENT_IDS[0]: [ - { - "name": "foo", - "kind": "str", - "line": 10, - "lang": "bar", - } - ], - RAW_CONTENT_IDS[1]: [ - { - "name": "symbol", - "kind": "float", - "line": 99, - "lang": "python", - } - ], - RAW_CONTENT_IDS[2]: [ - { - "name": "let", - "kind": "int", - "line": 100, - "lang": "haskell", - } - ], -} - - DIRECTORY = Directory( entries=( DirectoryEntry( name=b"index.js", type="file", target=MAPPING_DESCRIPTION_CONTENT_SHA1GIT["text:some"], perms=0o100644, ), DirectoryEntry( name=b"package.json", type="file", target=MAPPING_DESCRIPTION_CONTENT_SHA1GIT[ "json:test-metadata-package.json" ], perms=0o100644, ), DirectoryEntry( name=b".github", type="dir", target=Directory(entries=()).id, perms=0o040000, ), ), ) DIRECTORY2 = Directory( entries=( DirectoryEntry( name=b"package.json", type="file", target=MAPPING_DESCRIPTION_CONTENT_SHA1GIT["json:yarn-parser-package.json"], perms=0o100644, ), ), ) 
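# The description keys above resolve to content hashes via Content.from_data:
# MAPPING_DESCRIPTION_CONTENT_SHA1 holds the sha1 used as objstorage key, while
# MAPPING_DESCRIPTION_CONTENT_SHA1GIT holds the sha1_git targeted by directory
# entries (DIRECTORY, DIRECTORY2). A minimal illustration of that relationship,
# mirroring the loop above:
#
#     content = Content.from_data(OBJ_STORAGE_RAW_CONTENT["text:some"])
#     assert MAPPING_DESCRIPTION_CONTENT_SHA1["text:some"] == content.sha1
#     assert MAPPING_DESCRIPTION_CONTENT_SHA1GIT["text:some"] == content.sha1_git
#     assert OBJ_STORAGE_DATA[content.sha1] == OBJ_STORAGE_RAW_CONTENT["text:some"]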
_utc_plus_2 = datetime.timezone(datetime.timedelta(minutes=120)) REVISION = Revision( message=b"Improve search functionality", author=Person( name=b"Andrew Nesbitt", fullname=b"Andrew Nesbitt ", email=b"andrewnez@gmail.com", ), committer=Person( name=b"Andrew Nesbitt", fullname=b"Andrew Nesbitt ", email=b"andrewnez@gmail.com", ), committer_date=TimestampWithTimezone.from_datetime( datetime.datetime(2013, 10, 4, 12, 50, 49, tzinfo=_utc_plus_2) ), type=RevisionType.GIT, synthetic=False, date=TimestampWithTimezone.from_datetime( datetime.datetime(2017, 2, 20, 16, 14, 16, tzinfo=_utc_plus_2) ), directory=DIRECTORY2.id, parents=(), ) REVISIONS = [REVISION] RELEASE = Release( name=b"v0.0.0", message=None, author=Person( name=b"Andrew Nesbitt", fullname=b"Andrew Nesbitt ", email=b"andrewnez@gmail.com", ), synthetic=False, date=TimestampWithTimezone.from_datetime( datetime.datetime(2017, 2, 20, 16, 14, 16, tzinfo=_utc_plus_2) ), target_type=ObjectType.DIRECTORY, target=DIRECTORY2.id, ) RELEASES = [RELEASE] SNAPSHOTS = [ # https://github.com/SoftwareHeritage/swh-storage Snapshot( branches={ b"refs/heads/add-revision-origin-cache": SnapshotBranch( target=b'L[\xce\x1c\x88\x8eF\t\xf1"\x19\x1e\xfb\xc0s\xe7/\xe9l\x1e', target_type=TargetType.REVISION, ), b"refs/head/master": SnapshotBranch( target=b"8K\x12\x00d\x03\xcc\xe4]bS\xe3\x8f{\xd7}\xac\xefrm", target_type=TargetType.REVISION, ), b"HEAD": SnapshotBranch( target=b"refs/head/master", target_type=TargetType.ALIAS ), b"refs/tags/v0.0.103": SnapshotBranch( target=b'\xb6"Im{\xfdLb\xb0\x94N\xea\x96m\x13x\x88+\x0f\xdd', target_type=TargetType.RELEASE, ), }, ), # rsync://ftp.gnu.org/gnu/3dldf Snapshot( branches={ b"3DLDF-1.1.4.tar.gz": SnapshotBranch( target=b'dJ\xfb\x1c\x91\xf4\x82B%]6\xa2\x90|\xd3\xfc"G\x99\x11', target_type=TargetType.REVISION, ), b"3DLDF-2.0.2.tar.gz": SnapshotBranch( target=b"\xb6\x0e\xe7\x9e9\xac\xaa\x19\x9e=\xd1\xc5\x00\\\xc6\xfc\xe0\xa6\xb4V", # noqa target_type=TargetType.REVISION, ), b"3DLDF-2.0.3-examples.tar.gz": SnapshotBranch( target=b"!H\x19\xc0\xee\x82-\x12F1\xbd\x97\xfe\xadZ\x80\x80\xc1\x83\xff", # noqa target_type=TargetType.REVISION, ), b"3DLDF-2.0.3.tar.gz": SnapshotBranch( target=b"\x8e\xa9\x8e/\xea}\x9feF\xf4\x9f\xfd\xee\xcc\x1a\xb4`\x8c\x8by", # noqa target_type=TargetType.REVISION, ), b"3DLDF-2.0.tar.gz": SnapshotBranch( target=b"F6*\xff(?\x19a\xef\xb6\xc2\x1fv$S\xe3G\xd3\xd1m", target_type=TargetType.REVISION, ), }, ), # https://forge.softwareheritage.org/source/jesuisgpl/", Snapshot( branches={ b"master": SnapshotBranch( target=b"\xe7n\xa4\x9c\x9f\xfb\xb7\xf76\x11\x08{\xa6\xe9\x99\xb1\x9e]q\xeb", # noqa target_type=TargetType.REVISION, ) }, ), # https://old-pypi.example.org/project/limnoria/ Snapshot( branches={ b"HEAD": SnapshotBranch( target=b"releases/2018.09.09", target_type=TargetType.ALIAS ), b"releases/2018.09.01": SnapshotBranch( target=b"<\xee1(\xe8\x8d_\xc1\xc9\xa6rT\xf1\x1d\xbb\xdfF\xfdw\xcf", target_type=TargetType.REVISION, ), b"releases/2018.09.09": SnapshotBranch( target=b"\x83\xb9\xb6\xc7\x05\xb1%\xd0\xfem\xd8kA\x10\x9d\xc5\xfa2\xf8t", # noqa target_type=TargetType.REVISION, ), }, ), # https://pypi.org/project/limnoria/ Snapshot( branches={ b"HEAD": SnapshotBranch( target=b"releases/2018.09.09", target_type=TargetType.ALIAS ), b"releases/2018.09.01": SnapshotBranch( target=b"<\xee1(\xe8\x8d_\xc1\xc9\xa6rT\xf1\x1d\xbb\xdfF\xfdw\xcf", target_type=TargetType.RELEASE, ), b"releases/2018.09.09": SnapshotBranch( target=b"\x83\xb9\xb6\xc7\x05\xb1%\xd0\xfem\xd8kA\x10\x9d\xc5\xfa2\xf8t", # noqa 
target_type=TargetType.RELEASE, ), }, ), # http://0-512-md.googlecode.com/svn/ Snapshot( branches={ b"master": SnapshotBranch( target=b"\xe4?r\xe1,\x88\xab\xec\xe7\x9a\x87\xb8\xc9\xad#.\x1bw=\x18", target_type=TargetType.REVISION, ) }, ), # https://github.com/librariesio/yarn-parser Snapshot( branches={ b"HEAD": SnapshotBranch( target=REVISION.id, target_type=TargetType.REVISION, ) }, ), # https://github.com/librariesio/yarn-parser.git Snapshot( branches={ b"HEAD": SnapshotBranch( target=REVISION.id, target_type=TargetType.REVISION, ) }, ), # https://npm.example.org/yarn-parser Snapshot( branches={ b"HEAD": SnapshotBranch( target=RELEASE.id, target_type=TargetType.RELEASE, ) }, ), ] assert len(SNAPSHOTS) == len(ORIGIN_VISITS) YARN_PARSER_METADATA = { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "url": "https://github.com/librariesio/yarn-parser#readme", "codeRepository": "git+git+https://github.com/librariesio/yarn-parser.git", "author": [{"type": "Person", "name": "Andrew Nesbitt"}], "license": "https://spdx.org/licenses/AGPL-3.0", "version": "1.0.0", "description": "Tiny web service for parsing yarn.lock files", "issueTracker": "https://github.com/librariesio/yarn-parser/issues", "name": "yarn-parser", "keywords": ["yarn", "parse", "lock", "dependencies"], "type": "SoftwareSourceCode", } json_dict_keys = strategies.one_of( strategies.characters(), strategies.just("type"), strategies.just("url"), strategies.just("name"), strategies.just("email"), strategies.just("@id"), strategies.just("@context"), strategies.just("repository"), strategies.just("license"), strategies.just("repositories"), strategies.just("licenses"), ) """Hypothesis strategy that generates strings, with an emphasis on those that are often used as dictionary keys in metadata files.""" generic_json_document = strategies.recursive( strategies.none() | strategies.booleans() | strategies.floats() | strategies.characters(), lambda children: ( strategies.lists(children, min_size=1) | strategies.dictionaries(json_dict_keys, children, min_size=1) ), ) """Hypothesis strategy that generates possible values for values of JSON metadata files.""" def json_document_strategy(keys=None): """Generates an hypothesis strategy that generates metadata files for a JSON-based format that uses the given keys.""" if keys is None: keys = strategies.characters() else: keys = strategies.one_of(map(strategies.just, keys)) return strategies.dictionaries(keys, generic_json_document, min_size=1) def _tree_to_xml(root, xmlns, data): def encode(s): "Skips unpaired surrogates generated by json_document_strategy" return s.encode("utf8", "replace") def to_xml(data, indent=b" "): if data is None: return b"" elif isinstance(data, (bool, str, int, float)): return indent + encode(str(data)) elif isinstance(data, list): return b"\n".join(to_xml(v, indent=indent) for v in data) elif isinstance(data, dict): lines = [] for (key, value) in data.items(): lines.append(indent + encode("<{}>".format(key))) lines.append(to_xml(value, indent=indent + b" ")) lines.append(indent + encode("".format(key))) return b"\n".join(lines) else: raise TypeError(data) return b"\n".join( [ '<{} xmlns="{}">'.format(root, xmlns).encode(), to_xml(data), "".format(root).encode(), ] ) class TreeToXmlTest(unittest.TestCase): def test_leaves(self): self.assertEqual( _tree_to_xml("root", "http://example.com", None), b'\n\n', ) self.assertEqual( _tree_to_xml("root", "http://example.com", True), b'\n True\n', ) self.assertEqual( _tree_to_xml("root", "http://example.com", "abc"), b'\n 
abc\n', ) self.assertEqual( _tree_to_xml("root", "http://example.com", 42), b'\n 42\n', ) self.assertEqual( _tree_to_xml("root", "http://example.com", 3.14), b'\n 3.14\n', ) def test_dict(self): self.assertIn( _tree_to_xml("root", "http://example.com", {"foo": "bar", "baz": "qux"}), [ b'\n' b" \n bar\n \n" b" \n qux\n \n" b"", b'\n' b" \n qux\n \n" b" \n bar\n \n" b"", ], ) def test_list(self): self.assertEqual( _tree_to_xml( "root", "http://example.com", [ {"foo": "bar"}, {"foo": "baz"}, ], ), b'\n' b" \n bar\n \n" b" \n baz\n \n" b"", ) def xml_document_strategy(keys, root, xmlns): """Generates an hypothesis strategy that generates metadata files for an XML format that uses the given keys.""" return strategies.builds( functools.partial(_tree_to_xml, root, xmlns), json_document_strategy(keys) ) def filter_dict(d, keys): "return a copy of the dict with keys deleted" if not isinstance(keys, (list, tuple)): keys = (keys,) return dict((k, v) for (k, v) in d.items() if k not in keys) def fill_obj_storage(obj_storage): """Add some content in an object storage.""" for obj_id, content in OBJ_STORAGE_DATA.items(): obj_storage.add(content, obj_id) def fill_storage(storage): """Fill in storage with consistent test dataset.""" storage.content_add([Content.from_data(data) for data in OBJ_STORAGE_DATA.values()]) storage.directory_add([DIRECTORY, DIRECTORY2]) storage.revision_add(REVISIONS) storage.release_add(RELEASES) storage.snapshot_add(SNAPSHOTS) storage.origin_add(ORIGINS) for visit, snapshot in zip(ORIGIN_VISITS, SNAPSHOTS): assert snapshot.id is not None visit = storage.origin_visit_add( [OriginVisit(origin=visit["origin"], date=now(), type=visit["type"])] )[0] visit_status = OriginVisitStatus( origin=visit.origin, visit=visit.visit, date=now(), status="full", snapshot=snapshot.id, ) storage.origin_visit_status_add([visit_status]) class CommonContentIndexerTest(metaclass=abc.ABCMeta): def get_indexer_results(self, ids): """Override this for indexers that don't have a mock storage.""" return self.indexer.idx_storage.state def assert_results_ok(self, sha1s, expected_results=None): sha1s = [hash_to_bytes(sha1) for sha1 in sha1s] actual_results = list(self.get_indexer_results(sha1s)) if expected_results is None: expected_results = self.expected_results # expected results may contain slightly duplicated results assert 0 < len(actual_results) <= len(expected_results) for result in actual_results: assert result in expected_results def test_index(self): """Known sha1 have their data indexed""" sha1s = [self.id0, self.id1, self.id2] # when self.indexer.run(sha1s) self.assert_results_ok(sha1s) # 2nd pass self.indexer.run(sha1s) self.assert_results_ok(sha1s) def test_index_one_unknown_sha1(self): """Unknown sha1s are not indexed""" sha1s = [ self.id1, "799a5ef812c53907562fe379d4b3851e69c7cb15", # unknown "800a5ef812c53907562fe379d4b3851e69c7cb15", # unknown ] # unknown # when self.indexer.run(sha1s) # then expected_results = [res for res in self.expected_results if res.id in sha1s] self.assert_results_ok(sha1s, expected_results) class CommonContentIndexerPartitionTest: """Allows to factorize tests on range indexer.""" def setUp(self): self.contents = sorted(OBJ_STORAGE_DATA) def assert_results_ok(self, partition_id, nb_partitions, actual_results): expected_ids = [ c.sha1 for c in stream_results( self.indexer.storage.content_get_partition, partition_id=partition_id, nb_partitions=nb_partitions, ) ] actual_results = list(actual_results) for indexed_data in actual_results: _id = indexed_data.id assert _id in 
expected_ids _tool_id = indexed_data.indexer_configuration_id assert _tool_id == self.indexer.tool["id"] def test__index_contents(self): """Indexing contents without existing data results in indexed data""" partition_id = 0 nb_partitions = 4 actual_results = list( self.indexer._index_contents(partition_id, nb_partitions, indexed={}) ) self.assert_results_ok(partition_id, nb_partitions, actual_results) def test__index_contents_with_indexed_data(self): """Indexing contents with existing data results in less indexed data""" partition_id = 3 nb_partitions = 4 # first pass actual_results = list( self.indexer._index_contents(partition_id, nb_partitions, indexed={}), ) self.assert_results_ok(partition_id, nb_partitions, actual_results) indexed_ids = {res.id for res in actual_results} actual_results = list( self.indexer._index_contents( partition_id, nb_partitions, indexed=indexed_ids ) ) # already indexed, so nothing new assert actual_results == [] def test_generate_content_get(self): """Optimal indexing should result in indexed data""" partition_id = 0 nb_partitions = 1 actual_results = self.indexer.run( partition_id, nb_partitions, skip_existing=False ) assert actual_results["status"] == "eventful", actual_results def test_generate_content_get_no_result(self): """No result indexed returns False""" actual_results = self.indexer.run(1, 2**512, incremental=False) assert actual_results == {"status": "uneventful"} def mock_compute_license(path): """path is the content identifier""" if isinstance(id, bytes): path = path.decode("utf-8") # path is something like /tmp/tmpXXX/ so we keep only the sha1 part id_ = path.split("/")[-1] return {"licenses": SHA1_TO_LICENSES.get(hash_to_bytes(id_), [])} diff --git a/swh/indexer/tests/zz_celery/test_tasks.py b/swh/indexer/tests/zz_celery/test_tasks.py index e4be328..270bdbf 100644 --- a/swh/indexer/tests/zz_celery/test_tasks.py +++ b/swh/indexer/tests/zz_celery/test_tasks.py @@ -1,132 +1,114 @@ -# Copyright (C) 2020 The Software Heritage developers +# Copyright (C) 2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information def test_task_origin_metadata( mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config ): mock_indexer = mocker.patch("swh.indexer.tasks.OriginMetadataIndexer.run") mock_indexer.return_value = {"status": "eventful"} res = swh_scheduler_celery_app.send_task( "swh.indexer.tasks.OriginMetadata", args=["origin-url"], ) assert res res.wait() assert res.successful() assert res.result == {"status": "eventful"} -def test_task_ctags( - mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config -): - - mock_indexer = mocker.patch("swh.indexer.tasks.CtagsIndexer.run") - mock_indexer.return_value = {"status": "eventful"} - - res = swh_scheduler_celery_app.send_task( - "swh.indexer.tasks.Ctags", - args=["id0"], - ) - assert res - res.wait() - assert res.successful() - - assert res.result == {"status": "eventful"} - - def test_task_fossology_license( mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config ): mock_indexer = mocker.patch("swh.indexer.tasks.FossologyLicenseIndexer.run") mock_indexer.return_value = {"status": "eventful"} res = swh_scheduler_celery_app.send_task( "swh.indexer.tasks.ContentFossologyLicense", args=["id0"], ) assert res res.wait() assert res.successful() assert res.result == {"status": "eventful"} def 
test_task_recompute_checksums( mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config ): mock_indexer = mocker.patch("swh.indexer.tasks.RecomputeChecksums.run") mock_indexer.return_value = {"status": "eventful"} res = swh_scheduler_celery_app.send_task( "swh.indexer.tasks.RecomputeChecksums", args=[[{"blake2b256": "id"}]], ) assert res res.wait() assert res.successful() assert res.result == {"status": "eventful"} def test_task_mimetype( mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config ): mock_indexer = mocker.patch("swh.indexer.tasks.MimetypeIndexer.run") mock_indexer.return_value = {"status": "eventful"} res = swh_scheduler_celery_app.send_task( "swh.indexer.tasks.ContentMimetype", args=["id0"], ) assert res res.wait() assert res.successful() assert res.result == {"status": "eventful"} def test_task_mimetype_partition( mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config ): mock_indexer = mocker.patch("swh.indexer.tasks.MimetypePartitionIndexer.run") mock_indexer.return_value = {"status": "eventful"} res = swh_scheduler_celery_app.send_task( "swh.indexer.tasks.ContentMimetypePartition", args=[0, 4], ) assert res res.wait() assert res.successful() assert res.result == {"status": "eventful"} def test_task_license_partition( mocker, swh_scheduler_celery_app, swh_scheduler_celery_worker, swh_config ): mock_indexer = mocker.patch( "swh.indexer.tasks.FossologyLicensePartitionIndexer.run" ) mock_indexer.return_value = {"status": "eventful"} res = swh_scheduler_celery_app.send_task( "swh.indexer.tasks.ContentFossologyLicensePartition", args=[0, 4], ) assert res res.wait() assert res.successful() assert res.result == {"status": "eventful"}
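# All tests in this module share one pattern: patch the target indexer's ``run``
# method, enqueue the corresponding task by name on the celery app fixture, and
# assert that the mocked return value propagates back. ``send_task`` returns a
# celery AsyncResult, so ``res.wait()`` blocks until the worker fixture has run
# the task and ``res.result`` carries whatever the patched ``run`` returned.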