diff --git a/PKG-INFO b/PKG-INFO index 2f25003..117bfb0 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,69 +1,69 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 0.0.156 +Version: 0.0.157 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest -Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Project-URL: Funding, https://www.softwareheritage.org/donate +Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Description: swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - store those information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive batch of ids - retrieve the associated data depending on object type - compute for that object some index - store the result to swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue swh_indexer_content_language): detect the programming language - ctags (queue swh_indexer_content_ctags): compute tags information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translate file into translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata in content_metadata table in storage or run content indexer to translate files. 
Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.indexer.egg-info/PKG-INFO b/swh.indexer.egg-info/PKG-INFO index 2f25003..117bfb0 100644 --- a/swh.indexer.egg-info/PKG-INFO +++ b/swh.indexer.egg-info/PKG-INFO @@ -1,69 +1,69 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 0.0.156 +Version: 0.0.157 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest -Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Project-URL: Funding, https://www.softwareheritage.org/donate +Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Description: swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - store those information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive batch of ids - retrieve the associated data depending on object type - compute for that object some index - store the result to swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue swh_indexer_content_language): detect the programming 
language - ctags (queue swh_indexer_content_ctags): compute tags information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translate file into translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata in content_metadata table in storage or run content indexer to translate files. Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.indexer.egg-info/SOURCES.txt b/swh.indexer.egg-info/SOURCES.txt index d47b813..569109d 100644 --- a/swh.indexer.egg-info/SOURCES.txt +++ b/swh.indexer.egg-info/SOURCES.txt @@ -1,94 +1,95 @@ MANIFEST.in Makefile README.md requirements-swh.txt requirements.txt setup.py version.txt sql/bin/db-upgrade sql/bin/dot_add_content sql/doc/json/.gitignore sql/doc/json/Makefile sql/doc/json/indexer_configuration.tool_configuration.schema.json sql/doc/json/revision_metadata.translated_metadata.json sql/json/.gitignore sql/json/Makefile sql/json/indexer_configuration.tool_configuration.schema.json sql/json/revision_metadata.translated_metadata.json sql/upgrades/115.sql sql/upgrades/116.sql sql/upgrades/117.sql sql/upgrades/118.sql sql/upgrades/119.sql sql/upgrades/120.sql sql/upgrades/121.sql sql/upgrades/122.sql sql/upgrades/123.sql sql/upgrades/124.sql sql/upgrades/125.sql sql/upgrades/126.sql sql/upgrades/127.sql swh/__init__.py swh.indexer.egg-info/PKG-INFO swh.indexer.egg-info/SOURCES.txt swh.indexer.egg-info/dependency_links.txt swh.indexer.egg-info/entry_points.txt swh.indexer.egg-info/requires.txt swh.indexer.egg-info/top_level.txt swh/indexer/__init__.py swh/indexer/cli.py swh/indexer/codemeta.py 
swh/indexer/ctags.py swh/indexer/fossology_license.py swh/indexer/indexer.py swh/indexer/journal_client.py swh/indexer/metadata.py swh/indexer/metadata_detector.py swh/indexer/mimetype.py swh/indexer/origin_head.py swh/indexer/py.typed swh/indexer/rehash.py swh/indexer/tasks.py swh/indexer/data/codemeta/CITATION swh/indexer/data/codemeta/LICENSE swh/indexer/data/codemeta/codemeta.jsonld swh/indexer/data/codemeta/crosswalk.csv swh/indexer/metadata_dictionary/__init__.py swh/indexer/metadata_dictionary/base.py swh/indexer/metadata_dictionary/codemeta.py swh/indexer/metadata_dictionary/maven.py swh/indexer/metadata_dictionary/npm.py swh/indexer/metadata_dictionary/python.py swh/indexer/metadata_dictionary/ruby.py swh/indexer/sql/10-swh-init.sql swh/indexer/sql/20-swh-enums.sql swh/indexer/sql/30-swh-schema.sql swh/indexer/sql/40-swh-func.sql swh/indexer/sql/50-swh-data.sql swh/indexer/sql/60-swh-indexes.sql swh/indexer/storage/__init__.py swh/indexer/storage/converters.py swh/indexer/storage/db.py swh/indexer/storage/in_memory.py swh/indexer/storage/api/__init__.py swh/indexer/storage/api/client.py swh/indexer/storage/api/server.py swh/indexer/tests/__init__.py swh/indexer/tests/conftest.py swh/indexer/tests/tasks.py swh/indexer/tests/test_cli.py swh/indexer/tests/test_ctags.py swh/indexer/tests/test_fossology_license.py swh/indexer/tests/test_journal_client.py swh/indexer/tests/test_metadata.py swh/indexer/tests/test_mimetype.py swh/indexer/tests/test_origin_head.py swh/indexer/tests/test_origin_metadata.py swh/indexer/tests/utils.py swh/indexer/tests/storage/__init__.py +swh/indexer/tests/storage/conftest.py swh/indexer/tests/storage/generate_data_test.py swh/indexer/tests/storage/test_api_client.py swh/indexer/tests/storage/test_converters.py swh/indexer/tests/storage/test_in_memory.py swh/indexer/tests/storage/test_server.py swh/indexer/tests/storage/test_storage.py \ No newline at end of file diff --git a/swh/indexer/cli.py b/swh/indexer/cli.py index 
d14aff5..24ebb63 100644 --- a/swh/indexer/cli.py +++ b/swh/indexer/cli.py @@ -1,261 +1,258 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import functools import json import time import click from swh.core import config from swh.core.cli import CONTEXT_SETTINGS, AliasedGroup from swh.journal.cli import get_journal_client from swh.scheduler import get_scheduler from swh.scheduler.cli_utils import schedule_origin_batches from swh.storage import get_storage from swh.indexer import metadata_dictionary from swh.indexer.journal_client import process_journal_objects from swh.indexer.storage import get_indexer_storage from swh.indexer.storage.api.server import load_and_check_config, app @click.group(name='indexer', context_settings=CONTEXT_SETTINGS, cls=AliasedGroup) @click.option('--config-file', '-C', default=None, type=click.Path(exists=True, dir_okay=False,), help="Configuration file.") @click.pass_context def cli(ctx, config_file): """Software Heritage Indexer tools. The Indexer is used to mine the content of the archive and extract derived information from archive source code artifacts. 
""" ctx.ensure_object(dict) conf = config.read(config_file) ctx.obj['config'] = conf def _get_api(getter, config, config_key, url): if url: config[config_key] = { 'cls': 'remote', 'args': {'url': url} } elif config_key not in config: raise click.ClickException( 'Missing configuration for {}'.format(config_key)) return getter(**config[config_key]) @cli.group('mapping') def mapping(): '''Manage Software Heritage Indexer mappings.''' pass @mapping.command('list') def mapping_list(): """Prints the list of known mappings.""" mapping_names = [mapping.name for mapping in metadata_dictionary.MAPPINGS.values()] mapping_names.sort() for mapping_name in mapping_names: click.echo(mapping_name) @mapping.command('list-terms') @click.option('--exclude-mapping', multiple=True, help='Exclude the given mapping from the output') @click.option('--concise', is_flag=True, default=False, help='Don\'t print the list of mappings supporting each term.') def mapping_list_terms(concise, exclude_mapping): """Prints the list of known CodeMeta terms, and which mappings support them.""" properties = metadata_dictionary.list_terms() for (property_name, supported_mappings) in sorted(properties.items()): supported_mappings = {m.name for m in supported_mappings} supported_mappings -= set(exclude_mapping) if supported_mappings: if concise: click.echo(property_name) else: click.echo('{}:'.format(property_name)) click.echo('\t' + ', '.join(sorted(supported_mappings))) @mapping.command('translate') @click.argument('mapping-name') @click.argument('file', type=click.File('rb')) def mapping_translate(mapping_name, file): """Prints the list of known mappings.""" mapping_cls = [cls for cls in metadata_dictionary.MAPPINGS.values() if cls.name == mapping_name] if not mapping_cls: raise click.ClickException('Unknown mapping {}'.format(mapping_name)) assert len(mapping_cls) == 1 mapping_cls = mapping_cls[0] mapping = mapping_cls() codemeta_doc = mapping.translate(file.read()) click.echo(json.dumps(codemeta_doc, 
indent=4)) @cli.group('schedule') @click.option('--scheduler-url', '-s', default=None, help="URL of the scheduler API") @click.option('--indexer-storage-url', '-i', default=None, help="URL of the indexer storage API") @click.option('--storage-url', '-g', default=None, help="URL of the (graph) storage API") @click.option('--dry-run/--no-dry-run', is_flag=True, default=False, help='List only what would be scheduled.') @click.pass_context def schedule(ctx, scheduler_url, storage_url, indexer_storage_url, dry_run): """Manipulate Software Heritage Indexer tasks. Via SWH Scheduler's API.""" ctx.obj['indexer_storage'] = _get_api( get_indexer_storage, ctx.obj['config'], 'indexer_storage', indexer_storage_url ) ctx.obj['storage'] = _get_api( get_storage, ctx.obj['config'], 'storage', storage_url ) ctx.obj['scheduler'] = _get_api( get_scheduler, ctx.obj['config'], 'scheduler', scheduler_url ) if dry_run: ctx.obj['scheduler'] = None def list_origins_by_producer(idx_storage, mappings, tool_ids): - start = '' + next_page_token = '' limit = 10000 - while True: - origins = list( - idx_storage.origin_intrinsic_metadata_search_by_producer( - start=start, limit=limit, ids_only=True, - mappings=mappings or None, tool_ids=tool_ids or None)) - if not origins: - break - start = origins[-1] + '\x00' # first possible string after this - yield from origins + while next_page_token is not None: + result = idx_storage.origin_intrinsic_metadata_search_by_producer( + page_token=next_page_token, limit=limit, ids_only=True, + mappings=mappings or None, tool_ids=tool_ids or None) + next_page_token = result.get('next_page_token') + yield from result['origins'] @schedule.command('reindex_origin_metadata') @click.option('--batch-size', '-b', 'origin_batch_size', default=10, show_default=True, type=int, help="Number of origins per task") @click.option('--tool-id', '-t', 'tool_ids', type=int, multiple=True, help="Restrict search of old metadata to this/these tool ids.") @click.option('--mapping', '-m', 
'mappings', multiple=True, help="Mapping(s) that should be re-scheduled (eg. 'npm', " "'gemspec', 'maven')") @click.option('--task-type', default='index-origin-metadata', show_default=True, help="Name of the task type to schedule.") @click.pass_context def schedule_origin_metadata_reindex( ctx, origin_batch_size, tool_ids, mappings, task_type): """Schedules indexing tasks for origins that were already indexed.""" idx_storage = ctx.obj['indexer_storage'] scheduler = ctx.obj['scheduler'] origins = list_origins_by_producer(idx_storage, mappings, tool_ids) kwargs = {"policy_update": "update-dups"} schedule_origin_batches( scheduler, task_type, origins, origin_batch_size, kwargs) @cli.command('journal-client') @click.option('--scheduler-url', '-s', default=None, help="URL of the scheduler API") @click.option('--origin-metadata-task-type', default='index-origin-metadata', help='Name of the task running the origin metadata indexer.') @click.option('--broker', 'brokers', type=str, multiple=True, help='Kafka broker to connect to.') @click.option('--prefix', type=str, default=None, help='Prefix of Kafka topic names to read from.') @click.option('--group-id', type=str, help='Consumer/group id for reading from Kafka.') @click.option('--max-messages', '-m', default=None, type=int, help='Maximum number of objects to replay. 
Default is to ' 'run forever.') @click.pass_context def journal_client(ctx, scheduler_url, origin_metadata_task_type, brokers, prefix, group_id, max_messages): """Listens for new objects from the SWH Journal, and schedules tasks to run relevant indexers (currently, only origin-intrinsic-metadata) on these new objects.""" scheduler = _get_api( get_scheduler, ctx.obj['config'], 'scheduler', scheduler_url ) client = get_journal_client( ctx, brokers=brokers, prefix=prefix, group_id=group_id, object_types=['origin_visit'], max_messages=max_messages, ) worker_fn = functools.partial( process_journal_objects, scheduler=scheduler, task_names={ 'origin_metadata': origin_metadata_task_type, } ) nb_messages = 0 last_log_time = 0 try: while not max_messages or nb_messages < max_messages: nb_messages += client.process(worker_fn) if time.monotonic() - last_log_time >= 60: print('Processed %d messages.' % nb_messages) last_log_time = time.monotonic() except KeyboardInterrupt: ctx.exit(0) else: print('Done.') @cli.command('rpc-serve') @click.argument('config-path', required=True) @click.option('--host', default='0.0.0.0', help="Host to run the server") @click.option('--port', default=5007, type=click.INT, help="Binding port of the server") @click.option('--debug/--nodebug', default=True, help="Indicates if the server should run in debug mode") def rpc_server(config_path, host, port, debug): """Starts a Software Heritage Indexer RPC HTTP server.""" api_cfg = load_and_check_config(config_path, type='any') app.config.update(api_cfg) app.run(host, port=int(port), debug=bool(debug)) def main(): return cli(auto_envvar_prefix='SWH_INDEXER') if __name__ == '__main__': main() diff --git a/swh/indexer/storage/__init__.py b/swh/indexer/storage/__init__.py index d90350d..5ea6f4b 100644 --- a/swh/indexer/storage/__init__.py +++ b/swh/indexer/storage/__init__.py @@ -1,924 +1,936 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of 
this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json import psycopg2 from collections import defaultdict from swh.core.api import remote_api_endpoint from swh.storage.common import db_transaction_generator, db_transaction from swh.storage.exc import StorageDBError from .db import Db from . import converters INDEXER_CFG_KEY = 'indexer_storage' MAPPING_NAMES = ['codemeta', 'gemspec', 'maven', 'npm', 'pkg-info'] def get_indexer_storage(cls, args): """Get an indexer storage object of class `storage_class` with arguments `storage_args`. Args: cls (str): storage's class, either 'local' or 'remote' args (dict): dictionary of arguments passed to the storage class constructor Returns: an instance of swh.indexer's storage (either local or remote) Raises: ValueError if passed an unknown storage class. """ if cls == 'remote': from .api.client import RemoteStorage as IndexerStorage elif cls == 'local': from . import IndexerStorage elif cls == 'memory': from .in_memory import IndexerStorage else: raise ValueError('Unknown indexer storage class `%s`' % cls) return IndexerStorage(**args) def _check_id_duplicates(data): """ If any two dictionaries in `data` have the same id, raises a `ValueError`. Values associated to the key must be hashable. Args: data (List[dict]): List of dictionaries to be inserted >>> _check_id_duplicates([ ... {'id': 'foo', 'data': 'spam'}, ... {'id': 'bar', 'data': 'egg'}, ... ]) >>> _check_id_duplicates([ ... {'id': 'foo', 'data': 'spam'}, ... {'id': 'foo', 'data': 'egg'}, ... ]) Traceback (most recent call last): ... ValueError: The same id is present more than once. 
""" if len({item['id'] for item in data}) < len(data): raise ValueError('The same id is present more than once.') class IndexerStorage: """SWH Indexer Storage """ def __init__(self, db, min_pool_conns=1, max_pool_conns=10): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection """ try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) def get_db(self): if self._db: return self._db return Db.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() @remote_api_endpoint('check_config') @db_transaction() def check_config(self, *, check_write, db=None, cur=None): """Check that the storage is configured and ready to go.""" # Check permissions on one of the tables if check_write: check = 'INSERT' else: check = 'SELECT' cur.execute( "select has_table_privilege(current_user, 'content_mimetype', %s)", # noqa (check,) ) return cur.fetchone()[0] @remote_api_endpoint('content_mimetype/missing') @db_transaction_generator() def content_mimetype_missing(self, mimetypes, db=None, cur=None): """Generate mimetypes missing from storage. Args: mimetypes (iterable): iterable of dict with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: tuple (id, indexer_configuration_id): missing id """ for obj in db.content_mimetype_missing_from_list(mimetypes, cur): yield obj[0] def _content_get_range(self, content_type, start, end, indexer_configuration_id, limit=1000, with_textual_data=False, db=None, cur=None): """Retrieve ids of type content_type within range [start, end] bound by limit. Args: **content_type** (str): content's type (mimetype, language, etc...) 
**start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) **with_textual_data** (bool): Deal with only textual content (True) or all content (all contents by defaults, False) Raises: ValueError for; - limit to None - wrong content_type provided Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ if limit is None: raise ValueError('Development error: limit should not be None') if content_type not in db.content_indexer_names: err = 'Development error: Wrong type. Should be one of [%s]' % ( ','.join(db.content_indexer_names)) raise ValueError(err) ids = [] next_id = None for counter, obj in enumerate(db.content_get_range( content_type, start, end, indexer_configuration_id, limit=limit+1, with_textual_data=with_textual_data, cur=cur)): _id = obj[0] if counter >= limit: next_id = _id break ids.append(_id) return { 'ids': ids, 'next': next_id } @remote_api_endpoint('content_mimetype/range') @db_transaction() def content_mimetype_get_range(self, start, end, indexer_configuration_id, limit=1000, db=None, cur=None): """Retrieve mimetypes within range [start, end] bound by limit. Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) Raises: ValueError for limit to None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. 
- **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ return self._content_get_range('mimetype', start, end, indexer_configuration_id, limit=limit, db=db, cur=cur) @remote_api_endpoint('content_mimetype/add') @db_transaction() def content_mimetype_add(self, mimetypes, conflict_update=False, db=None, cur=None): """Add mimetypes not present in storage. Args: mimetypes (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **mimetype** (bytes): raw content's mimetype - **encoding** (bytes): raw content's encoding - **indexer_configuration_id** (int): tool's id used to compute the results - **conflict_update** (bool): Flag to determine if we want to overwrite (``True``) or skip duplicates (``False``, the default) """ _check_id_duplicates(mimetypes) mimetypes.sort(key=lambda m: m['id']) db.mktemp_content_mimetype(cur) db.copy_to(mimetypes, 'tmp_content_mimetype', ['id', 'mimetype', 'encoding', 'indexer_configuration_id'], cur) db.content_mimetype_add_from_temp(conflict_update, cur) @remote_api_endpoint('content_mimetype') @db_transaction_generator() def content_mimetype_get(self, ids, db=None, cur=None): """Retrieve full content mimetype per ids. Args: ids (iterable): sha1 identifier Yields: mimetypes (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **mimetype** (bytes): raw content's mimetype - **encoding** (bytes): raw content's encoding - **tool** (dict): Tool used to compute the language """ for c in db.content_mimetype_get_from_list(ids, cur): yield converters.db_to_mimetype( dict(zip(db.content_mimetype_cols, c))) @remote_api_endpoint('content_language/missing') @db_transaction_generator() def content_language_missing(self, languages, db=None, cur=None): """List languages missing from storage. 
Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: an iterable of missing id for the tuple (id, indexer_configuration_id) """ for obj in db.content_language_missing_from_list(languages, cur): yield obj[0] @remote_api_endpoint('content_language') @db_transaction_generator() def content_language_get(self, ids, db=None, cur=None): """Retrieve full content language per ids. Args: ids (iterable): sha1 identifier Yields: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **lang** (bytes): raw content's language - **tool** (dict): Tool used to compute the language """ for c in db.content_language_get_from_list(ids, cur): yield converters.db_to_language( dict(zip(db.content_language_cols, c))) @remote_api_endpoint('content_language/add') @db_transaction() def content_language_add(self, languages, conflict_update=False, db=None, cur=None): """Add languages not present in storage. Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 - **lang** (bytes): language detected conflict_update (bool): Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ _check_id_duplicates(languages) languages.sort(key=lambda m: m['id']) db.mktemp_content_language(cur) # empty language is mapped to 'unknown' db.copy_to( ({ 'id': l['id'], 'lang': 'unknown' if not l['lang'] else l['lang'], 'indexer_configuration_id': l['indexer_configuration_id'], } for l in languages), 'tmp_content_language', ['id', 'lang', 'indexer_configuration_id'], cur) db.content_language_add_from_temp(conflict_update, cur) @remote_api_endpoint('content/ctags/missing') @db_transaction_generator() def content_ctags_missing(self, ctags, db=None, cur=None): """List ctags missing from storage. 
Args: ctags (iterable): dicts with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: an iterable of missing id for the tuple (id, indexer_configuration_id) """ for obj in db.content_ctags_missing_from_list(ctags, cur): yield obj[0] @remote_api_endpoint('content/ctags') @db_transaction_generator() def content_ctags_get(self, ids, db=None, cur=None): """Retrieve ctags per id. Args: ids (iterable): sha1 checksums Yields: Dictionaries with keys: - **id** (bytes): content's identifier - **name** (str): symbol's name - **kind** (str): symbol's kind - **lang** (str): language for that content - **tool** (dict): tool used to compute the ctags' info """ for c in db.content_ctags_get_from_list(ids, cur): yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, c))) @remote_api_endpoint('content/ctags/add') @db_transaction() def content_ctags_add(self, ctags, conflict_update=False, db=None, cur=None): """Add ctags not present in storage Args: ctags (iterable): dictionaries with keys: - **id** (bytes): sha1 - **ctags** ([list): List of dictionary with keys: name, kind, line, lang """ _check_id_duplicates(ctags) ctags.sort(key=lambda m: m['id']) def _convert_ctags(__ctags): """Convert ctags dict to list of ctags. """ for ctags in __ctags: yield from converters.ctags_to_db(ctags) db.mktemp_content_ctags(cur) db.copy_to(list(_convert_ctags(ctags)), tblname='tmp_content_ctags', columns=['id', 'name', 'kind', 'line', 'lang', 'indexer_configuration_id'], cur=cur) db.content_ctags_add_from_temp(conflict_update, cur) @remote_api_endpoint('content/ctags/search') @db_transaction_generator() def content_ctags_search(self, expression, limit=10, last_sha1=None, db=None, cur=None): """Search through content's raw ctags symbols. Args: expression (str): Expression to search for limit (int): Number of rows to return (default to 10). last_sha1 (str): Offset from which retrieving data (default to ''). 
Yields: rows of ctags including id, name, lang, kind, line, etc... """ for obj in db.content_ctags_search(expression, last_sha1, limit, cur=cur): yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj))) @remote_api_endpoint('content/fossology_license') @db_transaction_generator() def content_fossology_license_get(self, ids, db=None, cur=None): """Retrieve licenses per id. Args: ids (iterable): sha1 checksums Yields: dict: ``{id: facts}`` where ``facts`` is a dict with the following keys: - **licenses** ([str]): associated licenses for that content - **tool** (dict): Tool used to compute the license """ d = defaultdict(list) for c in db.content_fossology_license_get_from_list(ids, cur): license = dict(zip(db.content_fossology_license_cols, c)) id_ = license['id'] d[id_].append(converters.db_to_fossology_license(license)) for id_, facts in d.items(): yield {id_: facts} @remote_api_endpoint('content/fossology_license/add') @db_transaction() def content_fossology_license_add(self, licenses, conflict_update=False, db=None, cur=None): """Add licenses not present in storage. 
Args: licenses (iterable): dictionaries with keys: - **id**: sha1 - **licenses** ([bytes]): List of licenses associated to sha1 - **tool** (str): nomossa conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) Returns: list: content_license entries which failed due to unknown licenses """ _check_id_duplicates(licenses) licenses.sort(key=lambda m: m['id']) db.mktemp_content_fossology_license(cur) db.copy_to( ({ 'id': sha1['id'], 'indexer_configuration_id': sha1['indexer_configuration_id'], 'license': license, } for sha1 in licenses for license in sha1['licenses']), tblname='tmp_content_fossology_license', columns=['id', 'license', 'indexer_configuration_id'], cur=cur) db.content_fossology_license_add_from_temp(conflict_update, cur) @remote_api_endpoint('content/fossology_license/range') @db_transaction() def content_fossology_license_get_range( self, start, end, indexer_configuration_id, limit=1000, db=None, cur=None): """Retrieve licenses within range [start, end] bound by limit. Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) Raises: ValueError for limit to None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ return self._content_get_range('fossology_license', start, end, indexer_configuration_id, limit=limit, with_textual_data=True, db=db, cur=cur) @remote_api_endpoint('content_metadata/missing') @db_transaction_generator() def content_metadata_missing(self, metadata, db=None, cur=None): """List metadata missing from storage. 
Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ for obj in db.content_metadata_missing_from_list(metadata, cur): yield obj[0] @remote_api_endpoint('content_metadata') @db_transaction_generator() def content_metadata_get(self, ids, db=None, cur=None): """Retrieve metadata per id. Args: ids (iterable): sha1 checksums Yields: dictionaries with the following keys: id (bytes) metadata (str): associated metadata tool (dict): tool used to compute metadata """ for c in db.content_metadata_get_from_list(ids, cur): yield converters.db_to_metadata( dict(zip(db.content_metadata_cols, c))) @remote_api_endpoint('content_metadata/add') @db_transaction() def content_metadata_add(self, metadata, conflict_update=False, db=None, cur=None): """Add metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: sha1 - **metadata**: arbitrary dict conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ _check_id_duplicates(metadata) metadata.sort(key=lambda m: m['id']) db.mktemp_content_metadata(cur) db.copy_to(metadata, 'tmp_content_metadata', ['id', 'metadata', 'indexer_configuration_id'], cur) db.content_metadata_add_from_temp(conflict_update, cur) @remote_api_endpoint('revision_intrinsic_metadata/missing') @db_transaction_generator() def revision_intrinsic_metadata_missing(self, metadata, db=None, cur=None): """List metadata missing from storage. 
Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1_git revision identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing ids """ for obj in db.revision_intrinsic_metadata_missing_from_list( metadata, cur): yield obj[0] @remote_api_endpoint('revision_intrinsic_metadata') @db_transaction_generator() def revision_intrinsic_metadata_get(self, ids, db=None, cur=None): """Retrieve revision metadata per id. Args: ids (iterable): sha1 checksums Yields: dictionaries with the following keys: - **id** (bytes) - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ for c in db.revision_intrinsic_metadata_get_from_list(ids, cur): yield converters.db_to_metadata( dict(zip(db.revision_intrinsic_metadata_cols, c))) @remote_api_endpoint('revision_intrinsic_metadata/add') @db_transaction() def revision_intrinsic_metadata_add(self, metadata, conflict_update=False, db=None, cur=None): """Add metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: sha1_git of revision - **metadata**: arbitrary dict - **indexer_configuration_id**: tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ _check_id_duplicates(metadata) metadata.sort(key=lambda m: m['id']) db.mktemp_revision_intrinsic_metadata(cur) db.copy_to(metadata, 'tmp_revision_intrinsic_metadata', ['id', 'metadata', 'mappings', 'indexer_configuration_id'], cur) db.revision_intrinsic_metadata_add_from_temp(conflict_update, cur) @remote_api_endpoint('revision_intrinsic_metadata/delete') @db_transaction() def revision_intrinsic_metadata_delete(self, entries, db=None, cur=None): """Remove revision metadata from the storage. 
Args: entries (dict): dictionaries with the following keys: - **id** (bytes): revision identifier - **indexer_configuration_id** (int): tool used to compute metadata """ db.revision_intrinsic_metadata_delete(entries, cur) @remote_api_endpoint('origin_intrinsic_metadata') @db_transaction_generator() def origin_intrinsic_metadata_get(self, ids, db=None, cur=None): """Retrieve origin metadata per id. Args: ids (iterable): origin identifiers Yields: list: dictionaries with the following keys: - **id** (str): origin url - **from_revision** (bytes): which revision this metadata was extracted from - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ for c in db.origin_intrinsic_metadata_get_from_list(ids, cur): yield converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c))) @remote_api_endpoint('origin_intrinsic_metadata/add') @db_transaction() def origin_intrinsic_metadata_add(self, metadata, conflict_update=False, db=None, cur=None): """Add origin metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: origin urls - **from_revision**: sha1 id of the revision used to generate these metadata. 
- **metadata**: arbitrary dict - **indexer_configuration_id**: tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ _check_id_duplicates(metadata) metadata.sort(key=lambda m: m['id']) db.mktemp_origin_intrinsic_metadata(cur) db.copy_to(metadata, 'tmp_origin_intrinsic_metadata', ['id', 'metadata', 'indexer_configuration_id', 'from_revision', 'mappings'], cur) db.origin_intrinsic_metadata_add_from_temp(conflict_update, cur) @remote_api_endpoint('origin_intrinsic_metadata/delete') @db_transaction() def origin_intrinsic_metadata_delete( self, entries, db=None, cur=None): """Remove origin metadata from the storage. Args: entries (dict): dictionaries with the following keys: - **id** (str): origin urls - **indexer_configuration_id** (int): tool used to compute metadata """ db.origin_intrinsic_metadata_delete(entries, cur) @remote_api_endpoint('origin_intrinsic_metadata/search/fulltext') @db_transaction_generator() def origin_intrinsic_metadata_search_fulltext( self, conjunction, limit=100, db=None, cur=None): """Returns the list of origins whose metadata contain all the terms. Args: conjunction (List[str]): List of terms to be searched for. limit (int): The maximum number of results to return Yields: list: dictionaries with the following keys: - **id** (str): origin urls - **from_revision**: sha1 id of the revision used to generate these metadata. 
- **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ for c in db.origin_intrinsic_metadata_search_fulltext( conjunction, limit=limit, cur=cur): yield converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c))) @remote_api_endpoint('origin_intrinsic_metadata/search/by_producer') - @db_transaction_generator() + @db_transaction() def origin_intrinsic_metadata_search_by_producer( - self, start='', end=None, limit=100, ids_only=False, + self, page_token='', limit=100, ids_only=False, mappings=None, tool_ids=None, db=None, cur=None): """Returns the list of origins whose metadata contain all the terms. Args: - start (str): The minimum origin url to return - end (str): The maximum origin url to return + page_token (str): Opaque token used for pagination. limit (int): The maximum number of results to return ids_only (bool): Determines whether only origin urls are returned or the content as well mappings (List[str]): Returns origins whose intrinsic metadata were generated using at least one of these mappings. - Yields: - list: list of origin ids (int) if `ids_only=True`, else - dictionaries with the following keys: + Returns: + dict: dict with the following keys: + - **next_page_token** (str, optional): opaque token to be used as + `page_token` for retrieveing the next page. If absent, there is + no more pages to gather. + - **origins** (list): list of origin url (str) if `ids_only=True` + else dictionaries with the following keys: - **id** (str): origin urls - **from_revision**: sha1 id of the revision used to generate these metadata. 
- **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ + assert isinstance(page_token, str) + # we go to limit+1 to check wether we should add next_page_token in + # the response res = db.origin_intrinsic_metadata_search_by_producer( - start, end, limit, ids_only, mappings, tool_ids, cur) + page_token, limit + 1, ids_only, mappings, tool_ids, cur) + result = {} if ids_only: - for (origin,) in res: - yield origin + result['origins'] = [origin for (origin,) in res] + if len(result['origins']) > limit: + result['origins'] = result['origins'][:limit] + result['next_page_token'] = result['origins'][-1] else: - for c in res: - yield converters.db_to_metadata( - dict(zip(db.origin_intrinsic_metadata_cols, c))) + result['origins'] = [converters.db_to_metadata( + dict(zip(db.origin_intrinsic_metadata_cols, c)))for c in res] + if len(result['origins']) > limit: + result['origins'] = result['origins'][:limit] + result['next_page_token'] = result['origins'][-1]['id'] + return result @remote_api_endpoint('origin_intrinsic_metadata/stats') @db_transaction() def origin_intrinsic_metadata_stats( self, db=None, cur=None): """Returns counts of indexed metadata per origins, broken down into metadata types. Returns: dict: dictionary with keys: - total (int): total number of origins that were indexed (possibly yielding an empty metadata dictionary) - non_empty (int): total number of origins that we extracted a non-empty metadata dictionary from - per_mapping (dict): a dictionary with mapping names as keys and number of origins whose indexing used this mapping. Note that indexing a given origin may use 0, 1, or many mappings. 
""" mapping_names = [m for m in MAPPING_NAMES] select_parts = [] # Count rows for each mapping for mapping_name in mapping_names: select_parts.append(( "sum(case when (mappings @> ARRAY['%s']) " " then 1 else 0 end)" ) % mapping_name) # Total select_parts.append("sum(1)") # Rows whose metadata has at least one key that is not '@context' select_parts.append( "sum(case when ('{}'::jsonb @> (metadata - '@context')) " " then 0 else 1 end)") cur.execute('select ' + ', '.join(select_parts) + ' from origin_intrinsic_metadata') results = dict(zip(mapping_names + ['total', 'non_empty'], cur.fetchone())) return { 'total': results.pop('total'), 'non_empty': results.pop('non_empty'), 'per_mapping': results, } @remote_api_endpoint('indexer_configuration/add') @db_transaction_generator() def indexer_configuration_add(self, tools, db=None, cur=None): """Add new tools to the storage. Args: tools ([dict]): List of dictionary representing tool to insert in the db. Dictionary with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: List of dict inserted in the db (holding the id key as well). The order of the list is not guaranteed to match the order of the initial list. """ db.mktemp_indexer_configuration(cur) db.copy_to(tools, 'tmp_indexer_configuration', ['tool_name', 'tool_version', 'tool_configuration'], cur) tools = db.indexer_configuration_add_from_temp(cur) for line in tools: yield dict(zip(db.indexer_configuration_cols, line)) @remote_api_endpoint('indexer_configuration/data') @db_transaction() def indexer_configuration_get(self, tool, db=None, cur=None): """Retrieve tool information. 
Args: tool (dict): Dictionary representing a tool with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: The same dictionary with an `id` key, None otherwise. """ tool_conf = tool['tool_configuration'] if isinstance(tool_conf, dict): tool_conf = json.dumps(tool_conf) idx = db.indexer_configuration_get(tool['tool_name'], tool['tool_version'], tool_conf) if not idx: return None return dict(zip(db.indexer_configuration_cols, idx)) diff --git a/swh/indexer/storage/db.py b/swh/indexer/storage/db.py index 3c02cf1..fb08d62 100644 --- a/swh/indexer/storage/db.py +++ b/swh/indexer/storage/db.py @@ -1,458 +1,455 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.model import hashutil from swh.core.db import BaseDb from swh.core.db.db_utils import execute_values_generator, stored_procedure class Db(BaseDb): """Proxy to the SWH Indexer DB, with wrappers around stored procedures """ content_mimetype_hash_keys = ['id', 'indexer_configuration_id'] def _missing_from_list(self, table, data, hash_keys, cur=None): """Read from table the data with hash_keys that are missing. Args: table (str): Table name (e.g content_mimetype, content_language, etc...) data (dict): Dict of data to read from hash_keys ([str]): List of keys to read in the data dict. Yields: The data which is missing from the db. 
""" cur = self._cursor(cur) keys = ', '.join(hash_keys) equality = ' AND '.join( ('t.%s = c.%s' % (key, key)) for key in hash_keys ) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(%s) where not exists ( select 1 from %s c where %s ) """ % (keys, keys, table, equality), (tuple(m[k] for k in hash_keys) for m in data) ) def content_mimetype_missing_from_list(self, mimetypes, cur=None): """List missing mimetypes. """ yield from self._missing_from_list( 'content_mimetype', mimetypes, self.content_mimetype_hash_keys, cur=cur) content_mimetype_cols = [ 'id', 'mimetype', 'encoding', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_mimetype') def mktemp_content_mimetype(self, cur=None): pass def content_mimetype_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_mimetype_add(%s)", (conflict_update, )) def _convert_key(self, key, main_table='c'): """Convert keys according to specific use in the module. Args: key (str): Key expression to change according to the alias used in the query main_table (str): Alias to use for the main table. Default to c for content_{something}. Expected: Tables content_{something} being aliased as 'c' (something in {language, mimetype, ...}), table indexer_configuration being aliased as 'i'. """ if key == 'id': return '%s.id' % main_table elif key == 'tool_id': return 'i.id as tool_id' elif key == 'licenses': return ''' array(select name from fossology_license where id = ANY( array_agg(%s.license_id))) as licenses''' % main_table return key def _get_from_list(self, table, ids, cols, cur=None, id_col='id'): """Fetches entries from the `table` such that their `id` field (or whatever is given to `id_col`) is in `ids`. Returns the columns `cols`. The `cur`sor is used to connect to the database. 
""" cur = self._cursor(cur) keys = map(self._convert_key, cols) query = """ select {keys} from (values %s) as t(id) inner join {table} c on c.{id_col}=t.id inner join indexer_configuration i on c.indexer_configuration_id=i.id; """.format( keys=', '.join(keys), id_col=id_col, table=table) yield from execute_values_generator( cur, query, ((_id,) for _id in ids) ) content_indexer_names = { 'mimetype': 'content_mimetype', 'fossology_license': 'content_fossology_license', } def content_get_range(self, content_type, start, end, indexer_configuration_id, limit=1000, with_textual_data=False, cur=None): """Retrieve contents with content_type, within range [start, end] bound by limit and associated to the given indexer configuration id. When asking to work on textual content, that filters on the mimetype table with any mimetype that is not binary. """ cur = self._cursor(cur) table = self.content_indexer_names[content_type] if with_textual_data: extra = """inner join content_mimetype cm on (t.id=cm.id and cm.mimetype like 'text/%%')""" else: extra = "" query = """select t.id from %s t inner join indexer_configuration ic on t.indexer_configuration_id=ic.id %s where ic.id=%%s and %%s <= t.id and t.id <= %%s order by t.indexer_configuration_id, t.id limit %%s""" % (table, extra) cur.execute(query, (indexer_configuration_id, start, end, limit)) yield from cur def content_mimetype_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'content_mimetype', ids, self.content_mimetype_cols, cur=cur) content_language_hash_keys = ['id', 'indexer_configuration_id'] def content_language_missing_from_list(self, languages, cur=None): """List missing languages. 
""" yield from self._missing_from_list( 'content_language', languages, self.content_language_hash_keys, cur=cur) content_language_cols = [ 'id', 'lang', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_language') def mktemp_content_language(self, cur=None): pass def content_language_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_language_add(%s)", (conflict_update, )) def content_language_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'content_language', ids, self.content_language_cols, cur=cur) content_ctags_hash_keys = ['id', 'indexer_configuration_id'] def content_ctags_missing_from_list(self, ctags, cur=None): """List missing ctags. """ yield from self._missing_from_list( 'content_ctags', ctags, self.content_ctags_hash_keys, cur=cur) content_ctags_cols = [ 'id', 'name', 'kind', 'line', 'lang', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_ctags') def mktemp_content_ctags(self, cur=None): pass def content_ctags_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_ctags_add(%s)", (conflict_update, )) def content_ctags_get_from_list(self, ids, cur=None): cur = self._cursor(cur) keys = map(self._convert_key, self.content_ctags_cols) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(id) inner join content_ctags c on c.id=t.id inner join indexer_configuration i on c.indexer_configuration_id=i.id order by line """ % ', '.join(keys), ((_id,) for _id in ids) ) def content_ctags_search(self, expression, last_sha1, limit, cur=None): cur = self._cursor(cur) if not last_sha1: query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s)""" % ( ','.join(self.content_ctags_cols)) cur.execute(query, (expression, limit)) else: if last_sha1 and isinstance(last_sha1, bytes): last_sha1 = '\\x%s' % hashutil.hash_to_hex(last_sha1) elif 
last_sha1: last_sha1 = '\\x%s' % last_sha1 query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s, %%s)""" % ( ','.join(self.content_ctags_cols)) cur.execute(query, (expression, limit, last_sha1)) yield from cur content_fossology_license_cols = [ 'id', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration', 'licenses'] @stored_procedure('swh_mktemp_content_fossology_license') def mktemp_content_fossology_license(self, cur=None): pass def content_fossology_license_add_from_temp(self, conflict_update, cur=None): """Add new licenses per content. """ self._cursor(cur).execute( "SELECT swh_content_fossology_license_add(%s)", (conflict_update, )) def content_fossology_license_get_from_list(self, ids, cur=None): """Retrieve licenses per id. """ cur = self._cursor(cur) keys = map(self._convert_key, self.content_fossology_license_cols) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(id) inner join content_fossology_license c on t.id=c.id inner join indexer_configuration i on i.id=c.indexer_configuration_id group by c.id, i.id, i.tool_name, i.tool_version, i.tool_configuration; """ % ', '.join(keys), ((_id,) for _id in ids) ) content_metadata_hash_keys = ['id', 'indexer_configuration_id'] def content_metadata_missing_from_list(self, metadata, cur=None): """List missing metadata. 
""" yield from self._missing_from_list( 'content_metadata', metadata, self.content_metadata_hash_keys, cur=cur) content_metadata_cols = [ 'id', 'metadata', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_metadata') def mktemp_content_metadata(self, cur=None): pass def content_metadata_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_metadata_add(%s)", (conflict_update, )) def content_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'content_metadata', ids, self.content_metadata_cols, cur=cur) revision_intrinsic_metadata_hash_keys = [ 'id', 'indexer_configuration_id'] def revision_intrinsic_metadata_missing_from_list( self, metadata, cur=None): """List missing metadata. """ yield from self._missing_from_list( 'revision_intrinsic_metadata', metadata, self.revision_intrinsic_metadata_hash_keys, cur=cur) revision_intrinsic_metadata_cols = [ 'id', 'metadata', 'mappings', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_revision_intrinsic_metadata') def mktemp_revision_intrinsic_metadata(self, cur=None): pass def revision_intrinsic_metadata_add_from_temp( self, conflict_update, cur=None): self._cursor(cur).execute( "SELECT swh_revision_intrinsic_metadata_add(%s)", (conflict_update, )) def revision_intrinsic_metadata_delete( self, entries, cur=None): cur = self._cursor(cur) cur.execute( "DELETE from revision_intrinsic_metadata " "WHERE (id, indexer_configuration_id) IN " " (VALUES %s)" % (', '.join('%s' for _ in entries)), tuple((e['id'], e['indexer_configuration_id']) for e in entries),) def revision_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'revision_intrinsic_metadata', ids, self.revision_intrinsic_metadata_cols, cur=cur) origin_intrinsic_metadata_cols = [ 'id', 'metadata', 'from_revision', 'mappings', 'tool_id', 'tool_name', 'tool_version', 
'tool_configuration'] origin_intrinsic_metadata_regconfig = 'pg_catalog.simple' """The dictionary used to normalize 'metadata' and queries. 'pg_catalog.simple' provides no stopword, so it should be suitable for proper names and non-English content. When updating this value, make sure to add a new index on origin_intrinsic_metadata.metadata.""" @stored_procedure('swh_mktemp_origin_intrinsic_metadata') def mktemp_origin_intrinsic_metadata(self, cur=None): pass def origin_intrinsic_metadata_add_from_temp( self, conflict_update, cur=None): cur = self._cursor(cur) cur.execute( "SELECT swh_origin_intrinsic_metadata_add(%s)", (conflict_update, )) def origin_intrinsic_metadata_delete( self, entries, cur=None): cur = self._cursor(cur) cur.execute( "DELETE from origin_intrinsic_metadata " "WHERE (id, indexer_configuration_id) IN" " (VALUES %s)" % (', '.join('%s' for _ in entries)), tuple((e['id'], e['indexer_configuration_id']) for e in entries),) def origin_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'origin_intrinsic_metadata', ids, self.origin_intrinsic_metadata_cols, cur=cur, id_col='id') def origin_intrinsic_metadata_search_fulltext(self, terms, *, limit, cur): regconfig = self.origin_intrinsic_metadata_regconfig tsquery_template = ' && '.join("plainto_tsquery('%s', %%s)" % regconfig for _ in terms) tsquery_args = [(term,) for term in terms] keys = (self._convert_key(col, 'oim') for col in self.origin_intrinsic_metadata_cols) query = ("SELECT {keys} FROM origin_intrinsic_metadata AS oim " "INNER JOIN indexer_configuration AS i " "ON oim.indexer_configuration_id=i.id " "JOIN LATERAL (SELECT {tsquery_template}) AS s(tsq) ON true " "WHERE oim.metadata_tsvector @@ tsq " "ORDER BY ts_rank(oim.metadata_tsvector, tsq, 1) DESC " "LIMIT %s;" ).format(keys=', '.join(keys), tsquery_template=tsquery_template) cur.execute(query, tsquery_args + [limit]) yield from cur def origin_intrinsic_metadata_search_by_producer( - self, start, end, 
limit, ids_only, mappings, tool_ids, cur): + self, last, limit, ids_only, mappings, tool_ids, cur): if ids_only: keys = 'oim.id' else: keys = ', '.join((self._convert_key(col, 'oim') for col in self.origin_intrinsic_metadata_cols)) query_parts = [ "SELECT %s" % keys, "FROM origin_intrinsic_metadata AS oim", "INNER JOIN indexer_configuration AS i", "ON oim.indexer_configuration_id=i.id", ] args = [] where = [] - if start: - where.append('oim.id >= %s') - args.append(start) - if end: - where.append('oim.id <= %s') - args.append(end) + if last: + where.append('oim.id > %s') + args.append(last) if mappings is not None: where.append('oim.mappings && %s') args.append(mappings) if tool_ids is not None: where.append('oim.indexer_configuration_id = ANY(%s)') args.append(tool_ids) if where: query_parts.append('WHERE') query_parts.append(' AND '.join(where)) if limit: query_parts.append('LIMIT %s') args.append(limit) cur.execute(' '.join(query_parts), args) yield from cur indexer_configuration_cols = ['id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_indexer_configuration') def mktemp_indexer_configuration(self, cur=None): pass def indexer_configuration_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("SELECT %s from swh_indexer_configuration_add()" % ( ','.join(self.indexer_configuration_cols), )) yield from cur def indexer_configuration_get(self, tool_name, tool_version, tool_configuration, cur=None): cur = self._cursor(cur) cur.execute('''select %s from indexer_configuration where tool_name=%%s and tool_version=%%s and tool_configuration=%%s''' % ( ','.join(self.indexer_configuration_cols)), (tool_name, tool_version, tool_configuration)) return cur.fetchone() diff --git a/swh/indexer/storage/in_memory.py b/swh/indexer/storage/in_memory.py index a7e395b..d4ea785 100644 --- a/swh/indexer/storage/in_memory.py +++ b/swh/indexer/storage/in_memory.py @@ -1,832 +1,845 @@ # Copyright (C) 2018 The Software Heritage developers # 
See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import bisect from collections import defaultdict, Counter import itertools import json import operator import math import re from . import MAPPING_NAMES SHA1_DIGEST_SIZE = 160 def _transform_tool(tool): return { 'id': tool['id'], 'name': tool['tool_name'], 'version': tool['tool_version'], 'configuration': tool['tool_configuration'], } class SubStorage: """Implements common missing/get/add logic for each indexer type.""" def __init__(self, tools): self._tools = tools self._sorted_ids = [] self._data = {} # map (id_, tool_id) -> metadata_dict self._tools_per_id = defaultdict(set) # map id_ -> Set[tool_id] def missing(self, ids): """List data missing from storage. Args: data (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ for id_ in ids: tool_id = id_['indexer_configuration_id'] id_ = id_['id'] if tool_id not in self._tools_per_id.get(id_, set()): yield id_ def get(self, ids): """Retrieve data per id. Args: ids (iterable): sha1 checksums Yields: dict: dictionaries with the following keys: - **id** (bytes) - **tool** (dict): tool used to compute metadata - arbitrary data (as provided to `add`) """ for id_ in ids: for tool_id in self._tools_per_id.get(id_, set()): key = (id_, tool_id) yield { 'id': id_, 'tool': _transform_tool(self._tools[tool_id]), **self._data[key], } def get_all(self): yield from self.get(self._sorted_ids) def get_range(self, start, end, indexer_configuration_id, limit): """Retrieve data within range [start, end] bound by limit. 
Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result Raises: ValueError for limit to None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ if limit is None: raise ValueError('Development error: limit should not be None') from_index = bisect.bisect_left(self._sorted_ids, start) to_index = bisect.bisect_right(self._sorted_ids, end, lo=from_index) if to_index - from_index >= limit: return { 'ids': self._sorted_ids[from_index:from_index+limit], 'next': self._sorted_ids[from_index+limit], } else: return { 'ids': self._sorted_ids[from_index:to_index], 'next': None, } def add(self, data, conflict_update): """Add data not present in storage. Args: data (iterable): dictionaries with keys: - **id**: sha1 - **indexer_configuration_id**: tool used to compute the results - arbitrary data conflict_update (bool): Flag to determine if we want to overwrite (true) or skip duplicates (false) """ data = list(data) if len({x['id'] for x in data}) < len(data): # For "exception-compatibility" with the pgsql backend raise ValueError('The same id is present more than once.') for item in data: item = item.copy() tool_id = item.pop('indexer_configuration_id') id_ = item.pop('id') data = item if not conflict_update and \ tool_id in self._tools_per_id.get(id_, set()): # Duplicate, should not be updated continue key = (id_, tool_id) self._data[key] = data self._tools_per_id[id_].add(tool_id) if id_ not in self._sorted_ids: bisect.insort(self._sorted_ids, id_) def add_merge(self, new_data, conflict_update, merged_key): for new_item in new_data: id_ = new_item['id'] tool_id = new_item['indexer_configuration_id'] if conflict_update: all_subitems = [] else: existing = list(self.get([id_])) 
all_subitems = [ old_subitem for existing_item in existing if existing_item['tool']['id'] == tool_id for old_subitem in existing_item[merged_key] ] for new_subitem in new_item[merged_key]: if new_subitem not in all_subitems: all_subitems.append(new_subitem) self.add([ { 'id': id_, 'indexer_configuration_id': tool_id, merged_key: all_subitems, } ], conflict_update=True) if id_ not in self._sorted_ids: bisect.insort(self._sorted_ids, id_) def delete(self, entries): for entry in entries: (id_, tool_id) = (entry['id'], entry['indexer_configuration_id']) key = (id_, tool_id) if tool_id in self._tools_per_id[id_]: self._tools_per_id[id_].remove(tool_id) if key in self._data: del self._data[key] class IndexerStorage: """In-memory SWH indexer storage.""" def __init__(self): self._tools = {} self._mimetypes = SubStorage(self._tools) self._languages = SubStorage(self._tools) self._content_ctags = SubStorage(self._tools) self._licenses = SubStorage(self._tools) self._content_metadata = SubStorage(self._tools) self._revision_intrinsic_metadata = SubStorage(self._tools) self._origin_intrinsic_metadata = SubStorage(self._tools) def content_mimetype_missing(self, mimetypes): """Generate mimetypes missing from storage. Args: mimetypes (iterable): iterable of dict with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: tuple (id, indexer_configuration_id): missing id """ yield from self._mimetypes.missing(mimetypes) def content_mimetype_get_range( self, start, end, indexer_configuration_id, limit=1000): """Retrieve mimetypes within range [start, end] bound by limit. 
Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) Raises: ValueError for limit to None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ return self._mimetypes.get_range( start, end, indexer_configuration_id, limit) def content_mimetype_add(self, mimetypes, conflict_update=False): """Add mimetypes not present in storage. Args: mimetypes (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **mimetype** (bytes): raw content's mimetype - **encoding** (bytes): raw content's encoding - **indexer_configuration_id** (int): tool's id used to compute the results - **conflict_update** (bool): Flag to determine if we want to overwrite (``True``) or skip duplicates (``False``, the default) """ if not all(isinstance(x['id'], bytes) for x in mimetypes): raise TypeError('identifiers must be bytes.') self._mimetypes.add(mimetypes, conflict_update) def content_mimetype_get(self, ids, db=None, cur=None): """Retrieve full content mimetype per ids. Args: ids (iterable): sha1 identifier Yields: mimetypes (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **mimetype** (bytes): raw content's mimetype - **encoding** (bytes): raw content's encoding - **tool** (dict): Tool used to compute the language """ yield from self._mimetypes.get(ids) def content_language_missing(self, languages): """List languages missing from storage. 
Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: an iterable of missing id for the tuple (id, indexer_configuration_id) """ yield from self._languages.missing(languages) def content_language_get(self, ids): """Retrieve full content language per ids. Args: ids (iterable): sha1 identifier Yields: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **lang** (bytes): raw content's language - **tool** (dict): Tool used to compute the language """ yield from self._languages.get(ids) def content_language_add(self, languages, conflict_update=False): """Add languages not present in storage. Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 - **lang** (bytes): language detected conflict_update (bool): Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ if not all(isinstance(x['id'], bytes) for x in languages): raise TypeError('identifiers must be bytes.') self._languages.add(languages, conflict_update) def content_ctags_missing(self, ctags): """List ctags missing from storage. Args: ctags (iterable): dicts with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: an iterable of missing id for the tuple (id, indexer_configuration_id) """ yield from self._content_ctags.missing(ctags) def content_ctags_get(self, ids): """Retrieve ctags per id. 
Args: ids (iterable): sha1 checksums Yields: Dictionaries with keys: - **id** (bytes): content's identifier - **name** (str): symbol's name - **kind** (str): symbol's kind - **lang** (str): language for that content - **tool** (dict): tool used to compute the ctags' info """ for item in self._content_ctags.get(ids): for item_ctags_item in item['ctags']: yield { 'id': item['id'], 'tool': item['tool'], **item_ctags_item } def content_ctags_add(self, ctags, conflict_update=False): """Add ctags not present in storage Args: ctags (iterable): dictionaries with keys: - **id** (bytes): sha1 - **ctags** (list): List of dictionaries with keys: name, kind, line, lang - **indexer_configuration_id**: tool used to compute the results """ if not all(isinstance(x['id'], bytes) for x in ctags): raise TypeError('identifiers must be bytes.') self._content_ctags.add_merge(ctags, conflict_update, 'ctags') def content_ctags_search(self, expression, limit=10, last_sha1=None, db=None, cur=None): """Search through content's raw ctags symbols. Args: expression (str): Expression to search for limit (int): Number of rows to return (default to 10). last_sha1 (str): Offset from which retrieving data (default to ''). Yields: rows of ctags including id, name, lang, kind, line, etc... """ nb_matches = 0 for ((id_, tool_id), item) in \ sorted(self._content_ctags._data.items()): if id_ <= (last_sha1 or bytes(0 for _ in range(SHA1_DIGEST_SIZE))): continue for ctags_item in item['ctags']: if ctags_item['name'] != expression: continue nb_matches += 1 yield { 'id': id_, 'tool': _transform_tool(self._tools[tool_id]), **ctags_item } if nb_matches >= limit: return def content_fossology_license_get(self, ids): """Retrieve licenses per id. 
Args: ids (iterable): sha1 checksums Yields: dict: ``{id: facts}`` where ``facts`` is a dict with the following keys: - **licenses** ([str]): associated licenses for that content - **tool** (dict): Tool used to compute the license """ # Rewrites the output of SubStorage.get from the old format to # the new one. SubStorage.get should be updated once all other # *_get methods use the new format. # See: https://forge.softwareheritage.org/T1433 res = {} for d in self._licenses.get(ids): res.setdefault(d.pop('id'), []).append(d) for (id_, facts) in res.items(): yield {id_: facts} def content_fossology_license_add(self, licenses, conflict_update=False): """Add licenses not present in storage. Args: licenses (iterable): dictionaries with keys: - **id**: sha1 - **licenses** ([bytes]): List of licenses associated to sha1 - **tool** (str): nomossa conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) Returns: list: content_license entries which failed due to unknown licenses """ if not all(isinstance(x['id'], bytes) for x in licenses): raise TypeError('identifiers must be bytes.') self._licenses.add_merge(licenses, conflict_update, 'licenses') def content_fossology_license_get_range( self, start, end, indexer_configuration_id, limit=1000): """Retrieve licenses within range [start, end] bound by limit. Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) Raises: ValueError for limit to None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ return self._licenses.get_range( start, end, indexer_configuration_id, limit) def content_metadata_missing(self, metadata): """List metadata missing from storage. 
Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ yield from self._content_metadata.missing(metadata) def content_metadata_get(self, ids): """Retrieve metadata per id. Args: ids (iterable): sha1 checksums Yields: dictionaries with the following keys: - **id** (bytes) - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata """ yield from self._content_metadata.get(ids) def content_metadata_add(self, metadata, conflict_update=False): """Add metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: sha1 - **metadata**: arbitrary dict - **indexer_configuration_id**: tool used to compute the results conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ if not all(isinstance(x['id'], bytes) for x in metadata): raise TypeError('identifiers must be bytes.') self._content_metadata.add(metadata, conflict_update) def revision_intrinsic_metadata_missing(self, metadata): """List metadata missing from storage. Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1_git revision identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing ids """ yield from self._revision_intrinsic_metadata.missing(metadata) def revision_intrinsic_metadata_get(self, ids): """Retrieve revision metadata per id. Args: ids (iterable): sha1 checksums Yields: dictionaries with the following keys: - **id** (bytes) - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ yield from self._revision_intrinsic_metadata.get(ids) def revision_intrinsic_metadata_add(self, metadata, conflict_update=False): """Add metadata not present in storage. 
Args: metadata (iterable): dictionaries with keys: - **id**: sha1_git of revision - **metadata**: arbitrary dict - **indexer_configuration_id**: tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ if not all(isinstance(x['id'], bytes) for x in metadata): raise TypeError('identifiers must be bytes.') self._revision_intrinsic_metadata.add(metadata, conflict_update) def revision_intrinsic_metadata_delete(self, entries): """Remove revision metadata from the storage. Args: entries (dict): dictionaries with the following keys: - **revision** (int): origin identifier - **id** (int): tool used to compute metadata """ self._revision_intrinsic_metadata.delete(entries) def origin_intrinsic_metadata_get(self, ids): """Retrieve origin metadata per id. Args: ids (iterable): origin identifiers Yields: list: dictionaries with the following keys: - **id** (str): origin url - **from_revision** (bytes): which revision this metadata was extracted from - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ yield from self._origin_intrinsic_metadata.get(ids) def origin_intrinsic_metadata_add(self, metadata, conflict_update=False): """Add origin metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: origin url - **from_revision**: sha1 id of the revision used to generate these metadata. 
- **metadata**: arbitrary dict - **indexer_configuration_id**: tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ self._origin_intrinsic_metadata.add(metadata, conflict_update) def origin_intrinsic_metadata_delete(self, entries): """Remove origin metadata from the storage. Args: entries (dict): dictionaries with the following keys: - **id** (str): origin url - **indexer_configuration_id** (int): tool used to compute metadata """ self._origin_intrinsic_metadata.delete(entries) def origin_intrinsic_metadata_search_fulltext( self, conjunction, limit=100): """Returns the list of origins whose metadata contain all the terms. Args: conjunction (List[str]): List of terms to be searched for. limit (int): The maximum number of results to return Yields: list: dictionaries with the following keys: - **id** (str): origin url - **from_revision** (bytes): which revision this metadata was extracted from - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ # A very crude fulltext search implementation, but that's enough # to work on English metadata tokens_re = re.compile('[a-zA-Z0-9]+') search_tokens = list(itertools.chain( *map(tokens_re.findall, conjunction))) def rank(data): # Tokenize the metadata text = json.dumps(data['metadata']) text_tokens = tokens_re.findall(text) text_token_occurences = Counter(text_tokens) # Count the number of occurrences of search tokens in the text score = 0 for search_token in search_tokens: if text_token_occurences[search_token] == 0: # Search token is not in the text. 
return 0 score += text_token_occurences[search_token] # Normalize according to the text's length return score / math.log(len(text_tokens)) results = [(rank(data), data) for data in self._origin_intrinsic_metadata.get_all()] results = [(rank_, data) for (rank_, data) in results if rank_ > 0] results.sort(key=operator.itemgetter(0), # Don't try to order 'data' reverse=True) for (rank_, result) in results[:limit]: yield result def origin_intrinsic_metadata_search_by_producer( - self, start='', end=None, limit=100, ids_only=False, + self, page_token='', limit=100, ids_only=False, mappings=None, tool_ids=None, db=None, cur=None): """Returns the list of origins whose metadata were indexed with the given mappings and/or tools. Args: - start (str): The minimum origin url to return - end (str): The maximum origin url to return + page_token (str): Opaque token used for pagination. limit (int): The maximum number of results to return ids_only (bool): Determines whether only origin ids are returned or the content as well mappings (List[str]): Returns origins whose intrinsic metadata were generated using at least one of these mappings. - Yields: - list: list of origin ids (int) if `ids_only=True`, else - dictionaries with the following keys: + Returns: + dict: dict with the following keys: + - **next_page_token** (str, optional): opaque token to be used as + `page_token` for retrieving the next page. + - **origins** (list): list of origin urls (str) if `ids_only=True` + else dictionaries with the following keys: - - **id** (str): origin url + - **id** (str): origin url - **from_revision**: sha1 id of the revision used to generate these metadata. 
- **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ + assert isinstance(page_token, str) nb_results = 0 if mappings is not None: mappings = frozenset(mappings) if tool_ids is not None: tool_ids = frozenset(tool_ids) + origins = [] + + # we go to limit+1 to check whether we should add next_page_token in + # the response for entry in self._origin_intrinsic_metadata.get_all(): - if entry['id'] < start or (end and entry['id'] > end): + if entry['id'] <= page_token: continue - if nb_results >= limit: - return + if nb_results >= (limit + 1): + break if mappings is not None and mappings.isdisjoint(entry['mappings']): continue if tool_ids is not None and entry['tool']['id'] not in tool_ids: continue - if ids_only: - yield entry['id'] - else: - yield entry + origins.append(entry) nb_results += 1 + result = {} + if len(origins) > limit: + origins = origins[:limit] + result['next_page_token'] = origins[-1]['id'] + if ids_only: + origins = [origin['id'] for origin in origins] + result['origins'] = origins + return result + def origin_intrinsic_metadata_stats(self): """Returns statistics on stored intrinsic metadata. Returns: dict: dictionary with keys: - total (int): total number of origins that were indexed (possibly yielding an empty metadata dictionary) - non_empty (int): total number of origins that we extracted a non-empty metadata dictionary from - per_mapping (dict): a dictionary with mapping names as keys and number of origins whose indexing used this mapping. Note that indexing a given origin may use 0, 1, or many mappings. 
""" mapping_count = {m: 0 for m in MAPPING_NAMES} total = non_empty = 0 for data in self._origin_intrinsic_metadata.get_all(): total += 1 if set(data['metadata']) - {'@context'}: non_empty += 1 for mapping in data['mappings']: mapping_count[mapping] += 1 return { 'per_mapping': mapping_count, 'total': total, 'non_empty': non_empty } def indexer_configuration_add(self, tools): """Add new tools to the storage. Args: tools ([dict]): List of dictionary representing tool to insert in the db. Dictionary with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: list: List of dict inserted in the db (holding the id key as well). The order of the list is not guaranteed to match the order of the initial list. """ inserted = [] for tool in tools: tool = tool.copy() id_ = self._tool_key(tool) tool['id'] = id_ self._tools[id_] = tool inserted.append(tool) return inserted def indexer_configuration_get(self, tool): """Retrieve tool information. Args: tool (dict): Dictionary representing a tool with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: The same dictionary with an `id` key, None otherwise. 
""" return self._tools.get(self._tool_key(tool)) def _tool_key(self, tool): return hash((tool['tool_name'], tool['tool_version'], json.dumps(tool['tool_configuration'], sort_keys=True))) diff --git a/swh/indexer/tests/storage/conftest.py b/swh/indexer/tests/storage/conftest.py new file mode 100644 index 0000000..d2f8f9f --- /dev/null +++ b/swh/indexer/tests/storage/conftest.py @@ -0,0 +1,125 @@ +# Copyright (C) 2015-2019 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from os.path import join +import pytest + +from . import SQL_DIR +from swh.storage.tests.conftest import postgresql_fact +from swh.indexer.storage import get_indexer_storage +from swh.model.hashutil import hash_to_bytes +from .generate_data_test import MIMETYPE_OBJECTS, FOSSOLOGY_LICENSES + + +DUMP_FILES = join(SQL_DIR, '*.sql') + +TOOLS = [ + { + 'tool_name': 'universal-ctags', + 'tool_version': '~git7859817b', + 'tool_configuration': { + "command_line": "ctags --fields=+lnz --sort=no --links=no " + "--output-format=json "} + }, + { + 'tool_name': 'swh-metadata-translator', + 'tool_version': '0.0.1', + 'tool_configuration': {"type": "local", "context": "NpmMapping"}, + }, + { + 'tool_name': 'swh-metadata-detector', + 'tool_version': '0.0.1', + 'tool_configuration': { + "type": "local", "context": ["NpmMapping", "CodemetaMapping"]}, + }, + { + 'tool_name': 'swh-metadata-detector2', + 'tool_version': '0.0.1', + 'tool_configuration': { + "type": "local", "context": ["NpmMapping", "CodemetaMapping"]}, + }, + { + 'tool_name': 'file', + 'tool_version': '5.22', + 'tool_configuration': {"command_line": "file --mime "}, + }, + { + 'tool_name': 'pygments', + 'tool_version': '2.0.1+dfsg-1.1+deb8u1', + 'tool_configuration': { + "type": "library", "debian-package": "python3-pygments"}, + }, + { + 'tool_name': 'pygments2', + 
'tool_version': '2.0.1+dfsg-1.1+deb8u1', + 'tool_configuration': { + "type": "library", + "debian-package": "python3-pygments", + "max_content_size": 10240 + }, + }, + { + 'tool_name': 'nomos', + 'tool_version': '3.1.0rc2-31-ga2cbb8c', + 'tool_configuration': {"command_line": "nomossa "}, + } +] + + +class DataObj(dict): + def __getattr__(self, key): + return self.__getitem__(key) + + def __setattr__(self, key, value): + return self.__setitem__(key, value) + + +@pytest.fixture +def swh_indexer_storage_with_data(swh_indexer_storage): + data = DataObj() + tools = { + tool['tool_name']: { + 'id': tool['id'], + 'name': tool['tool_name'], + 'version': tool['tool_version'], + 'configuration': tool['tool_configuration'], + } + for tool in swh_indexer_storage.indexer_configuration_add(TOOLS)} + data.tools = tools + data.sha1_1 = hash_to_bytes( + '34973274ccef6ab4dfaaf86599792fa9c3fe4689') + data.sha1_2 = hash_to_bytes( + '61c2b3a30496d329e21af70dd2d7e097046d07b7') + data.revision_id_1 = hash_to_bytes( + '7026b7c1a2af56521e951c01ed20f255fa054238') + data.revision_id_2 = hash_to_bytes( + '7026b7c1a2af56521e9587659012345678904321') + data.revision_id_3 = hash_to_bytes( + '7026b7c1a2af56521e9587659012345678904320') + data.origin_url_1 = 'file:///dev/0/zero' # 44434341 + data.origin_url_2 = 'file:///dev/1/one' # 44434342 + data.origin_url_3 = 'file:///dev/2/two' # 54974445 + data.mimetypes = MIMETYPE_OBJECTS[:] + swh_indexer_storage.content_mimetype_add( + MIMETYPE_OBJECTS) + data.fossology_licenses = FOSSOLOGY_LICENSES[:] + swh_indexer_storage._test_data = data + + return (swh_indexer_storage, data) + + +swh_indexer_storage_postgresql = postgresql_fact( + 'postgresql_proc', dump_files=DUMP_FILES) + + +@pytest.fixture +def swh_indexer_storage(swh_indexer_storage_postgresql): + storage_config = { + 'cls': 'local', + 'args': { + 'db': swh_indexer_storage_postgresql.dsn, + }, + } + return get_indexer_storage(**storage_config) diff --git 
a/swh/indexer/tests/storage/generate_data_test.py b/swh/indexer/tests/storage/generate_data_test.py index 80500d4..5df332a 100644 --- a/swh/indexer/tests/storage/generate_data_test.py +++ b/swh/indexer/tests/storage/generate_data_test.py @@ -1,135 +1,153 @@ -# Copyright (C) 2018 The Software Heritage developers +# Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +from uuid import uuid1 + from swh.model.hashutil import MultiHash from hypothesis.strategies import (composite, sets, one_of, uuids, tuples, sampled_from) MIMETYPES = [ b'application/json', b'application/octet-stream', b'application/xml', b'text/plain', ] ENCODINGS = [ b'iso8859-1', b'iso8859-15', b'latin1', b'utf-8', ] def gen_mimetype(): """Generate one mimetype strategy. """ return one_of(sampled_from(MIMETYPES)) def gen_encoding(): """Generate one encoding strategy. """ return one_of(sampled_from(ENCODINGS)) def _init_content(uuid): """Given a uuid, initialize a content """ return { 'id': MultiHash.from_data(uuid.bytes, {'sha1'}).digest()['sha1'], 'indexer_configuration_id': 1, } @composite def gen_content_mimetypes(draw, *, min_size=0, max_size=100): """Generate valid and consistent content_mimetypes. Context: Test purposes Args: **draw** (callable): Used by hypothesis to generate data **min_size** (int): Minimal number of elements to generate (default: 0) **max_size** (int): Maximal number of elements to generate (default: 100) Returns: List of content_mimetypes as expected by the content_mimetype_add api endpoint. 
""" _ids = draw( sets( tuples( uuids(), gen_mimetype(), gen_encoding() ), min_size=min_size, max_size=max_size ) ) content_mimetypes = [] for uuid, mimetype, encoding in _ids: content_mimetypes.append({ **_init_content(uuid), 'mimetype': mimetype, 'encoding': encoding, }) return content_mimetypes -FOSSOLOGY_LICENSES = [ +MIMETYPE_OBJECTS = [ + {'id': MultiHash.from_data(uuid1().bytes, {'sha1'}).digest()['sha1'], + 'indexer_configuration_id': 1, + 'mimetype': mt, + 'encoding': enc, + } + for mt in MIMETYPES + for enc in ENCODINGS] + +LICENSES = [ b'3DFX', b'BSD', b'GPL', b'Apache2', b'MIT', ] +FOSSOLOGY_LICENSES = [ + {'id': MultiHash.from_data(uuid1().bytes, {'sha1'}).digest()['sha1'], + 'indexer_configuration_id': 1, + 'licenses': [LICENSES[i % len(LICENSES)], ], + } + for i in range(10) + ] + def gen_license(): - return one_of(sampled_from(FOSSOLOGY_LICENSES)) + return one_of(sampled_from(LICENSES)) @composite def gen_content_fossology_licenses(draw, *, min_size=0, max_size=100): """Generate valid and consistent content_fossology_licenses. Context: Test purposes Args: **draw** (callable): Used by hypothesis to generate data **min_size** (int): Minimal number of elements to generate (default: 0) **max_size** (int): Maximal number of elements to generate (default: 100) Returns: List of content_fossology_licenses as expected by the content_fossology_license_add api endpoint. 
""" _ids = draw( sets( tuples( uuids(), gen_license(), ), min_size=min_size, max_size=max_size ) ) content_licenses = [] for uuid, license in _ids: content_licenses.append({ **_init_content(uuid), 'licenses': [license], - 'indexer_configuration_id': 1, }) return content_licenses diff --git a/swh/indexer/tests/storage/test_api_client.py b/swh/indexer/tests/storage/test_api_client.py index a139099..4fe100c 100644 --- a/swh/indexer/tests/storage/test_api_client.py +++ b/swh/indexer/tests/storage/test_api_client.py @@ -1,38 +1,42 @@ -# Copyright (C) 2015-2018 The Software Heritage developers +# Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import unittest +import pytest -from swh.core.api.tests.server_testing import ServerTestFixture -from swh.indexer.storage import INDEXER_CFG_KEY from swh.indexer.storage.api.client import RemoteStorage -from swh.indexer.storage.api.server import app - -from .test_storage import CommonTestStorage, BasePgTestStorage - - -class TestRemoteStorage(CommonTestStorage, ServerTestFixture, - BasePgTestStorage, unittest.TestCase): - """Test the indexer's remote storage API. - - This class doesn't define any tests as we want identical - functionality between local and remote storage. All the tests are - therefore defined in - `class`:swh.indexer.storage.test_storage.CommonTestStorage. 
- - """ - - def setUp(self): - self.config = { - INDEXER_CFG_KEY: { - 'cls': 'local', - 'args': { - 'db': 'dbname=%s' % self.TEST_DB_NAME, - } - } - } - self.app = app - super().setUp() - self.storage = RemoteStorage(self.url()) +import swh.indexer.storage.api.server as server + +from swh.indexer.storage import get_indexer_storage + +from .test_storage import * # noqa + + +@pytest.fixture +def app(swh_indexer_storage_postgresql): + storage_config = { + 'cls': 'local', + 'args': { + 'db': swh_indexer_storage_postgresql.dsn, + }, + } + server.storage = get_indexer_storage(**storage_config) + return server.app + + +@pytest.fixture +def swh_rpc_client_class(): + # these are needed for the swh_indexer_storage_with_data fixture + assert hasattr(RemoteStorage, 'indexer_configuration_add') + assert hasattr(RemoteStorage, 'content_mimetype_add') + return RemoteStorage + + +@pytest.fixture +def swh_indexer_storage(swh_rpc_client, app): + # This version of the swh_storage fixture uses the swh_rpc_client fixture + # to instantiate a RemoteStorage (see swh_rpc_client_class above) that + # proxies, via the swh.core RPC mechanism, the local (in memory) storage + # configured in the app fixture above. 
+ return swh_rpc_client diff --git a/swh/indexer/tests/storage/test_converters.py b/swh/indexer/tests/storage/test_converters.py index 9c4cd88..0a5a0f7 100644 --- a/swh/indexer/tests/storage/test_converters.py +++ b/swh/indexer/tests/storage/test_converters.py @@ -1,188 +1,188 @@ -# Copyright (C) 2015-2018 The Software Heritage developers +# Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.indexer.storage import converters class TestConverters(unittest.TestCase): def setUp(self): self.maxDiff = None def test_ctags_to_db(self): input_ctag = { 'id': b'some-id', 'indexer_configuration_id': 100, 'ctags': [ { 'name': 'some-name', 'kind': 'some-kind', 'line': 10, 'lang': 'Yaml', }, { 'name': 'main', 'kind': 'function', 'line': 12, 'lang': 'Yaml', }, ] } expected_ctags = [ { 'id': b'some-id', 'name': 'some-name', 'kind': 'some-kind', 'line': 10, 'lang': 'Yaml', 'indexer_configuration_id': 100, }, { 'id': b'some-id', 'name': 'main', 'kind': 'function', 'line': 12, 'lang': 'Yaml', 'indexer_configuration_id': 100, }] # when actual_ctags = list(converters.ctags_to_db(input_ctag)) # then self.assertEqual(actual_ctags, expected_ctags) def test_db_to_ctags(self): input_ctags = { 'id': b'some-id', 'name': 'some-name', 'kind': 'some-kind', 'line': 10, 'lang': 'Yaml', 'tool_id': 200, 'tool_name': 'some-toolname', 'tool_version': 'some-toolversion', 'tool_configuration': {} } expected_ctags = { 'id': b'some-id', 'name': 'some-name', 'kind': 'some-kind', 'line': 10, 'lang': 'Yaml', 'tool': { 'id': 200, 'name': 'some-toolname', 'version': 'some-toolversion', 'configuration': {}, } } # when actual_ctags = converters.db_to_ctags(input_ctags) # then self.assertEqual(actual_ctags, expected_ctags) def test_db_to_mimetype(self): input_mimetype = { 'id': b'some-id', 
'tool_id': 10, 'tool_name': 'some-toolname', 'tool_version': 'some-toolversion', 'tool_configuration': {}, 'encoding': b'ascii', 'mimetype': b'text/plain', } expected_mimetype = { 'id': b'some-id', 'encoding': b'ascii', 'mimetype': b'text/plain', 'tool': { 'id': 10, 'name': 'some-toolname', 'version': 'some-toolversion', 'configuration': {}, } } actual_mimetype = converters.db_to_mimetype(input_mimetype) self.assertEqual(actual_mimetype, expected_mimetype) def test_db_to_language(self): input_language = { 'id': b'some-id', 'tool_id': 20, 'tool_name': 'some-toolname', 'tool_version': 'some-toolversion', 'tool_configuration': {}, 'lang': b'css', } expected_language = { 'id': b'some-id', 'lang': b'css', 'tool': { 'id': 20, 'name': 'some-toolname', 'version': 'some-toolversion', 'configuration': {}, } } actual_language = converters.db_to_language(input_language) self.assertEqual(actual_language, expected_language) def test_db_to_fossology_license(self): input_license = { 'id': b'some-id', 'tool_id': 20, 'tool_name': 'nomossa', 'tool_version': '5.22', 'tool_configuration': {}, 'licenses': ['GPL2.0'], } expected_license = { 'licenses': ['GPL2.0'], 'tool': { 'id': 20, 'name': 'nomossa', 'version': '5.22', 'configuration': {}, } } actual_license = converters.db_to_fossology_license(input_license) self.assertEqual(actual_license, expected_license) def test_db_to_metadata(self): input_metadata = { 'id': b'some-id', 'tool_id': 20, 'tool_name': 'some-toolname', 'tool_version': 'some-toolversion', 'tool_configuration': {}, 'metadata': b'metadata', } expected_metadata = { 'id': b'some-id', 'metadata': b'metadata', 'tool': { 'id': 20, 'name': 'some-toolname', 'version': 'some-toolversion', 'configuration': {}, } } actual_metadata = converters.db_to_metadata(input_metadata) self.assertEqual(actual_metadata, expected_metadata) diff --git a/swh/indexer/tests/storage/test_in_memory.py b/swh/indexer/tests/storage/test_in_memory.py index 8992bff..d49a079 100644 --- 
a/swh/indexer/tests/storage/test_in_memory.py +++ b/swh/indexer/tests/storage/test_in_memory.py @@ -1,19 +1,21 @@ -from unittest import TestCase +# Copyright (C) 2015-2019 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information -from .test_storage import CommonTestStorage +import pytest +from swh.indexer.storage import get_indexer_storage -class IndexerTestInMemoryStorage(CommonTestStorage, TestCase): - def setUp(self): - self.storage_config = { - 'cls': 'memory', - 'args': { - }, - } - super().setUp() +from .test_storage import * # noqa - def reset_storage_tables(self): - self.storage = self.storage.__class__() - def test_check_config(self): - pass +@pytest.fixture +def swh_indexer_storage(swh_indexer_storage_postgresql): + storage_config = { + 'cls': 'local', + 'args': { + 'db': swh_indexer_storage_postgresql.dsn, + }, + } + return get_indexer_storage(**storage_config) diff --git a/swh/indexer/tests/storage/test_storage.py b/swh/indexer/tests/storage/test_storage.py index 5167058..c20bba1 100644 --- a/swh/indexer/tests/storage/test_storage.py +++ b/swh/indexer/tests/storage/test_storage.py @@ -1,1972 +1,1852 @@ -# Copyright (C) 2015-2018 The Software Heritage developers +# Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import os import threading -import unittest - import pytest -from hypothesis import given - from swh.model.hashutil import hash_to_bytes -from swh.indexer.storage import get_indexer_storage, MAPPING_NAMES -from swh.core.db.tests.db_testing import SingleDbTestFixture -from swh.indexer.tests.storage.generate_data_test import ( - gen_content_mimetypes, gen_content_fossology_licenses -) 
-from swh.indexer.tests.storage import SQL_DIR -from swh.indexer.metadata_dictionary import MAPPINGS - -TOOLS = [ - { - 'tool_name': 'universal-ctags', - 'tool_version': '~git7859817b', - 'tool_configuration': { - "command_line": "ctags --fields=+lnz --sort=no --links=no " - "--output-format=json "} - }, - { - 'tool_name': 'swh-metadata-translator', - 'tool_version': '0.0.1', - 'tool_configuration': {"type": "local", "context": "NpmMapping"}, - }, - { - 'tool_name': 'swh-metadata-detector', - 'tool_version': '0.0.1', - 'tool_configuration': { - "type": "local", "context": ["NpmMapping", "CodemetaMapping"]}, - }, - { - 'tool_name': 'swh-metadata-detector2', - 'tool_version': '0.0.1', - 'tool_configuration': { - "type": "local", "context": ["NpmMapping", "CodemetaMapping"]}, - }, - { - 'tool_name': 'file', - 'tool_version': '5.22', - 'tool_configuration': {"command_line": "file --mime "}, - }, - { - 'tool_name': 'pygments', - 'tool_version': '2.0.1+dfsg-1.1+deb8u1', - 'tool_configuration': { - "type": "library", "debian-package": "python3-pygments"}, - }, - { - 'tool_name': 'pygments', - 'tool_version': '2.0.1+dfsg-1.1+deb8u1', - 'tool_configuration': { - "type": "library", - "debian-package": "python3-pygments", - "max_content_size": 10240 - }, - }, - { - 'tool_name': 'nomos', - 'tool_version': '3.1.0rc2-31-ga2cbb8c', - 'tool_configuration': {"command_line": "nomossa "}, - } -] - -@pytest.mark.db -class BasePgTestStorage(SingleDbTestFixture): - """Base test class for most indexer tests. +def prepare_mimetypes_from(fossology_licenses): + """Fossology license needs some consistent data in db to run. - It adds support for Storage testing to the SingleDbTestFixture class. - It will also build the database from the swh-indexed/sql/*.sql files. 
""" + mimetypes = [] + for c in fossology_licenses: + mimetypes.append({ + 'id': c['id'], + 'mimetype': 'text/plain', + 'encoding': 'utf-8', + 'indexer_configuration_id': c['indexer_configuration_id'], + }) + return mimetypes - TEST_DB_NAME = 'softwareheritage-test-indexer' - TEST_DB_DUMP = os.path.join(SQL_DIR, '*.sql') - - def setUp(self): - super().setUp() - self.storage_config = { - 'cls': 'local', - 'args': { - 'db': 'dbname=%s' % self.TEST_DB_NAME, - }, - } - - def tearDown(self): - self.reset_storage_tables() - self.storage = None - super().tearDown() - def reset_storage_tables(self): - excluded = {'indexer_configuration'} - self.reset_db_tables(self.TEST_DB_NAME, excluded=excluded) +def endpoint(storage, endpoint_type, endpoint_name): + return getattr(storage, endpoint_type + '_' + endpoint_name) - db = self.test_db[self.TEST_DB_NAME] - db.conn.commit() +class StorageETypeTester: + """Base class for testing a series of common behaviour between a bunch of + endpoint types supported by an IndexerStorage. -def gen_generic_endpoint_tests(endpoint_type, tool_name, - example_data1, example_data2): - def rename(f): - f.__name__ = 'test_' + endpoint_type + f.__name__ - return f + This is supposed to be inherited with the following class attributes: + - endpoint_type + - tool_name + - example_data - def endpoint(self, endpoint_name): - return getattr(self.storage, endpoint_type + '_' + endpoint_name) + See below for example usage. 
+ """ - @rename - def missing(self): - # given - tool_id = self.tools[tool_name]['id'] + def test_missing(self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool_id = data.tools[self.tool_name]['id'] + # given 2 (hopefully) unknown objects query = [ { - 'id': self.sha1_1, + 'id': data.sha1_1, 'indexer_configuration_id': tool_id, }, { - 'id': self.sha1_2, + 'id': data.sha1_2, 'indexer_configuration_id': tool_id, }] - # when - actual_missing = endpoint(self, 'missing')(query) - - # then - self.assertEqual(list(actual_missing), [ - self.sha1_1, - self.sha1_2, - ]) + # we expect these are both returned by the xxx_missing endpoint + actual_missing = endpoint(storage, etype, 'missing')(query) + assert list(actual_missing) == [ + data.sha1_1, + data.sha1_2, + ] - # given - endpoint(self, 'add')([{ - 'id': self.sha1_2, - **example_data1, + # now, when we add one of them + endpoint(storage, etype, 'add')([{ + 'id': data.sha1_2, + **self.example_data[0], 'indexer_configuration_id': tool_id, }]) - # when - actual_missing = endpoint(self, 'missing')(query) - - # then - self.assertEqual(list(actual_missing), [self.sha1_1]) + # we expect only the other one returned + actual_missing = endpoint(storage, etype, 'missing')(query) + assert list(actual_missing) == [data.sha1_1] - @rename - def add__drop_duplicate(self): - # given - tool_id = self.tools[tool_name]['id'] + def test_add__drop_duplicate(self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool_id = data.tools[self.tool_name]['id'] + # add the first object data_v1 = { - 'id': self.sha1_2, - **example_data1, + 'id': data.sha1_2, + **self.example_data[0], 'indexer_configuration_id': tool_id, } + endpoint(storage, etype, 'add')([data_v1]) - # given - endpoint(self, 'add')([data_v1]) - - # when - actual_data = list(endpoint(self, 'get')([self.sha1_2])) - - # then + # should be able to retrieve it + 
actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v1 = [{ - 'id': self.sha1_2, - **example_data1, - 'tool': self.tools[tool_name], + 'id': data.sha1_2, + **self.example_data[0], + 'tool': data.tools[self.tool_name], }] - self.assertEqual(actual_data, expected_data_v1) + assert actual_data == expected_data_v1 - # given + # now if we add a modified version of the same object (same id) data_v2 = data_v1.copy() - data_v2.update(example_data2) + data_v2.update(self.example_data[1]) + endpoint(storage, etype, 'add')([data_v2]) - endpoint(self, 'add')([data_v2]) + # we expect to retrieve the original data, not the modified one + actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) + assert actual_data == expected_data_v1 - actual_data = list(endpoint(self, 'get')([self.sha1_2])) - - # data did not change as the v2 was dropped. - self.assertEqual(actual_data, expected_data_v1) - - @rename - def add__update_in_place_duplicate(self): - # given - tool_id = self.tools[tool_name]['id'] + def test_add__update_in_place_duplicate( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool = data.tools[self.tool_name] data_v1 = { - 'id': self.sha1_2, - **example_data1, - 'indexer_configuration_id': tool_id, + 'id': data.sha1_2, + **self.example_data[0], + 'indexer_configuration_id': tool['id'], } # given - endpoint(self, 'add')([data_v1]) + endpoint(storage, etype, 'add')([data_v1]) # when - actual_data = list(endpoint(self, 'get')([self.sha1_2])) + actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v1 = [{ - 'id': self.sha1_2, - **example_data1, - 'tool': self.tools[tool_name], + 'id': data.sha1_2, + **self.example_data[0], + 'tool': tool, }] # then - self.assertEqual(actual_data, expected_data_v1) + assert actual_data == expected_data_v1 # given data_v2 = data_v1.copy() - data_v2.update(example_data2) + data_v2.update(self.example_data[1]) - 
endpoint(self, 'add')([data_v2], conflict_update=True) + endpoint(storage, etype, 'add')([data_v2], conflict_update=True) - actual_data = list(endpoint(self, 'get')([self.sha1_2])) + actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v2 = [{ - 'id': self.sha1_2, - **example_data2, - 'tool': self.tools[tool_name], + 'id': data.sha1_2, + **self.example_data[1], + 'tool': tool, }] # data did change as the v2 was used to overwrite v1 - self.assertEqual(actual_data, expected_data_v2) + assert actual_data == expected_data_v2 - @rename - def add__update_in_place_deadlock(self): - # given - tool_id = self.tools[tool_name]['id'] + def test_add__update_in_place_deadlock( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool = data.tools[self.tool_name] hashes = [ hash_to_bytes( '34973274ccef6ab4dfaaf86599792fa9c3fe4{:03d}'.format(i)) for i in range(1000)] data_v1 = [ { 'id': hash_, - **example_data1, - 'indexer_configuration_id': tool_id, + **self.example_data[0], + 'indexer_configuration_id': tool['id'], } for hash_ in hashes ] data_v2 = [ { 'id': hash_, - **example_data2, - 'indexer_configuration_id': tool_id, + **self.example_data[1], + 'indexer_configuration_id': tool['id'], } for hash_ in hashes ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. 
data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given - endpoint(self, 'add')(data_v1) + endpoint(storage, etype, 'add')(data_v1) # when - actual_data = list(endpoint(self, 'get')(hashes)) + actual_data = list(endpoint(storage, etype, 'get')(hashes)) expected_data_v1 = [ { 'id': hash_, - **example_data1, - 'tool': self.tools[tool_name], + **self.example_data[0], + 'tool': tool, } for hash_ in hashes ] # then - self.assertEqual(actual_data, expected_data_v1) + assert actual_data == expected_data_v1 # given def f1(): - endpoint(self, 'add')(data_v2a, conflict_update=True) + endpoint(storage, etype, 'add')(data_v2a, conflict_update=True) def f2(): - endpoint(self, 'add')(data_v2b, conflict_update=True) + endpoint(storage, etype, 'add')(data_v2b, conflict_update=True) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() - actual_data = list(endpoint(self, 'get')(hashes)) + actual_data = sorted(endpoint(storage, etype, 'get')(hashes), + key=lambda x: x['id']) expected_data_v2 = [ { 'id': hash_, - **example_data2, - 'tool': self.tools[tool_name], + **self.example_data[1], + 'tool': tool, } for hash_ in hashes ] - self.assertCountEqual(actual_data, expected_data_v2) + assert actual_data == expected_data_v2 - def add__duplicate_twice(self): - # given - tool_id = self.tools[tool_name]['id'] + def test_add__duplicate_twice(self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool = data.tools[self.tool_name] data_rev1 = { - 'id': self.revision_id_2, - **example_data1, - 'indexer_configuration_id': tool_id + 'id': data.revision_id_2, + **self.example_data[0], + 'indexer_configuration_id': tool['id'] } data_rev2 = { - 'id': self.revision_id_2, - **example_data2, - 'indexer_configuration_id': tool_id + 'id': data.revision_id_2, + **self.example_data[1], + 'indexer_configuration_id': tool['id'] } # when - endpoint(self, 'add')([data_rev1]) + 
endpoint(storage, etype, 'add')([data_rev1]) - with self.assertRaises(ValueError): - endpoint(self, 'add')( + with pytest.raises(ValueError): + endpoint(storage, etype, 'add')( [data_rev2, data_rev2], conflict_update=True) # then - actual_data = list(endpoint(self, 'get')( - [self.revision_id_2, self.revision_id_1])) + actual_data = list(endpoint(storage, etype, 'get')( + [data.revision_id_2, data.revision_id_1])) expected_data = [{ - 'id': self.revision_id_2, - **example_data1, - 'tool': self.tools[tool_name] + 'id': data.revision_id_2, + **self.example_data[0], + 'tool': tool, }] - self.assertEqual(actual_data, expected_data) - - @rename - def get(self): - # given - tool_id = self.tools[tool_name]['id'] + assert actual_data == expected_data - query = [self.sha1_2, self.sha1_1] + def test_get(self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool = data.tools[self.tool_name] + query = [data.sha1_2, data.sha1_1] data1 = { - 'id': self.sha1_2, - **example_data1, - 'indexer_configuration_id': tool_id, + 'id': data.sha1_2, + **self.example_data[0], + 'indexer_configuration_id': tool['id'], } # when - endpoint(self, 'add')([data1]) + endpoint(storage, etype, 'add')([data1]) # then - actual_data = list(endpoint(self, 'get')(query)) + actual_data = list(endpoint(storage, etype, 'get')(query)) # then expected_data = [{ - 'id': self.sha1_2, - **example_data1, - 'tool': self.tools[tool_name] + 'id': data.sha1_2, + **self.example_data[0], + 'tool': tool, }] - self.assertEqual(actual_data, expected_data) + assert actual_data == expected_data - @rename - def delete(self): - # given - tool_id = self.tools[tool_name]['id'] - query = [self.sha1_2, self.sha1_1] +class TestIndexerStorageContentMimetypes(StorageETypeTester): + """Test Indexer Storage content_mimetype related methods + """ + endpoint_type = 'content_mimetype' + tool_name = 'file' + example_data = [ + { + 'mimetype': 'text/plain', + 'encoding': 
'utf-8', + }, + { + 'mimetype': 'text/html', + 'encoding': 'us-ascii', + }, + ] + + def test_generate_content_mimetype_get_range_limit_none( + self, swh_indexer_storage): + """mimetype_get_range call with wrong limit input should fail""" + storage = swh_indexer_storage + with pytest.raises(ValueError) as e: + storage.content_mimetype_get_range( + start=None, end=None, indexer_configuration_id=None, + limit=None) - data1 = { - 'id': self.sha1_2, - **example_data1, - 'indexer_configuration_id': tool_id, - } + assert e.value.args == ( + 'Development error: limit should not be None',) - # when - endpoint(self, 'add')([data1]) - endpoint(self, 'delete')([ - { - 'id': self.sha1_2, - 'indexer_configuration_id': tool_id, - } - ]) + def test_generate_content_mimetype_get_range_no_limit( + self, swh_indexer_storage_with_data): + """mimetype_get_range returns mimetypes within range provided""" + storage, data = swh_indexer_storage_with_data + mimetypes = data.mimetypes - # then - actual_data = list(endpoint(self, 'get')(query)) + # All ids from the db + content_ids = sorted([c['id'] for c in mimetypes]) - # then - self.assertEqual(actual_data, []) + start = content_ids[0] + end = content_ids[-1] - @rename - def delete_nonexisting(self): - tool_id = self.tools[tool_name]['id'] - endpoint(self, 'delete')([ - { - 'id': self.sha1_2, - 'indexer_configuration_id': tool_id, - } - ]) + # retrieve mimetypes + tool_id = mimetypes[0]['indexer_configuration_id'] + actual_result = storage.content_mimetype_get_range( + start, end, indexer_configuration_id=tool_id) + + actual_ids = actual_result['ids'] + actual_next = actual_result['next'] - return ( - missing, - add__drop_duplicate, - add__update_in_place_duplicate, - add__update_in_place_deadlock, - add__duplicate_twice, - get, - delete, - delete_nonexisting, - ) + assert len(mimetypes) == len(actual_ids) + assert actual_next is None + assert content_ids == actual_ids + def test_generate_content_mimetype_get_range_limit( + self, 
swh_indexer_storage_with_data): + """mimetype_get_range paginates results if limit exceeded""" + storage, data = swh_indexer_storage_with_data -class CommonTestStorage: - """Base class for Indexer Storage testing. + # input the list of sha1s we want from storage + content_ids = sorted( + [c['id'] for c in data.mimetypes]) + mimetypes = list(storage.content_mimetype_get(content_ids)) + assert len(mimetypes) == len(data.mimetypes) - """ - def setUp(self, *args, **kwargs): - super().setUp() - self.storage = get_indexer_storage(**self.storage_config) - tools = self.storage.indexer_configuration_add(TOOLS) - self.tools = {} - for tool in tools: - tool_name = tool['tool_name'] - while tool_name in self.tools: - tool_name += '_' - self.tools[tool_name] = { - 'id': tool['id'], - 'name': tool['tool_name'], - 'version': tool['tool_version'], - 'configuration': tool['tool_configuration'], - } + start = content_ids[0] + end = content_ids[-1] + # retrieve mimetypes limited to 10 results + actual_result = storage.content_mimetype_get_range( + start, end, + indexer_configuration_id=1, + limit=10) - self.sha1_1 = hash_to_bytes('34973274ccef6ab4dfaaf86599792fa9c3fe4689') - self.sha1_2 = hash_to_bytes('61c2b3a30496d329e21af70dd2d7e097046d07b7') - self.revision_id_1 = hash_to_bytes( - '7026b7c1a2af56521e951c01ed20f255fa054238') - self.revision_id_2 = hash_to_bytes( - '7026b7c1a2af56521e9587659012345678904321') - self.revision_id_3 = hash_to_bytes( - '7026b7c1a2af56521e9587659012345678904320') - self.origin_url_1 = 'file:///dev/0/zero' # 44434341 - self.origin_url_2 = 'file:///dev/1/one' # 44434342 - self.origin_url_3 = 'file:///dev/2/two' # 54974445 - - def test_check_config(self): - self.assertTrue(self.storage.check_config(check_write=True)) - self.assertTrue(self.storage.check_config(check_write=False)) - - # generate content_mimetype tests - ( - test_content_mimetype_missing, - test_content_mimetype_add__drop_duplicate, - test_content_mimetype_add__update_in_place_duplicate, - 
test_content_mimetype_add__update_in_place_deadlock, - test_content_mimetype_add__duplicate_twice, - test_content_mimetype_get, - _, # content_mimetype_detete, - _, # content_mimetype_detete_nonexisting, - ) = gen_generic_endpoint_tests( - endpoint_type='content_mimetype', - tool_name='file', - example_data1={ - 'mimetype': 'text/plain', - 'encoding': 'utf-8', - }, - example_data2={ - 'mimetype': 'text/html', - 'encoding': 'us-ascii', - }, - ) - - # content_language tests - ( - test_content_language_missing, - test_content_language_add__drop_duplicate, - test_content_language_add__update_in_place_duplicate, - test_content_language_add__update_in_place_deadlock, - test_content_language_add__duplicate_twice, - test_content_language_get, - _, # test_content_language_delete, - _, # test_content_language_delete_nonexisting, - ) = gen_generic_endpoint_tests( - endpoint_type='content_language', - tool_name='pygments', - example_data1={ + assert actual_result + assert set(actual_result.keys()) == {'ids', 'next'} + actual_ids = actual_result['ids'] + actual_next = actual_result['next'] + + assert len(actual_ids) == 10 + assert actual_next is not None + assert actual_next == content_ids[10] + + expected_mimetypes = content_ids[:10] + assert expected_mimetypes == actual_ids + + # retrieve next part + actual_result = storage.content_mimetype_get_range( + start=end, end=end, indexer_configuration_id=1) + assert set(actual_result.keys()) == {'ids', 'next'} + actual_ids = actual_result['ids'] + actual_next = actual_result['next'] + + assert actual_next is None + expected_mimetypes = [content_ids[-1]] + assert expected_mimetypes == actual_ids + + +class TestIndexerStorageContentLanguage(StorageETypeTester): + """Test Indexer Storage content_language related methods + """ + endpoint_type = 'content_language' + tool_name = 'pygments' + example_data = [ + { 'lang': 'haskell', }, - example_data2={ + { 'lang': 'common-lisp', }, - ) - - # content_ctags tests - ( - 
test_content_ctags_missing, - # the following tests are disabled because CTAGS behave differently - _, # test_content_ctags_add__drop_duplicate, - _, # test_content_ctags_add__update_in_place_duplicate, - _, # test_content_ctags_add__update_in_place_deadlock, - _, # test_content_ctags_add__duplicate_twice, - _, # test_content_ctags_get, - _, # test_content_ctags_delete, - _, # test_content_ctags_delete_nonexisting, - ) = gen_generic_endpoint_tests( - endpoint_type='content_ctags', - tool_name='universal-ctags', - example_data1={ + ] + + +class TestIndexerStorageContentCTags(StorageETypeTester): + """Test Indexer Storage content_ctags related methods + """ + endpoint_type = 'content_ctags' + tool_name = 'universal-ctags' + example_data = [ + { 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 119, 'lang': 'OCaml', }] }, - example_data2={ + { 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Python', }, { 'name': 'main', 'kind': 'function', 'line': 119, 'lang': 'Python', }] }, - ) + ] + + # the following tests are disabled because CTAGS behaves differently + @pytest.mark.skip + def test_add__drop_duplicate(self): + pass + + @pytest.mark.skip + def test_add__update_in_place_duplicate(self): + pass - def test_content_ctags_search(self): + @pytest.mark.skip + def test_add__update_in_place_deadlock(self): + pass + + @pytest.mark.skip + def test_add__duplicate_twice(self): + pass + + @pytest.mark.skip + def test_get(self): + pass + + def test_content_ctags_search(self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # 1. 
given - tool = self.tools['universal-ctags'] + tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag1 = { - 'id': self.sha1_1, + 'id': data.sha1_1, 'indexer_configuration_id': tool_id, 'ctags': [ { 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', }, { 'name': 'hello', 'kind': 'variable', 'line': 210, 'lang': 'Python', }, ] } ctag2 = { - 'id': self.sha1_2, + 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [ { 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, { 'name': 'result', 'kind': 'variable', 'line': 120, 'lang': 'C', }, ] } - self.storage.content_ctags_add([ctag1, ctag2]) + storage.content_ctags_add([ctag1, ctag2]) # 1. when - actual_ctags = list(self.storage.content_ctags_search('hello', - limit=1)) + actual_ctags = list(storage.content_ctags_search('hello', limit=1)) # 1. then - self.assertEqual(actual_ctags, [ + assert actual_ctags == [ { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', } - ]) + ] # 2. when - actual_ctags = list(self.storage.content_ctags_search( + actual_ctags = list(storage.content_ctags_search( 'hello', limit=1, last_sha1=ctag1['id'])) # 2. then - self.assertEqual(actual_ctags, [ + assert actual_ctags == [ { 'id': ctag2['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', } - ]) + ] # 3. when - actual_ctags = list(self.storage.content_ctags_search('hello')) + actual_ctags = list(storage.content_ctags_search('hello')) # 3. then - self.assertEqual(actual_ctags, [ + assert actual_ctags == [ { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 210, 'lang': 'Python', }, { 'id': ctag2['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, - ]) + ] # 4. 
when - actual_ctags = list(self.storage.content_ctags_search('counter')) + actual_ctags = list(storage.content_ctags_search('counter')) # then - self.assertEqual(actual_ctags, [{ + assert actual_ctags == [{ 'id': ctag1['id'], 'tool': tool, 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', - }]) + }] # 5. when - actual_ctags = list(self.storage.content_ctags_search('result', - limit=1)) + actual_ctags = list(storage.content_ctags_search('result', limit=1)) # then - self.assertEqual(actual_ctags, [{ + assert actual_ctags == [{ 'id': ctag2['id'], 'tool': tool, 'name': 'result', 'kind': 'variable', 'line': 120, 'lang': 'C', - }]) + }] - def test_content_ctags_search_no_result(self): - actual_ctags = list(self.storage.content_ctags_search('counter')) + def test_content_ctags_search_no_result(self, swh_indexer_storage): + storage = swh_indexer_storage + actual_ctags = list(storage.content_ctags_search('counter')) - self.assertEqual(actual_ctags, []) + assert not actual_ctags + + def test_content_ctags_add__add_new_ctags_added( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data - def test_content_ctags_add__add_new_ctags_added(self): # given - tool = self.tools['universal-ctags'] + tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag_v1 = { - 'id': self.sha1_2, + 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given - self.storage.content_ctags_add([ctag_v1]) - self.storage.content_ctags_add([ctag_v1]) # conflict does nothing + storage.content_ctags_add([ctag_v1]) + storage.content_ctags_add([ctag_v1]) # conflict does nothing # when - actual_ctags = list(self.storage.content_ctags_get( - [self.sha1_2])) + actual_ctags = list(storage.content_ctags_get([data.sha1_2])) # then expected_ctags = [{ - 'id': self.sha1_2, + 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, 
}] - self.assertEqual(actual_ctags, expected_ctags) + assert actual_ctags == expected_ctags # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) - self.storage.content_ctags_add([ctag_v2]) + storage.content_ctags_add([ctag_v2]) expected_ctags = [ { - 'id': self.sha1_2, + 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, }, { - 'id': self.sha1_2, + 'id': data.sha1_2, 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', 'tool': tool, } ] - actual_ctags = list(self.storage.content_ctags_get( - [self.sha1_2])) + actual_ctags = list(storage.content_ctags_get( + [data.sha1_2])) - self.assertEqual(actual_ctags, expected_ctags) + assert actual_ctags == expected_ctags - def test_content_ctags_add__update_in_place(self): + def test_content_ctags_add__update_in_place( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool = self.tools['universal-ctags'] + tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag_v1 = { - 'id': self.sha1_2, + 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given - self.storage.content_ctags_add([ctag_v1]) + storage.content_ctags_add([ctag_v1]) # when - actual_ctags = list(self.storage.content_ctags_get( - [self.sha1_2])) + actual_ctags = list(storage.content_ctags_get( + [data.sha1_2])) # then expected_ctags = [ { - 'id': self.sha1_2, + 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool } ] - self.assertEqual(actual_ctags, expected_ctags) + assert actual_ctags == expected_ctags # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }, { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) - 
self.storage.content_ctags_add([ctag_v2], conflict_update=True) + storage.content_ctags_add([ctag_v2], conflict_update=True) - actual_ctags = list(self.storage.content_ctags_get( - [self.sha1_2])) + actual_ctags = list(storage.content_ctags_get( + [data.sha1_2])) # ctag did change as the v2 was used to overwrite v1 expected_ctags = [ { - 'id': self.sha1_2, + 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, }, { - 'id': self.sha1_2, + 'id': data.sha1_2, 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', 'tool': tool, } ] - self.assertEqual(actual_ctags, expected_ctags) - - # content_fossology_license tests - ( - _, # The endpoint content_fossology_license_missing does not exist - # the following tests are disabled because fossology_license tests - # behave differently - _, # test_content_fossology_license_add__drop_duplicate, - _, # test_content_fossology_license_add__update_in_place_duplicate, - _, # test_content_fossology_license_add__update_in_place_deadlock, - _, # test_content_metadata_add__duplicate_twice, - _, # test_content_fossology_license_get, - _, # test_content_fossology_license_delete, - _, # test_content_fossology_license_delete_nonexisting, - ) = gen_generic_endpoint_tests( - endpoint_type='content_fossology_license', - tool_name='nomos', - example_data1={ - 'licenses': ['Apache-2.0'], - }, - example_data2={ - 'licenses': ['BSD-2-Clause'], - }, - ) - - def test_content_fossology_license_add__new_license_added(self): - # given - tool = self.tools['nomos'] - tool_id = tool['id'] - - license_v1 = { - 'id': self.sha1_1, - 'licenses': ['Apache-2.0'], - 'indexer_configuration_id': tool_id, - } - - # given - self.storage.content_fossology_license_add([license_v1]) - # conflict does nothing - self.storage.content_fossology_license_add([license_v1]) + assert actual_ctags == expected_ctags - # when - actual_licenses = list(self.storage.content_fossology_license_get( - [self.sha1_1])) - # then - 
expected_license = { - self.sha1_1: [{ - 'licenses': ['Apache-2.0'], - 'tool': tool, - }] - } - self.assertEqual(actual_licenses, [expected_license]) - - # given - license_v2 = license_v1.copy() - license_v2.update({ - 'licenses': ['BSD-2-Clause'], - }) - - self.storage.content_fossology_license_add([license_v2]) - - actual_licenses = list(self.storage.content_fossology_license_get( - [self.sha1_1])) - - expected_license = { - self.sha1_1: [{ - 'licenses': ['Apache-2.0', 'BSD-2-Clause'], - 'tool': tool - }] - } - - # license did not change as the v2 was dropped. - self.assertEqual(actual_licenses, [expected_license]) - - # content_metadata tests - ( - test_content_metadata_missing, - test_content_metadata_add__drop_duplicate, - test_content_metadata_add__update_in_place_duplicate, - test_content_metadata_add__update_in_place_deadlock, - test_content_metadata_add__duplicate_twice, - test_content_metadata_get, - _, # test_content_metadata_delete, - _, # test_content_metadata_delete_nonexisting, - ) = gen_generic_endpoint_tests( - endpoint_type='content_metadata', - tool_name='swh-metadata-detector', - example_data1={ +class TestIndexerStorageContentMetadata(StorageETypeTester): + """Test Indexer Storage content_metadata related methods + """ + tool_name = 'swh-metadata-detector' + endpoint_type = 'content_metadata' + example_data = [ + { 'metadata': { 'other': {}, 'codeRepository': { 'type': 'git', 'url': 'https://github.com/moranegg/metadata_test' }, 'description': 'Simple package.json test for indexer', 'name': 'test_metadata', 'version': '0.0.1' }, }, - example_data2={ + { 'metadata': { 'other': {}, 'name': 'test_metadata', 'version': '0.0.1' }, }, - ) - - # revision_intrinsic_metadata tests - ( - test_revision_intrinsic_metadata_missing, - test_revision_intrinsic_metadata_add__drop_duplicate, - test_revision_intrinsic_metadata_add__update_in_place_duplicate, - test_revision_intrinsic_metadata_add__update_in_place_deadlock, - 
test_revision_intrinsic_metadata_add__duplicate_twice, - test_revision_intrinsic_metadata_get, - test_revision_intrinsic_metadata_delete, - test_revision_intrinsic_metadata_delete_nonexisting, - ) = gen_generic_endpoint_tests( - endpoint_type='revision_intrinsic_metadata', - tool_name='swh-metadata-detector', - example_data1={ + ] + + +class TestIndexerStorageRevisionIntrinsicMetadata(StorageETypeTester): + """Test Indexer Storage revision_intrinsic_metadata related methods + """ + tool_name = 'swh-metadata-detector' + endpoint_type = 'revision_intrinsic_metadata' + example_data = [ + { 'metadata': { 'other': {}, 'codeRepository': { 'type': 'git', 'url': 'https://github.com/moranegg/metadata_test' }, 'description': 'Simple package.json test for indexer', 'name': 'test_metadata', 'version': '0.0.1' }, 'mappings': ['mapping1'], }, - example_data2={ + { 'metadata': { 'other': {}, 'name': 'test_metadata', 'version': '0.0.1' }, 'mappings': ['mapping2'], }, - ) + ] + + def test_revision_intrinsic_metadata_delete( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool = data.tools[self.tool_name] + + query = [data.sha1_2, data.sha1_1] + data1 = { + 'id': data.sha1_2, + **self.example_data[0], + 'indexer_configuration_id': tool['id'], + } + + # when + endpoint(storage, etype, 'add')([data1]) + endpoint(storage, etype, 'delete')([ + { + 'id': data.sha1_2, + 'indexer_configuration_id': tool['id'], + } + ]) + + # then + actual_data = list(endpoint(storage, etype, 'get')(query)) - def test_origin_intrinsic_metadata_get(self): + # then + assert not actual_data + + def test_revision_intrinsic_metadata_delete_nonexisting( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + etype = self.endpoint_type + tool = data.tools[self.tool_name] + endpoint(storage, etype, 'delete')([ + { + 'id': data.sha1_2, + 'indexer_configuration_id': tool['id'], + } + ]) + + +class 
TestIndexerStorageContentFossologyLicence: + def test_content_fossology_license_add__new_license_added( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool = data.tools['nomos'] + tool_id = tool['id'] + + license_v1 = { + 'id': data.sha1_1, + 'licenses': ['Apache-2.0'], + 'indexer_configuration_id': tool_id, + } + + # given + storage.content_fossology_license_add([license_v1]) + # conflict does nothing + storage.content_fossology_license_add([license_v1]) + + # when + actual_licenses = list(storage.content_fossology_license_get( + [data.sha1_1])) + + # then + expected_license = { + data.sha1_1: [{ + 'licenses': ['Apache-2.0'], + 'tool': tool, + }] + } + assert actual_licenses == [expected_license] + + # given + license_v2 = license_v1.copy() + license_v2.update({ + 'licenses': ['BSD-2-Clause'], + }) + + storage.content_fossology_license_add([license_v2]) + + actual_licenses = list(storage.content_fossology_license_get( + [data.sha1_1])) + + expected_license = { + data.sha1_1: [{ + 'licenses': ['Apache-2.0', 'BSD-2-Clause'], + 'tool': tool + }] + } + + # license did not change as the v2 was dropped. 
+ assert actual_licenses == [expected_license] + + def test_generate_content_fossology_license_get_range_limit_none( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + """license_get_range call with wrong limit input should fail""" + with pytest.raises(ValueError) as e: + storage.content_fossology_license_get_range( + start=None, end=None, indexer_configuration_id=None, + limit=None) + + assert e.value.args == ( + 'Development error: limit should not be None',) + + def test_generate_content_fossology_license_get_range_no_limit( + self, swh_indexer_storage_with_data): + """license_get_range returns licenses within range provided""" + storage, data = swh_indexer_storage_with_data + # craft some consistent mimetypes + fossology_licenses = data.fossology_licenses + mimetypes = prepare_mimetypes_from(fossology_licenses) + + storage.content_mimetype_add(mimetypes, conflict_update=True) + # add fossology_licenses to storage + storage.content_fossology_license_add(fossology_licenses) + + # All ids from the db + content_ids = sorted([c['id'] for c in fossology_licenses]) + + start = content_ids[0] + end = content_ids[-1] + + # retrieve fossology_licenses + tool_id = fossology_licenses[0]['indexer_configuration_id'] + actual_result = storage.content_fossology_license_get_range( + start, end, indexer_configuration_id=tool_id) + + actual_ids = actual_result['ids'] + actual_next = actual_result['next'] + + assert len(fossology_licenses) == len(actual_ids) + assert actual_next is None + assert content_ids == actual_ids + + def test_generate_content_fossology_license_get_range_no_limit_with_filter( + self, swh_indexer_storage_with_data): + """This filters non textual, then returns results within range""" + storage, data = swh_indexer_storage_with_data + fossology_licenses = data.fossology_licenses + mimetypes = data.mimetypes + + # craft some consistent mimetypes + _mimetypes = prepare_mimetypes_from(fossology_licenses) + # add binary 
mimetypes which will get filtered out in results + for m in mimetypes: + _mimetypes.append({ + 'mimetype': 'binary', + **m, + }) + + storage.content_mimetype_add(_mimetypes, conflict_update=True) + # add fossology_licenses to storage + storage.content_fossology_license_add(fossology_licenses) + + # All ids from the db + content_ids = sorted([c['id'] for c in fossology_licenses]) + + start = content_ids[0] + end = content_ids[-1] + + # retrieve fossology_licenses + tool_id = fossology_licenses[0]['indexer_configuration_id'] + actual_result = storage.content_fossology_license_get_range( + start, end, indexer_configuration_id=tool_id) + + actual_ids = actual_result['ids'] + actual_next = actual_result['next'] + + assert len(fossology_licenses) == len(actual_ids) + assert actual_next is None + assert content_ids == actual_ids + + def test_generate_fossology_license_get_range_limit( + self, swh_indexer_storage_with_data): + """fossology_license_get_range paginates results if limit exceeded""" + storage, data = swh_indexer_storage_with_data + fossology_licenses = data.fossology_licenses + + # craft some consistent mimetypes + mimetypes = prepare_mimetypes_from(fossology_licenses) + + # add fossology_licenses to storage + storage.content_mimetype_add(mimetypes, conflict_update=True) + storage.content_fossology_license_add(fossology_licenses) + + # input the list of sha1s we want from storage + content_ids = sorted([c['id'] for c in fossology_licenses]) + start = content_ids[0] + end = content_ids[-1] + + # retrieve fossology_licenses limited to 3 results + limited_results = len(fossology_licenses) - 1 + tool_id = fossology_licenses[0]['indexer_configuration_id'] + actual_result = storage.content_fossology_license_get_range( + start, end, + indexer_configuration_id=tool_id, limit=limited_results) + + actual_ids = actual_result['ids'] + actual_next = actual_result['next'] + + assert limited_results == len(actual_ids) + assert actual_next is not None + assert actual_next == 
content_ids[-1] + + expected_fossology_licenses = content_ids[:-1] + assert expected_fossology_licenses == actual_ids + + # retrieve next part + actual_results2 = storage.content_fossology_license_get_range( + start=end, end=end, indexer_configuration_id=tool_id) + actual_ids2 = actual_results2['ids'] + actual_next2 = actual_results2['next'] + + assert actual_next2 is None + expected_fossology_licenses2 = [content_ids[-1]] + assert expected_fossology_licenses2 == actual_ids2 + + +class TestIndexerStorageOriginIntrinsicMetadata: + def test_origin_intrinsic_metadata_get( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + # given + tool_id = data.tools['swh-metadata-detector']['id'] metadata = { 'version': None, 'name': None, } metadata_rev = { - 'id': self.revision_id_2, + 'id': data.revision_id_2, 'metadata': metadata, 'mappings': ['mapping1'], 'indexer_configuration_id': tool_id, } metadata_origin = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata, 'indexer_configuration_id': tool_id, 'mappings': ['mapping1'], - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, } # when - self.storage.revision_intrinsic_metadata_add([metadata_rev]) - self.storage.origin_intrinsic_metadata_add([metadata_origin]) + storage.revision_intrinsic_metadata_add([metadata_rev]) + storage.origin_intrinsic_metadata_add([metadata_origin]) # then - actual_metadata = list(self.storage.origin_intrinsic_metadata_get( - [self.origin_url_1, 'no://where'])) + actual_metadata = list(storage.origin_intrinsic_metadata_get( + [data.origin_url_1, 'no://where'])) expected_metadata = [{ - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata, - 'tool': self.tools['swh-metadata-detector'], - 'from_revision': self.revision_id_2, + 'tool': data.tools['swh-metadata-detector'], + 'from_revision': data.revision_id_2, 'mappings': ['mapping1'], }] - self.assertEqual(actual_metadata, expected_metadata) + 
assert actual_metadata == expected_metadata - def test_origin_intrinsic_metadata_delete(self): + def test_origin_intrinsic_metadata_delete( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool_id = data.tools['swh-metadata-detector']['id'] metadata = { 'version': None, 'name': None, } metadata_rev = { - 'id': self.revision_id_2, + 'id': data.revision_id_2, 'metadata': metadata, 'mappings': ['mapping1'], 'indexer_configuration_id': tool_id, } metadata_origin = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata, 'indexer_configuration_id': tool_id, 'mappings': ['mapping1'], - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, } metadata_origin2 = metadata_origin.copy() - metadata_origin2['id'] = self.origin_url_2 + metadata_origin2['id'] = data.origin_url_2 # when - self.storage.revision_intrinsic_metadata_add([metadata_rev]) - self.storage.origin_intrinsic_metadata_add([ + storage.revision_intrinsic_metadata_add([metadata_rev]) + storage.origin_intrinsic_metadata_add([ metadata_origin, metadata_origin2]) - self.storage.origin_intrinsic_metadata_delete([ + storage.origin_intrinsic_metadata_delete([ { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'indexer_configuration_id': tool_id } ]) # then - actual_metadata = list(self.storage.origin_intrinsic_metadata_get( - [self.origin_url_1, self.origin_url_2, 'no://where'])) + actual_metadata = list(storage.origin_intrinsic_metadata_get( + [data.origin_url_1, data.origin_url_2, 'no://where'])) for item in actual_metadata: item['indexer_configuration_id'] = item.pop('tool')['id'] - self.assertEqual(actual_metadata, [metadata_origin2]) + assert actual_metadata == [metadata_origin2] - def test_origin_intrinsic_metadata_delete_nonexisting(self): - tool_id = self.tools['swh-metadata-detector']['id'] - self.storage.origin_intrinsic_metadata_delete([ + def 
test_origin_intrinsic_metadata_delete_nonexisting( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + tool_id = data.tools['swh-metadata-detector']['id'] + storage.origin_intrinsic_metadata_delete([ { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'indexer_configuration_id': tool_id } ]) - def test_origin_intrinsic_metadata_add_drop_duplicate(self): + def test_origin_intrinsic_metadata_add_drop_duplicate( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool_id = data.tools['swh-metadata-detector']['id'] metadata_v1 = { 'version': None, 'name': None, } metadata_rev_v1 = { - 'id': self.revision_id_1, + 'id': data.revision_id_1, 'metadata': metadata_v1.copy(), 'mappings': [], 'indexer_configuration_id': tool_id, } metadata_origin_v1 = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata_v1.copy(), 'indexer_configuration_id': tool_id, 'mappings': [], - 'from_revision': self.revision_id_1, + 'from_revision': data.revision_id_1, } # given - self.storage.revision_intrinsic_metadata_add([metadata_rev_v1]) - self.storage.origin_intrinsic_metadata_add([metadata_origin_v1]) + storage.revision_intrinsic_metadata_add([metadata_rev_v1]) + storage.origin_intrinsic_metadata_add([metadata_origin_v1]) # when - actual_metadata = list(self.storage.origin_intrinsic_metadata_get( - [self.origin_url_1, 'no://where'])) + actual_metadata = list(storage.origin_intrinsic_metadata_get( + [data.origin_url_1, 'no://where'])) expected_metadata_v1 = [{ - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata_v1, - 'tool': self.tools['swh-metadata-detector'], - 'from_revision': self.revision_id_1, + 'tool': data.tools['swh-metadata-detector'], + 'from_revision': data.revision_id_1, 'mappings': [], }] - self.assertEqual(actual_metadata, expected_metadata_v1) + assert actual_metadata == expected_metadata_v1 # 
given metadata_v2 = metadata_v1.copy() metadata_v2.update({ 'name': 'test_metadata', 'author': 'MG', }) metadata_rev_v2 = metadata_rev_v1.copy() metadata_origin_v2 = metadata_origin_v1.copy() metadata_rev_v2['metadata'] = metadata_v2 metadata_origin_v2['metadata'] = metadata_v2 - self.storage.revision_intrinsic_metadata_add([metadata_rev_v2]) - self.storage.origin_intrinsic_metadata_add([metadata_origin_v2]) + storage.revision_intrinsic_metadata_add([metadata_rev_v2]) + storage.origin_intrinsic_metadata_add([metadata_origin_v2]) # then - actual_metadata = list(self.storage.origin_intrinsic_metadata_get( - [self.origin_url_1])) + actual_metadata = list(storage.origin_intrinsic_metadata_get( + [data.origin_url_1])) # metadata did not change as the v2 was dropped. - self.assertEqual(actual_metadata, expected_metadata_v1) + assert actual_metadata == expected_metadata_v1 - def test_origin_intrinsic_metadata_add_update_in_place_duplicate(self): + def test_origin_intrinsic_metadata_add_update_in_place_duplicate( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool_id = data.tools['swh-metadata-detector']['id'] metadata_v1 = { 'version': None, 'name': None, } metadata_rev_v1 = { - 'id': self.revision_id_2, + 'id': data.revision_id_2, 'metadata': metadata_v1, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata_origin_v1 = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata_v1.copy(), 'indexer_configuration_id': tool_id, 'mappings': [], - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, } # given - self.storage.revision_intrinsic_metadata_add([metadata_rev_v1]) - self.storage.origin_intrinsic_metadata_add([metadata_origin_v1]) + storage.revision_intrinsic_metadata_add([metadata_rev_v1]) + storage.origin_intrinsic_metadata_add([metadata_origin_v1]) # when - actual_metadata = 
list(self.storage.origin_intrinsic_metadata_get( - [self.origin_url_1])) + actual_metadata = list(storage.origin_intrinsic_metadata_get( + [data.origin_url_1])) # then expected_metadata_v1 = [{ - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata_v1, - 'tool': self.tools['swh-metadata-detector'], - 'from_revision': self.revision_id_2, + 'tool': data.tools['swh-metadata-detector'], + 'from_revision': data.revision_id_2, 'mappings': [], }] - self.assertEqual(actual_metadata, expected_metadata_v1) + assert actual_metadata == expected_metadata_v1 # given metadata_v2 = metadata_v1.copy() metadata_v2.update({ 'name': 'test_update_duplicated_metadata', 'author': 'MG', }) metadata_rev_v2 = metadata_rev_v1.copy() metadata_origin_v2 = metadata_origin_v1.copy() metadata_rev_v2['metadata'] = metadata_v2 metadata_origin_v2 = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata_v2.copy(), 'indexer_configuration_id': tool_id, 'mappings': ['npm'], - 'from_revision': self.revision_id_1, + 'from_revision': data.revision_id_1, } - self.storage.revision_intrinsic_metadata_add( + storage.revision_intrinsic_metadata_add( [metadata_rev_v2], conflict_update=True) - self.storage.origin_intrinsic_metadata_add( + storage.origin_intrinsic_metadata_add( [metadata_origin_v2], conflict_update=True) - actual_metadata = list(self.storage.origin_intrinsic_metadata_get( - [self.origin_url_1])) + actual_metadata = list(storage.origin_intrinsic_metadata_get( + [data.origin_url_1])) expected_metadata_v2 = [{ - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata_v2, - 'tool': self.tools['swh-metadata-detector'], - 'from_revision': self.revision_id_1, + 'tool': data.tools['swh-metadata-detector'], + 'from_revision': data.revision_id_1, 'mappings': ['npm'], }] # metadata did change as the v2 was used to overwrite v1 - self.assertEqual(actual_metadata, expected_metadata_v2) + assert actual_metadata == expected_metadata_v2 - def 
test_origin_intrinsic_metadata_add__update_in_place_deadlock(self): + def test_origin_intrinsic_metadata_add__update_in_place_deadlock( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool_id = data.tools['swh-metadata-detector']['id'] ids = list(range(10)) example_data1 = { 'metadata': { 'version': None, 'name': None, }, 'mappings': [], } example_data2 = { 'metadata': { 'version': 'v1.1.1', 'name': 'foo', }, 'mappings': [], } metadata_rev_v1 = { - 'id': self.revision_id_2, + 'id': data.revision_id_2, 'metadata': { 'version': None, 'name': None, }, 'mappings': [], 'indexer_configuration_id': tool_id, } data_v1 = [ { 'id': 'file:///tmp/origin%d' % id_, - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, **example_data1, 'indexer_configuration_id': tool_id, } for id_ in ids ] data_v2 = [ { 'id': 'file:///tmp/origin%d' % id_, - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, **example_data2, 'indexer_configuration_id': tool_id, } for id_ in ids ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. 
data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given - self.storage.revision_intrinsic_metadata_add([metadata_rev_v1]) - self.storage.origin_intrinsic_metadata_add(data_v1) + storage.revision_intrinsic_metadata_add([metadata_rev_v1]) + storage.origin_intrinsic_metadata_add(data_v1) # when origins = ['file:///tmp/origin%d' % i for i in ids] - actual_data = list(self.storage.origin_intrinsic_metadata_get(origins)) + actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v1 = [ { 'id': 'file:///tmp/origin%d' % id_, - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, **example_data1, - 'tool': self.tools['swh-metadata-detector'], + 'tool': data.tools['swh-metadata-detector'], } for id_ in ids ] # then - self.assertEqual(actual_data, expected_data_v1) + assert actual_data == expected_data_v1 # given def f1(): - self.storage.origin_intrinsic_metadata_add( + storage.origin_intrinsic_metadata_add( data_v2a, conflict_update=True) def f2(): - self.storage.origin_intrinsic_metadata_add( + storage.origin_intrinsic_metadata_add( data_v2b, conflict_update=True) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() - actual_data = list(self.storage.origin_intrinsic_metadata_get(origins)) + actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v2 = [ { 'id': 'file:///tmp/origin%d' % id_, - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, **example_data2, - 'tool': self.tools['swh-metadata-detector'], + 'tool': data.tools['swh-metadata-detector'], } for id_ in ids ] - self.maxDiff = None - self.assertCountEqual(actual_data, expected_data_v2) + assert len(actual_data) == len(expected_data_v2) + assert sorted(actual_data, key=lambda x: x['id']) == expected_data_v2 - def test_origin_intrinsic_metadata_add__duplicate_twice(self): + def test_origin_intrinsic_metadata_add__duplicate_twice( + self, 
swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool_id = data.tools['swh-metadata-detector']['id'] metadata = { 'developmentStatus': None, 'name': None, } metadata_rev = { - 'id': self.revision_id_2, + 'id': data.revision_id_2, 'metadata': metadata, 'mappings': ['mapping1'], 'indexer_configuration_id': tool_id, } metadata_origin = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata, 'indexer_configuration_id': tool_id, 'mappings': ['mapping1'], - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, } # when - self.storage.revision_intrinsic_metadata_add([metadata_rev]) + storage.revision_intrinsic_metadata_add([metadata_rev]) - with self.assertRaises(ValueError): - self.storage.origin_intrinsic_metadata_add([ + with pytest.raises(ValueError): + storage.origin_intrinsic_metadata_add([ metadata_origin, metadata_origin]) - def test_origin_intrinsic_metadata_search_fulltext(self): + def test_origin_intrinsic_metadata_search_fulltext( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool_id = data.tools['swh-metadata-detector']['id'] metadata1 = { 'author': 'John Doe', } metadata1_rev = { - 'id': self.revision_id_1, + 'id': data.revision_id_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata1_origin = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, - 'from_revision': self.revision_id_1, + 'from_revision': data.revision_id_1, } metadata2 = { 'author': 'Jane Doe', } metadata2_rev = { - 'id': self.revision_id_2, - 'origin': self.origin_url_1, + 'id': data.revision_id_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata2_origin = { - 'id': self.origin_url_2, + 'id': 
data.origin_url_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, } # when - self.storage.revision_intrinsic_metadata_add([metadata1_rev]) - self.storage.origin_intrinsic_metadata_add([metadata1_origin]) - self.storage.revision_intrinsic_metadata_add([metadata2_rev]) - self.storage.origin_intrinsic_metadata_add([metadata2_origin]) + storage.revision_intrinsic_metadata_add([metadata1_rev]) + storage.origin_intrinsic_metadata_add([metadata1_origin]) + storage.revision_intrinsic_metadata_add([metadata2_rev]) + storage.origin_intrinsic_metadata_add([metadata2_origin]) # then - search = self.storage.origin_intrinsic_metadata_search_fulltext - self.assertCountEqual( - [res['id'] for res in search(['Doe'])], - [self.origin_url_1, self.origin_url_2]) - self.assertEqual( - [res['id'] for res in search(['John', 'Doe'])], - [self.origin_url_1]) - self.assertEqual( - [res['id'] for res in search(['John'])], - [self.origin_url_1]) - self.assertEqual( - [res['id'] for res in search(['John', 'Jane'])], - []) - - def test_origin_intrinsic_metadata_search_fulltext_rank(self): + search = storage.origin_intrinsic_metadata_search_fulltext + assert set([res['id'] for res in search(['Doe'])]) \ + == set([data.origin_url_1, data.origin_url_2]) + assert [res['id'] for res in search(['John', 'Doe'])] \ + == [data.origin_url_1] + assert [res['id'] for res in search(['John'])] \ + == [data.origin_url_1] + assert not list(search(['John', 'Jane'])) + + def test_origin_intrinsic_metadata_search_fulltext_rank( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data # given - tool_id = self.tools['swh-metadata-detector']['id'] + tool_id = data.tools['swh-metadata-detector']['id'] # The following authors have "Random Person" to add some more content # to the JSON data, to work around normalization quirks when there # are few words (rank/(1+ln(nb_words)) is very 
sensitive to nb_words # for small values of nb_words). metadata1 = { 'author': [ 'Random Person', 'John Doe', 'Jane Doe', ] } metadata1_rev = { - 'id': self.revision_id_1, + 'id': data.revision_id_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata1_origin = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, - 'from_revision': self.revision_id_1, + 'from_revision': data.revision_id_1, } metadata2 = { 'author': [ 'Random Person', 'Jane Doe', ] } metadata2_rev = { - 'id': self.revision_id_2, + 'id': data.revision_id_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata2_origin = { - 'id': self.origin_url_2, + 'id': data.origin_url_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, } # when - self.storage.revision_intrinsic_metadata_add([metadata1_rev]) - self.storage.origin_intrinsic_metadata_add([metadata1_origin]) - self.storage.revision_intrinsic_metadata_add([metadata2_rev]) - self.storage.origin_intrinsic_metadata_add([metadata2_origin]) + storage.revision_intrinsic_metadata_add([metadata1_rev]) + storage.origin_intrinsic_metadata_add([metadata1_origin]) + storage.revision_intrinsic_metadata_add([metadata2_rev]) + storage.origin_intrinsic_metadata_add([metadata2_origin]) # then - search = self.storage.origin_intrinsic_metadata_search_fulltext - self.assertEqual( - [res['id'] for res in search(['Doe'])], - [self.origin_url_1, self.origin_url_2]) - self.assertEqual( - [res['id'] for res in search(['Doe'], limit=1)], - [self.origin_url_1]) - self.assertEqual( - [res['id'] for res in search(['John'])], - [self.origin_url_1]) - self.assertEqual( - [res['id'] for res in search(['Jane'])], - [self.origin_url_2, self.origin_url_1]) - self.assertEqual( - [res['id'] for res in search(['John', 'Jane'])], - 
[self.origin_url_1]) - - def _fill_origin_intrinsic_metadata(self): - tool1_id = self.tools['swh-metadata-detector']['id'] - tool2_id = self.tools['swh-metadata-detector2']['id'] + search = storage.origin_intrinsic_metadata_search_fulltext + assert [res['id'] for res in search(['Doe'])] \ + == [data.origin_url_1, data.origin_url_2] + assert [res['id'] for res in search(['Doe'], limit=1)] \ + == [data.origin_url_1] + assert [res['id'] for res in search(['John'])] \ + == [data.origin_url_1] + assert [res['id'] for res in search(['Jane'])] \ + == [data.origin_url_2, data.origin_url_1] + assert [res['id'] for res in search(['John', 'Jane'])] \ + == [data.origin_url_1] + + def _fill_origin_intrinsic_metadata( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + tool1_id = data.tools['swh-metadata-detector']['id'] + tool2_id = data.tools['swh-metadata-detector2']['id'] metadata1 = { '@context': 'foo', 'author': 'John Doe', } metadata1_rev = { - 'id': self.revision_id_1, + 'id': data.revision_id_1, 'metadata': metadata1, 'mappings': ['npm'], 'indexer_configuration_id': tool1_id, } metadata1_origin = { - 'id': self.origin_url_1, + 'id': data.origin_url_1, 'metadata': metadata1, 'mappings': ['npm'], 'indexer_configuration_id': tool1_id, - 'from_revision': self.revision_id_1, + 'from_revision': data.revision_id_1, } metadata2 = { '@context': 'foo', 'author': 'Jane Doe', } metadata2_rev = { - 'id': self.revision_id_2, + 'id': data.revision_id_2, 'metadata': metadata2, 'mappings': ['npm', 'gemspec'], 'indexer_configuration_id': tool2_id, } metadata2_origin = { - 'id': self.origin_url_2, + 'id': data.origin_url_2, 'metadata': metadata2, 'mappings': ['npm', 'gemspec'], 'indexer_configuration_id': tool2_id, - 'from_revision': self.revision_id_2, + 'from_revision': data.revision_id_2, } metadata3 = { '@context': 'foo', } metadata3_rev = { - 'id': self.revision_id_3, + 'id': data.revision_id_3, 'metadata': metadata3, 'mappings': ['npm', 
'gemspec'], 'indexer_configuration_id': tool2_id, } metadata3_origin = { - 'id': self.origin_url_3, + 'id': data.origin_url_3, 'metadata': metadata3, 'mappings': ['pkg-info'], 'indexer_configuration_id': tool2_id, - 'from_revision': self.revision_id_3, + 'from_revision': data.revision_id_3, } - self.storage.revision_intrinsic_metadata_add([metadata1_rev]) - self.storage.origin_intrinsic_metadata_add([metadata1_origin]) - self.storage.revision_intrinsic_metadata_add([metadata2_rev]) - self.storage.origin_intrinsic_metadata_add([metadata2_origin]) - self.storage.revision_intrinsic_metadata_add([metadata3_rev]) - self.storage.origin_intrinsic_metadata_add([metadata3_origin]) - - def test_origin_intrinsic_metadata_search_by_producer(self): - self._fill_origin_intrinsic_metadata() - tool1 = self.tools['swh-metadata-detector'] - tool2 = self.tools['swh-metadata-detector2'] - endpoint = self.storage.origin_intrinsic_metadata_search_by_producer + storage.revision_intrinsic_metadata_add([metadata1_rev]) + storage.origin_intrinsic_metadata_add([metadata1_origin]) + storage.revision_intrinsic_metadata_add([metadata2_rev]) + storage.origin_intrinsic_metadata_add([metadata2_origin]) + storage.revision_intrinsic_metadata_add([metadata3_rev]) + storage.origin_intrinsic_metadata_add([metadata3_origin]) + + def test_origin_intrinsic_metadata_search_by_producer( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + self._fill_origin_intrinsic_metadata( + swh_indexer_storage_with_data) + tool1 = data.tools['swh-metadata-detector'] + tool2 = data.tools['swh-metadata-detector2'] + endpoint = storage.origin_intrinsic_metadata_search_by_producer # test pagination - self.assertCountEqual( - endpoint(ids_only=True), - [self.origin_url_1, self.origin_url_2, self.origin_url_3]) - self.assertCountEqual( - endpoint(start=self.origin_url_1, ids_only=True), - [self.origin_url_1, self.origin_url_2, self.origin_url_3]) - self.assertCountEqual( - 
endpoint(start=self.origin_url_1, limit=2, ids_only=True), - [self.origin_url_1, self.origin_url_2]) - self.assertCountEqual( - endpoint(start=self.origin_url_1+'2', ids_only=True), - [self.origin_url_2, self.origin_url_3]) - self.assertCountEqual( - endpoint(start=self.origin_url_1+'2', end=self.origin_url_3[:-1], - ids_only=True), - [self.origin_url_2]) + # no 'page_token' param, return all origins + result = endpoint(ids_only=True) + assert result['origins'] \ + == [data.origin_url_1, data.origin_url_2, data.origin_url_3] + assert 'next_page_token' not in result + + # 'page_token' is < than origin_1, return everything + result = endpoint(page_token=data.origin_url_1[:-1], ids_only=True) + assert result['origins'] \ + == [data.origin_url_1, data.origin_url_2, data.origin_url_3] + assert 'next_page_token' not in result + + # 'page_token' is origin_3, return nothing + result = endpoint(page_token=data.origin_url_3, ids_only=True) + assert not result['origins'] + assert 'next_page_token' not in result + + # test limit argument + result = endpoint(page_token=data.origin_url_1[:-1], + limit=2, ids_only=True) + assert result['origins'] == [data.origin_url_1, data.origin_url_2] + assert result['next_page_token'] == result['origins'][-1] + + result = endpoint(page_token=data.origin_url_1, limit=2, ids_only=True) + assert result['origins'] == [data.origin_url_2, data.origin_url_3] + assert 'next_page_token' not in result + + result = endpoint(page_token=data.origin_url_2, limit=2, ids_only=True) + assert result['origins'] == [data.origin_url_3] + assert 'next_page_token' not in result # test mappings filtering - self.assertCountEqual( - endpoint(mappings=['npm'], ids_only=True), - [self.origin_url_1, self.origin_url_2]) - self.assertCountEqual( - endpoint(mappings=['npm', 'gemspec'], ids_only=True), - [self.origin_url_1, self.origin_url_2]) - self.assertCountEqual( - endpoint(mappings=['gemspec'], ids_only=True), - [self.origin_url_2]) - self.assertCountEqual( - 
endpoint(mappings=['pkg-info'], ids_only=True), - [self.origin_url_3]) - self.assertCountEqual( - endpoint(mappings=['foobar'], ids_only=True), - []) + result = endpoint(mappings=['npm'], ids_only=True) + assert result['origins'] == [data.origin_url_1, data.origin_url_2] + assert 'next_page_token' not in result + + result = endpoint(mappings=['npm', 'gemspec'], ids_only=True) + assert result['origins'] == [data.origin_url_1, data.origin_url_2] + assert 'next_page_token' not in result + + result = endpoint(mappings=['gemspec'], ids_only=True) + assert result['origins'] == [data.origin_url_2] + assert 'next_page_token' not in result + + result = endpoint(mappings=['pkg-info'], ids_only=True) + assert result['origins'] == [data.origin_url_3] + assert 'next_page_token' not in result + + result = endpoint(mappings=['foobar'], ids_only=True) + assert not result['origins'] + assert 'next_page_token' not in result # test pagination + mappings - self.assertCountEqual( - endpoint(mappings=['npm'], limit=1, ids_only=True), - [self.origin_url_1]) + result = endpoint(mappings=['npm'], limit=1, ids_only=True) + assert result['origins'] == [data.origin_url_1] + assert result['next_page_token'] == result['origins'][-1] # test tool filtering - self.assertCountEqual( - endpoint(tool_ids=[tool1['id']], ids_only=True), - [self.origin_url_1]) - self.assertCountEqual( - endpoint(tool_ids=[tool2['id']], ids_only=True), - [self.origin_url_2, self.origin_url_3]) - self.assertCountEqual( - endpoint(tool_ids=[tool1['id'], tool2['id']], ids_only=True), - [self.origin_url_1, self.origin_url_2, self.origin_url_3]) + result = endpoint(tool_ids=[tool1['id']], ids_only=True) + assert result['origins'] == [data.origin_url_1] + assert 'next_page_token' not in result + + result = endpoint(tool_ids=[tool2['id']], ids_only=True) + assert sorted(result['origins']) \ + == [data.origin_url_2, data.origin_url_3] + assert 'next_page_token' not in result + + result = endpoint(tool_ids=[tool1['id'], 
tool2['id']], ids_only=True) + assert sorted(result['origins']) \ + == [data.origin_url_1, data.origin_url_2, data.origin_url_3] + assert 'next_page_token' not in result # test ids_only=False - self.assertEqual(list(endpoint(mappings=['gemspec'])), [{ - 'id': self.origin_url_2, - 'metadata': { - '@context': 'foo', - 'author': 'Jane Doe', - }, - 'mappings': ['npm', 'gemspec'], - 'tool': tool2, - 'from_revision': self.revision_id_2, - }]) + assert endpoint(mappings=['gemspec'])['origins'] \ + == [{ + 'id': data.origin_url_2, + 'metadata': { + '@context': 'foo', + 'author': 'Jane Doe', + }, + 'mappings': ['npm', 'gemspec'], + 'tool': tool2, + 'from_revision': data.revision_id_2, + }] - def test_origin_intrinsic_metadata_stats(self): - self._fill_origin_intrinsic_metadata() + def test_origin_intrinsic_metadata_stats( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data + self._fill_origin_intrinsic_metadata( + swh_indexer_storage_with_data) - result = self.storage.origin_intrinsic_metadata_stats() - self.assertEqual(result, { + result = storage.origin_intrinsic_metadata_stats() + assert result == { 'per_mapping': { 'gemspec': 1, 'npm': 2, 'pkg-info': 1, 'codemeta': 0, 'maven': 0, }, 'total': 3, 'non_empty': 2, - }) + } + - def test_indexer_configuration_add(self): +class TestIndexerStorageIndexerCondifuration: + def test_indexer_configuration_add( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data tool = { 'tool_name': 'some-unknown-tool', 'tool_version': 'some-version', 'tool_configuration': {"debian-package": "some-package"}, } - actual_tool = self.storage.indexer_configuration_get(tool) - self.assertIsNone(actual_tool) # does not exist + actual_tool = storage.indexer_configuration_get(tool) + assert actual_tool is None # does not exist # add it - actual_tools = list(self.storage.indexer_configuration_add([tool])) + actual_tools = list(storage.indexer_configuration_add([tool])) - 
self.assertEqual(len(actual_tools), 1) + assert len(actual_tools) == 1 actual_tool = actual_tools[0] - self.assertIsNotNone(actual_tool) # now it exists + assert actual_tool is not None # now it exists new_id = actual_tool.pop('id') - self.assertEqual(actual_tool, tool) + assert actual_tool == tool - actual_tools2 = list(self.storage.indexer_configuration_add([tool])) + actual_tools2 = list(storage.indexer_configuration_add([tool])) actual_tool2 = actual_tools2[0] - self.assertIsNotNone(actual_tool2) # now it exists + assert actual_tool2 is not None # now it exists new_id2 = actual_tool2.pop('id') - self.assertEqual(new_id, new_id2) - self.assertEqual(actual_tool, actual_tool2) + assert new_id == new_id2 + assert actual_tool == actual_tool2 - def test_indexer_configuration_add_multiple(self): + def test_indexer_configuration_add_multiple( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data tool = { 'tool_name': 'some-unknown-tool', 'tool_version': 'some-version', 'tool_configuration': {"debian-package": "some-package"}, } - actual_tools = list(self.storage.indexer_configuration_add([tool])) - self.assertEqual(len(actual_tools), 1) + actual_tools = list(storage.indexer_configuration_add([tool])) + assert len(actual_tools) == 1 new_tools = [tool, { 'tool_name': 'yet-another-tool', 'tool_version': 'version', 'tool_configuration': {}, }] - actual_tools = list(self.storage.indexer_configuration_add(new_tools)) - self.assertEqual(len(actual_tools), 2) + actual_tools = list(storage.indexer_configuration_add(new_tools)) + assert len(actual_tools) == 2 # order not guaranteed, so we iterate over results to check for tool in actual_tools: _id = tool.pop('id') - self.assertIsNotNone(_id) - self.assertIn(tool, new_tools) + assert _id is not None + assert tool in new_tools - def test_indexer_configuration_get_missing(self): + def test_indexer_configuration_get_missing( + self, swh_indexer_storage_with_data): + storage, data = 
swh_indexer_storage_with_data tool = { 'tool_name': 'unknown-tool', 'tool_version': '3.1.0rc2-31-ga2cbb8c', 'tool_configuration': {"command_line": "nomossa "}, } - actual_tool = self.storage.indexer_configuration_get(tool) + actual_tool = storage.indexer_configuration_get(tool) - self.assertIsNone(actual_tool) + assert actual_tool is None - def test_indexer_configuration_get(self): + def test_indexer_configuration_get( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data tool = { 'tool_name': 'nomos', 'tool_version': '3.1.0rc2-31-ga2cbb8c', 'tool_configuration': {"command_line": "nomossa "}, } - self.storage.indexer_configuration_add([tool]) - actual_tool = self.storage.indexer_configuration_get(tool) + actual_tool = storage.indexer_configuration_get(tool) + assert actual_tool expected_tool = tool.copy() del actual_tool['id'] - self.assertEqual(expected_tool, actual_tool) + assert expected_tool == actual_tool - def test_indexer_configuration_metadata_get_missing_context(self): + def test_indexer_configuration_metadata_get_missing_context( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data tool = { 'tool_name': 'swh-metadata-translator', 'tool_version': '0.0.1', 'tool_configuration': {"context": "unknown-context"}, } - actual_tool = self.storage.indexer_configuration_get(tool) + actual_tool = storage.indexer_configuration_get(tool) - self.assertIsNone(actual_tool) + assert actual_tool is None - def test_indexer_configuration_metadata_get(self): + def test_indexer_configuration_metadata_get( + self, swh_indexer_storage_with_data): + storage, data = swh_indexer_storage_with_data tool = { 'tool_name': 'swh-metadata-translator', 'tool_version': '0.0.1', 'tool_configuration': {"type": "local", "context": "NpmMapping"}, } - self.storage.indexer_configuration_add([tool]) - actual_tool = self.storage.indexer_configuration_get(tool) + storage.indexer_configuration_add([tool]) + actual_tool = 
storage.indexer_configuration_get(tool) + assert actual_tool expected_tool = tool.copy() expected_tool['id'] = actual_tool['id'] - self.assertEqual(expected_tool, actual_tool) - - @pytest.mark.property_based - def test_generate_content_mimetype_get_range_limit_none(self): - """mimetype_get_range call with wrong limit input should fail""" - with self.assertRaises(ValueError) as e: - self.storage.content_mimetype_get_range( - start=None, end=None, indexer_configuration_id=None, - limit=None) - - self.assertEqual(e.exception.args, ( - 'Development error: limit should not be None',)) - - @pytest.mark.property_based - @given(gen_content_mimetypes(min_size=1, max_size=4)) - def test_generate_content_mimetype_get_range_no_limit(self, mimetypes): - """mimetype_get_range returns mimetypes within range provided""" - self.reset_storage_tables() - # add mimetypes to storage - self.storage.content_mimetype_add(mimetypes) - - # All ids from the db - content_ids = sorted([c['id'] for c in mimetypes]) - - start = content_ids[0] - end = content_ids[-1] - - # retrieve mimetypes - tool_id = mimetypes[0]['indexer_configuration_id'] - actual_result = self.storage.content_mimetype_get_range( - start, end, indexer_configuration_id=tool_id) - - actual_ids = actual_result['ids'] - actual_next = actual_result['next'] - - self.assertEqual(len(mimetypes), len(actual_ids)) - self.assertIsNone(actual_next) - self.assertEqual(content_ids, actual_ids) - - @pytest.mark.property_based - @given(gen_content_mimetypes(min_size=4, max_size=4)) - def test_generate_content_mimetype_get_range_limit(self, mimetypes): - """mimetype_get_range paginates results if limit exceeded""" - self.reset_storage_tables() - - # add mimetypes to storage - self.storage.content_mimetype_add(mimetypes) - - # input the list of sha1s we want from storage - content_ids = sorted([c['id'] for c in mimetypes]) - start = content_ids[0] - end = content_ids[-1] - - # retrieve mimetypes limited to 3 results - limited_results = 
len(mimetypes) - 1 - tool_id = mimetypes[0]['indexer_configuration_id'] - actual_result = self.storage.content_mimetype_get_range( - start, end, - indexer_configuration_id=tool_id, limit=limited_results) - - actual_ids = actual_result['ids'] - actual_next = actual_result['next'] - - self.assertEqual(limited_results, len(actual_ids)) - self.assertIsNotNone(actual_next) - self.assertEqual(actual_next, content_ids[-1]) - - expected_mimetypes = content_ids[:-1] - self.assertEqual(expected_mimetypes, actual_ids) - - # retrieve next part - actual_results2 = self.storage.content_mimetype_get_range( - start=end, end=end, indexer_configuration_id=tool_id) - actual_ids2 = actual_results2['ids'] - actual_next2 = actual_results2['next'] - - self.assertIsNone(actual_next2) - expected_mimetypes2 = [content_ids[-1]] - self.assertEqual(expected_mimetypes2, actual_ids2) + assert expected_tool == actual_tool - @pytest.mark.property_based - def test_generate_content_fossology_license_get_range_limit_none(self): - """license_get_range call with wrong limit input should fail""" - with self.assertRaises(ValueError) as e: - self.storage.content_fossology_license_get_range( - start=None, end=None, indexer_configuration_id=None, - limit=None) - - self.assertEqual(e.exception.args, ( - 'Development error: limit should not be None',)) - - @pytest.mark.property_based - def prepare_mimetypes_from(self, fossology_licenses): - """Fossology license needs some consistent data in db to run. 
- - """ - mimetypes = [] - for c in fossology_licenses: - mimetypes.append({ - 'id': c['id'], - 'mimetype': 'text/plain', - 'encoding': 'utf-8', - 'indexer_configuration_id': c['indexer_configuration_id'], - }) - return mimetypes - - @pytest.mark.property_based - @given(gen_content_fossology_licenses(min_size=1, max_size=4)) - def test_generate_content_fossology_license_get_range_no_limit( - self, fossology_licenses): - """license_get_range returns licenses within range provided""" - self.reset_storage_tables() - # craft some consistent mimetypes - mimetypes = self.prepare_mimetypes_from(fossology_licenses) - - self.storage.content_mimetype_add(mimetypes) - # add fossology_licenses to storage - self.storage.content_fossology_license_add(fossology_licenses) - - # All ids from the db - content_ids = sorted([c['id'] for c in fossology_licenses]) - - start = content_ids[0] - end = content_ids[-1] - - # retrieve fossology_licenses - tool_id = fossology_licenses[0]['indexer_configuration_id'] - actual_result = self.storage.content_fossology_license_get_range( - start, end, indexer_configuration_id=tool_id) - - actual_ids = actual_result['ids'] - actual_next = actual_result['next'] - - self.assertEqual(len(fossology_licenses), len(actual_ids)) - self.assertIsNone(actual_next) - self.assertEqual(content_ids, actual_ids) - - @pytest.mark.property_based - @given(gen_content_fossology_licenses(min_size=1, max_size=4), - gen_content_mimetypes(min_size=1, max_size=1)) - def test_generate_content_fossology_license_get_range_no_limit_with_filter( - self, fossology_licenses, mimetypes): - """This filters non textual, then returns results within range""" - self.reset_storage_tables() - - # craft some consistent mimetypes - _mimetypes = self.prepare_mimetypes_from(fossology_licenses) - # add binary mimetypes which will get filtered out in results - for m in mimetypes: - _mimetypes.append({ - 'mimetype': 'binary', - **m, - }) - - self.storage.content_mimetype_add(_mimetypes) - # add 
fossology_licenses to storage - self.storage.content_fossology_license_add(fossology_licenses) - - # All ids from the db - content_ids = sorted([c['id'] for c in fossology_licenses]) - - start = content_ids[0] - end = content_ids[-1] - - # retrieve fossology_licenses - tool_id = fossology_licenses[0]['indexer_configuration_id'] - actual_result = self.storage.content_fossology_license_get_range( - start, end, indexer_configuration_id=tool_id) - - actual_ids = actual_result['ids'] - actual_next = actual_result['next'] - - self.assertEqual(len(fossology_licenses), len(actual_ids)) - self.assertIsNone(actual_next) - self.assertEqual(content_ids, actual_ids) - - @pytest.mark.property_based - @given(gen_content_fossology_licenses(min_size=4, max_size=4)) - def test_generate_fossology_license_get_range_limit( - self, fossology_licenses): - """fossology_license_get_range paginates results if limit exceeded""" - self.reset_storage_tables() - # craft some consistent mimetypes - mimetypes = self.prepare_mimetypes_from(fossology_licenses) - - # add fossology_licenses to storage - self.storage.content_mimetype_add(mimetypes) - self.storage.content_fossology_license_add(fossology_licenses) - - # input the list of sha1s we want from storage - content_ids = sorted([c['id'] for c in fossology_licenses]) - start = content_ids[0] - end = content_ids[-1] - - # retrieve fossology_licenses limited to 3 results - limited_results = len(fossology_licenses) - 1 - tool_id = fossology_licenses[0]['indexer_configuration_id'] - actual_result = self.storage.content_fossology_license_get_range( - start, end, - indexer_configuration_id=tool_id, limit=limited_results) - - actual_ids = actual_result['ids'] - actual_next = actual_result['next'] - - self.assertEqual(limited_results, len(actual_ids)) - self.assertIsNotNone(actual_next) - self.assertEqual(actual_next, content_ids[-1]) - - expected_fossology_licenses = content_ids[:-1] - self.assertEqual(expected_fossology_licenses, actual_ids) - - # 
retrieve next part - actual_results2 = self.storage.content_fossology_license_get_range( - start=end, end=end, indexer_configuration_id=tool_id) - actual_ids2 = actual_results2['ids'] - actual_next2 = actual_results2['next'] - - self.assertIsNone(actual_next2) - expected_fossology_licenses2 = [content_ids[-1]] - self.assertEqual(expected_fossology_licenses2, actual_ids2) - - -@pytest.mark.db -class IndexerTestStorage(CommonTestStorage, BasePgTestStorage, - unittest.TestCase): - """Running the tests locally. - - For the client api tests (remote storage), see - `class`:swh.indexer.storage.test_api_client:TestRemoteStorage - class. +class TestIndexerStorageMisc: + """Misc endpoints tests for the IndexerStorage. """ - pass - -def test_mapping_names(): - assert set(MAPPING_NAMES) == {m.name for m in MAPPINGS.values()} + def test_check_config(self, swh_indexer_storage): + storage = swh_indexer_storage + assert storage.check_config(check_write=True) + assert storage.check_config(check_write=False) diff --git a/version.txt b/version.txt index 88600e0..5c7fd84 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.156-0-gd27cd13 \ No newline at end of file +v0.0.157-0-g01f1c38 \ No newline at end of file