diff --git a/MANIFEST.in b/MANIFEST.in index cfe6e5b..c6e3a9a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,9 +1,8 @@ include README.md include Makefile -include requirements.txt -include requirements-swh.txt +include requirements*.txt include version.txt recursive-include sql * recursive-include swh/indexer/sql *.sql recursive-include swh/indexer/data * recursive-include swh py.typed diff --git a/PKG-INFO b/PKG-INFO index c90f7a1..0af4326 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,69 +1,69 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 0.0.158 +Version: 0.0.159 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Description: swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - storing that information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive a batch of ids - retrieve the associated data depending on the object type - compute an index for that object - store the result in swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue swh_indexer_content_language): detect the programming language - ctags (queue swh_indexer_content_ctags): compute ctags information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translate the file into a translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata from the content_metadata table in storage, or runs the content indexer to translate the files.
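The Description above outlines the indexation procedure (receive a batch of ids, retrieve the associated data, compute an index, store the result). A minimal sketch of that loop follows; the class and method names are hypothetical illustrations, not swh-indexer's actual base classes (those live in swh/indexer/indexer.py):

```python
# Illustrative sketch of the fetch/compute/store cycle described in the
# README; all names here are hypothetical, not swh-indexer's real API.

class ContentIndexerSketch:
    def __init__(self, objstorage, idx_storage):
        self.objstorage = objstorage    # raw contents, keyed by sha1
        self.idx_storage = idx_storage  # indexer storage for the results

    def index(self, sha1, raw_data):
        """Compute one index entry (e.g. mimetype or license detection)."""
        raise NotImplementedError

    def persist(self, results):
        """Store results, e.g. via idx_storage.content_mimetype_add(...)."""
        raise NotImplementedError

    def run(self, sha1_ids):
        results = []
        for sha1 in sha1_ids:                      # 1. receive a batch of ids
            raw = self.objstorage.get(sha1)        # 2. retrieve associated data
            results.append(self.index(sha1, raw))  # 3. compute the index
        self.persist(results)                      # 4. store the result
```

The real indexers differ by object type (content sha1 hashes, revision sha1 hashes, origin identifiers), but all follow this same cycle.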
Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/requirements-swh.txt b/requirements-swh.txt index 330716a..c49aa91 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1,6 +1,6 @@ -swh.core[db,http] >= 0.0.65 +swh.core[db,http] >= 0.0.87 swh.model >= 0.0.15 swh.objstorage >= 0.0.28 swh.scheduler >= 0.0.47 swh.storage >= 0.0.156 swh.journal >= 0.0.17 diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000..96c1f1d --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,3 @@ +pytest +pytest-postgresql +hypothesis>=3.11.0 diff --git a/sql/doc/json/revision_metadata.translated_metadata.json b/sql/doc/json/revision_metadata.translated_metadata.json index 4b6814d..4041b2c 100644 --- a/sql/doc/json/revision_metadata.translated_metadata.json +++ b/sql/doc/json/revision_metadata.translated_metadata.json @@ -1,56 +1,56 @@ { "$schema": "http://json-schema.org/schema#", "id": "http://softwareheritage.org/schemas/revision_metadata.translated_metadata.schema.json", "type": "object", "properties": { "developmentStatus": { "type": "list" }, "version": { "type": "list" }, "operatingSystem": { "type": "list" }, "description": { "type": "list" }, "keywords": { "type": "list" }, "issueTracker": { "type": "list" }, "name": { "type": "list" }, "author": { "type": "list" }, "relatedLink": { "type": "list" }, "url": { "type": "list" }, "license": { "type": "list" }, "maintainer": { "type": "list" }, "email": { "type": "list" }, "softwareRequirements": { "type": "list" }, "identifier": { "type": "list" }, "codeRepository": { "type": "list" - }, + } } } diff --git a/sql/json/revision_metadata.translated_metadata.json b/sql/json/revision_metadata.translated_metadata.json index 4b6814d..4041b2c 100644 --- a/sql/json/revision_metadata.translated_metadata.json +++ b/sql/json/revision_metadata.translated_metadata.json @@ -1,56 +1,56 @@ { "$schema": "http://json-schema.org/schema#", "id": "http://softwareheritage.org/schemas/revision_metadata.translated_metadata.schema.json", "type": "object", "properties": { "developmentStatus": { "type": "list" }, "version": { "type": "list" }, "operatingSystem": { "type": "list" }, "description": { "type": "list" }, "keywords": { "type": "list" }, "issueTracker": { "type": "list" }, "name": { "type": "list" }, "author": { "type": "list" }, "relatedLink": { "type": "list" }, "url": { "type": "list" }, "license": { "type": "list" }, "maintainer": { "type": "list" }, "email": { "type": "list" }, "softwareRequirements": { "type": "list" }, "identifier": { "type": "list" }, "codeRepository": { "type": "list" - }, + } } } diff --git a/swh.indexer.egg-info/PKG-INFO b/swh.indexer.egg-info/PKG-INFO index c90f7a1..0af4326 100644 --- a/swh.indexer.egg-info/PKG-INFO +++ b/swh.indexer.egg-info/PKG-INFO @@ -1,69 +1,69 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 0.0.158 +Version: 0.0.159 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, 
https://forge.softwareheritage.org/source/swh-indexer Description: swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - store those information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive batch of ids - retrieve the associated data depending on object type - compute for that object some index - store the result to swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue swh_indexer_content_language): detect the programming language - ctags (queue swh_indexer_content_ctags): compute tags information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translate file into translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata in content_metadata table in storage or run content indexer to translate files. Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Description-Content-Type: text/markdown Provides-Extra: testing diff --git a/swh.indexer.egg-info/SOURCES.txt b/swh.indexer.egg-info/SOURCES.txt index 569109d..65ab8d4 100644 --- a/swh.indexer.egg-info/SOURCES.txt +++ b/swh.indexer.egg-info/SOURCES.txt @@ -1,95 +1,97 @@ MANIFEST.in Makefile README.md requirements-swh.txt +requirements-test.txt requirements.txt setup.py version.txt sql/bin/db-upgrade sql/bin/dot_add_content sql/doc/json/.gitignore sql/doc/json/Makefile sql/doc/json/indexer_configuration.tool_configuration.schema.json sql/doc/json/revision_metadata.translated_metadata.json sql/json/.gitignore sql/json/Makefile sql/json/indexer_configuration.tool_configuration.schema.json sql/json/revision_metadata.translated_metadata.json sql/upgrades/115.sql sql/upgrades/116.sql sql/upgrades/117.sql sql/upgrades/118.sql sql/upgrades/119.sql sql/upgrades/120.sql sql/upgrades/121.sql sql/upgrades/122.sql sql/upgrades/123.sql sql/upgrades/124.sql sql/upgrades/125.sql sql/upgrades/126.sql sql/upgrades/127.sql swh/__init__.py swh.indexer.egg-info/PKG-INFO swh.indexer.egg-info/SOURCES.txt swh.indexer.egg-info/dependency_links.txt swh.indexer.egg-info/entry_points.txt swh.indexer.egg-info/requires.txt swh.indexer.egg-info/top_level.txt swh/indexer/__init__.py swh/indexer/cli.py swh/indexer/codemeta.py swh/indexer/ctags.py swh/indexer/fossology_license.py swh/indexer/indexer.py swh/indexer/journal_client.py swh/indexer/metadata.py swh/indexer/metadata_detector.py swh/indexer/mimetype.py swh/indexer/origin_head.py swh/indexer/py.typed swh/indexer/rehash.py swh/indexer/tasks.py swh/indexer/data/codemeta/CITATION swh/indexer/data/codemeta/LICENSE swh/indexer/data/codemeta/codemeta.jsonld swh/indexer/data/codemeta/crosswalk.csv swh/indexer/metadata_dictionary/__init__.py swh/indexer/metadata_dictionary/base.py swh/indexer/metadata_dictionary/codemeta.py 
swh/indexer/metadata_dictionary/maven.py swh/indexer/metadata_dictionary/npm.py swh/indexer/metadata_dictionary/python.py swh/indexer/metadata_dictionary/ruby.py swh/indexer/sql/10-swh-init.sql swh/indexer/sql/20-swh-enums.sql swh/indexer/sql/30-swh-schema.sql swh/indexer/sql/40-swh-func.sql swh/indexer/sql/50-swh-data.sql swh/indexer/sql/60-swh-indexes.sql swh/indexer/storage/__init__.py swh/indexer/storage/converters.py swh/indexer/storage/db.py swh/indexer/storage/in_memory.py +swh/indexer/storage/interface.py swh/indexer/storage/api/__init__.py swh/indexer/storage/api/client.py swh/indexer/storage/api/server.py swh/indexer/tests/__init__.py swh/indexer/tests/conftest.py swh/indexer/tests/tasks.py swh/indexer/tests/test_cli.py swh/indexer/tests/test_ctags.py swh/indexer/tests/test_fossology_license.py swh/indexer/tests/test_journal_client.py swh/indexer/tests/test_metadata.py swh/indexer/tests/test_mimetype.py swh/indexer/tests/test_origin_head.py swh/indexer/tests/test_origin_metadata.py swh/indexer/tests/utils.py swh/indexer/tests/storage/__init__.py swh/indexer/tests/storage/conftest.py swh/indexer/tests/storage/generate_data_test.py swh/indexer/tests/storage/test_api_client.py swh/indexer/tests/storage/test_converters.py swh/indexer/tests/storage/test_in_memory.py swh/indexer/tests/storage/test_server.py swh/indexer/tests/storage/test_storage.py \ No newline at end of file diff --git a/swh.indexer.egg-info/requires.txt b/swh.indexer.egg-info/requires.txt index ebdf5b3..8940ea6 100644 --- a/swh.indexer.egg-info/requires.txt +++ b/swh.indexer.egg-info/requires.txt @@ -1,16 +1,16 @@ vcversioner click python-magic>=0.4.13 pyld xmltodict -swh.core[db,http]>=0.0.65 +swh.core[db,http]>=0.0.87 swh.model>=0.0.15 swh.objstorage>=0.0.28 swh.scheduler>=0.0.47 swh.storage>=0.0.156 swh.journal>=0.0.17 [testing] -pytest<4 +pytest pytest-postgresql hypothesis>=3.11.0 diff --git a/swh/indexer/codemeta.py b/swh/indexer/codemeta.py index d02cdf2..756cb4f 100644 --- a/swh/indexer/codemeta.py +++ b/swh/indexer/codemeta.py @@ -1,147 +1,158 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import collections import csv import itertools import json import os.path import re import swh.indexer from pyld import jsonld _DATA_DIR = os.path.join(os.path.dirname(swh.indexer.__file__), 'data') CROSSWALK_TABLE_PATH = os.path.join(_DATA_DIR, 'codemeta', 'crosswalk.csv') CODEMETA_CONTEXT_PATH = os.path.join(_DATA_DIR, 'codemeta', 'codemeta.jsonld') with open(CODEMETA_CONTEXT_PATH) as fd: CODEMETA_CONTEXT = json.load(fd) CODEMETA_CONTEXT_URL = 'https://doi.org/10.5063/schema/codemeta-2.0' CODEMETA_ALTERNATE_CONTEXT_URLS = { ('https://raw.githubusercontent.com/codemeta/codemeta/' 'master/codemeta.jsonld') } CODEMETA_URI = 'https://codemeta.github.io/terms/' SCHEMA_URI = 'http://schema.org/' PROPERTY_BLACKLIST = { # CodeMeta properties that we cannot properly represent. 
SCHEMA_URI + 'softwareRequirements', CODEMETA_URI + 'softwareSuggestions', # Duplicate of 'author' SCHEMA_URI + 'creator', } _codemeta_field_separator = re.compile(r'\s*[,/]\s*') def make_absolute_uri(local_name): definition = CODEMETA_CONTEXT['@context'][local_name] if isinstance(definition, str): return definition elif isinstance(definition, dict): prefixed_name = definition['@id'] (prefix, local_name) = prefixed_name.split(':') if prefix == 'schema': canonical_name = SCHEMA_URI + local_name elif prefix == 'codemeta': canonical_name = CODEMETA_URI + local_name else: assert False, prefix return canonical_name else: assert False, definition def _read_crosstable(fd): reader = csv.reader(fd) try: header = next(reader) except StopIteration: raise ValueError('empty file') data_sources = set(header) - {'Parent Type', 'Property', 'Type', 'Description'} assert 'codemeta-V1' in data_sources codemeta_translation = {data_source: {} for data_source in data_sources} terms = set() for line in reader: # For each canonical name local_name = dict(zip(header, line))['Property'] if not local_name: continue canonical_name = make_absolute_uri(local_name) if canonical_name in PROPERTY_BLACKLIST: continue terms.add(canonical_name) for (col, value) in zip(header, line): # For each cell in the row if col in data_sources: # If that's not the parentType/property/type/description for local_name in _codemeta_field_separator.split(value): # For each of the data source's properties that maps # to this canonical name if local_name.strip(): codemeta_translation[col][local_name.strip()] = \ canonical_name return (terms, codemeta_translation) with open(CROSSWALK_TABLE_PATH) as fd: (CODEMETA_TERMS, CROSSWALK_TABLE) = _read_crosstable(fd) def _document_loader(url): """Document loader for pyld. Reads the local codemeta.jsonld file instead of fetching it from the Internet every single time.""" if url == CODEMETA_CONTEXT_URL or url in CODEMETA_ALTERNATE_CONTEXT_URLS: return { 'contextUrl': None, 'documentUrl': url, 'document': CODEMETA_CONTEXT, } elif url == CODEMETA_URI: raise Exception('{} is CodeMeta\'s URI, use {} as context url'.format( CODEMETA_URI, CODEMETA_CONTEXT_URL)) else: raise Exception(url) def compact(doc): """Same as `pyld.jsonld.compact`, but in the context of CodeMeta.""" return jsonld.compact(doc, CODEMETA_CONTEXT_URL, options={'documentLoader': _document_loader}) def expand(doc): """Same as `pyld.jsonld.expand`, but in the context of CodeMeta.""" return jsonld.expand(doc, options={'documentLoader': _document_loader}) def merge_documents(documents): """Takes a list of metadata dicts, each generated from a different metadata file, and merges them. 
Removes duplicates, if any.""" documents = list(itertools.chain.from_iterable(map(expand, documents))) merged_document = collections.defaultdict(list) for document in documents: for (key, values) in document.items(): - for value in values: - if value not in merged_document[key]: - merged_document[key].append(value) + if key == '@id': + # @id does not get expanded to a list + value = values + + # Only one @id is allowed, move it to sameAs + if '@id' not in merged_document: + merged_document['@id'] = value + elif value != merged_document['@id']: + if value not in merged_document[SCHEMA_URI + 'sameAs']: + merged_document[SCHEMA_URI + 'sameAs'].append(value) + else: + for value in values: + if value not in merged_document[key]: + merged_document[key].append(value) return compact(merged_document) diff --git a/swh/indexer/data/codemeta/codemeta.jsonld b/swh/indexer/data/codemeta/codemeta.jsonld index ecba88b..5e19122 100644 --- a/swh/indexer/data/codemeta/codemeta.jsonld +++ b/swh/indexer/data/codemeta/codemeta.jsonld @@ -1,80 +1,80 @@ { "@context": { "type": "@type", "id": "@id", "schema":"http://schema.org/", "codemeta": "https://codemeta.github.io/terms/", "Organization": {"@id": "schema:Organization"}, "Person": {"@id": "schema:Person"}, "SoftwareSourceCode": {"@id": "schema:SoftwareSourceCode"}, "SoftwareApplication": {"@id": "schema:SoftwareApplication"}, "Text": {"@id": "schema:Text"}, "URL": {"@id": "schema:URL"}, "address": { "@id": "schema:address"}, "affiliation": { "@id": "schema:affiliation"}, "applicationCategory": { "@id": "schema:applicationCategory", "@type": "@id"}, "applicationSubCategory": { "@id": "schema:applicationSubCategory", "@type": "@id"}, "citation": { "@id": "schema:citation"}, "codeRepository": { "@id": "schema:codeRepository", "@type": "@id"}, "contributor": { "@id": "schema:contributor"}, "copyrightHolder": { "@id": "schema:copyrightHolder"}, "copyrightYear": { "@id": "schema:copyrightYear"}, "creator": { "@id": "schema:creator"}, "dateCreated": {"@id": "schema:dateCreated", "@type": "schema:Date" }, "dateModified": {"@id": "schema:dateModified", "@type": "schema:Date" }, "datePublished": {"@id": "schema:datePublished", "@type": "schema:Date" }, "description": { "@id": "schema:description"}, "downloadUrl": { "@id": "schema:downloadUrl", "@type": "@id"}, "email": { "@id": "schema:email"}, "editor": { "@id": "schema:editor"}, "encoding": { "@id": "schema:encoding"}, "familyName": { "@id": "schema:familyName"}, "fileFormat": { "@id": "schema:fileFormat", "@type": "@id"}, "fileSize": { "@id": "schema:fileSize"}, "funder": { "@id": "schema:funder"}, "givenName": { "@id": "schema:givenName"}, "hasPart": { "@id": "schema:hasPart" }, "identifier": { "@id": "schema:identifier", "@type": "@id"}, "installUrl": { "@id": "schema:installUrl", "@type": "@id"}, "isAccessibleForFree": { "@id": "schema:isAccessibleForFree"}, "isPartOf": { "@id": "schema:isPartOf"}, "keywords": { "@id": "schema:keywords"}, "license": { "@id": "schema:license", "@type": "@id"}, "memoryRequirements": { "@id": "schema:memoryRequirements", "@type": "@id"}, "name": { "@id": "schema:name"}, "operatingSystem": { "@id": "schema:operatingSystem"}, "permissions": { "@id": "schema:permissions"}, "position": { "@id": "schema:position"}, "processorRequirements": { "@id": "schema:processorRequirements"}, "producer": { "@id": "schema:producer"}, "programmingLanguage": { "@id": "schema:programmingLanguage"}, "provider": { "@id": "schema:provider"}, "publisher": { "@id": "schema:publisher"}, "relatedLink": { "@id": 
"schema:relatedLink", "@type": "@id"}, "releaseNotes": { "@id": "schema:releaseNotes", "@type": "@id"}, "runtimePlatform": { "@id": "schema:runtimePlatform"}, "sameAs": { "@id": "schema:sameAs", "@type": "@id"}, "softwareHelp": { "@id": "schema:softwareHelp"}, "softwareRequirements": { "@id": "schema:softwareRequirements", "@type": "@id"}, "softwareVersion": { "@id": "schema:softwareVersion"}, "sponsor": { "@id": "schema:sponsor"}, "storageRequirements": { "@id": "schema:storageRequirements", "@type": "@id"}, "supportingData": { "@id": "schema:supportingData"}, "targetProduct": { "@id": "schema:targetProduct"}, "url": { "@id": "schema:url", "@type": "@id"}, "version": { "@id": "schema:version"}, - + "author": { "@id": "schema:author", "@container": "@list" }, - + "softwareSuggestions": { "@id": "codemeta:softwareSuggestions", "@type": "@id"}, "contIntegration": { "@id": "codemeta:contIntegration", "@type": "@id"}, "buildInstructions": { "@id": "codemeta:buildInstructions", "@type": "@id"}, "developmentStatus": { "@id": "codemeta:developmentStatus", "@type": "@id"}, "embargoDate": { "@id":"codemeta:embargoDate", "@type": "schema:Date" }, "funding": { "@id": "codemeta:funding" }, "readme": { "@id":"codemeta:readme", "@type": "@id" }, "issueTracker": { "@id":"codemeta:issueTracker", "@type": "@id" }, "referencePublication": { "@id": "codemeta:referencePublication", "@type": "@id"}, "maintainer": { "@id": "codemeta:maintainer" } } } diff --git a/swh/indexer/metadata_dictionary/ruby.py b/swh/indexer/metadata_dictionary/ruby.py index 8d5b4a7..ddf0151 100644 --- a/swh/indexer/metadata_dictionary/ruby.py +++ b/swh/indexer/metadata_dictionary/ruby.py @@ -1,117 +1,124 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import ast import itertools import re -from swh.indexer.codemeta import CROSSWALK_TABLE +from swh.indexer.codemeta import CROSSWALK_TABLE, SCHEMA_URI from .base import DictMapping +def name_to_person(name): + return { + '@type': SCHEMA_URI + 'Person', + SCHEMA_URI + 'name': name, + } + + class GemspecMapping(DictMapping): name = 'gemspec' mapping = CROSSWALK_TABLE['Ruby Gem'] string_fields = ['name', 'version', 'description', 'summary', 'email'] _re_spec_new = re.compile(r'.*Gem::Specification.new +(do|\{) +\|.*\|.*') _re_spec_entry = re.compile(r'\s*\w+\.(?P\w+)\s*=\s*(?P.*)') @classmethod def detect_metadata_files(cls, file_entries): for entry in file_entries: if entry['name'].endswith(b'.gemspec'): return [entry['sha1']] return [] def translate(self, raw_content): try: raw_content = raw_content.decode() except UnicodeDecodeError: self.log.warning('Error unidecoding from %s', self.log_suffix) return # Skip lines before 'Gem::Specification.new' lines = itertools.dropwhile( lambda x: not self._re_spec_new.match(x), raw_content.split('\n')) try: next(lines) # Consume 'Gem::Specification.new' except StopIteration: self.log.warning('Could not find Gem::Specification in %s', self.log_suffix) return content_dict = {} for line in lines: match = self._re_spec_entry.match(line) if match: value = self.eval_ruby_expression(match.group('expr')) if value: content_dict[match.group('key')] = value return self._translate_dict(content_dict) def eval_ruby_expression(self, expr): """Very simple evaluator of Ruby expressions. 
>>> GemspecMapping().eval_ruby_expression('"Foo bar"') 'Foo bar' >>> GemspecMapping().eval_ruby_expression("'Foo bar'") 'Foo bar' >>> GemspecMapping().eval_ruby_expression("['Foo', 'bar']") ['Foo', 'bar'] >>> GemspecMapping().eval_ruby_expression("'Foo bar'.freeze") 'Foo bar' >>> GemspecMapping().eval_ruby_expression( \ "['Foo'.freeze, 'bar'.freeze]") ['Foo', 'bar'] """ def evaluator(node): if isinstance(node, ast.Str): return node.s elif isinstance(node, ast.List): res = [] for element in node.elts: val = evaluator(element) if not val: return res.append(val) return res expr = expr.replace('.freeze', '') try: # We're parsing Ruby expressions here, but Python's # ast.parse works for very simple Ruby expressions # (mainly strings delimited with " or ', and lists # of such strings). tree = ast.parse(expr, mode='eval') except (SyntaxError, ValueError): return if isinstance(tree, ast.Expression): return evaluator(tree.body) def normalize_homepage(self, s): if isinstance(s, str): return {"@id": s} def normalize_license(self, s): if isinstance(s, str): return [{"@id": "https://spdx.org/licenses/" + s}] def normalize_licenses(self, licenses): if isinstance(licenses, list): return [{"@id": "https://spdx.org/licenses/" + license} for license in licenses if isinstance(license, str)] def normalize_author(self, author): if isinstance(author, str): - return {"@list": [author]} + return {"@list": [name_to_person(author)]} def normalize_authors(self, authors): if isinstance(authors, list): - return {"@list": [author for author in authors + return {"@list": [name_to_person(author) for author in authors if isinstance(author, str)]} diff --git a/swh/indexer/origin_head.py b/swh/indexer/origin_head.py index 96606f1..5a6431b 100644 --- a/swh/indexer/origin_head.py +++ b/swh/indexer/origin_head.py @@ -1,162 +1,150 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import re import click import logging from swh.indexer.indexer import OriginIndexer class OriginHeadIndexer(OriginIndexer): """Origin-level indexer. This indexer is in charge of looking up the revision that acts as the "head" of an origin. In git, this is usually the commit pointed to by the 'master' branch.""" USE_TOOLS = False def persist_index_computations(self, results, policy_update): """Do nothing. The indexer's results are not persistent, they should only be piped to another indexer.""" pass # Dispatch def index(self, origin_url): latest_visit = self.storage.origin_visit_get_latest( origin_url, allowed_statuses=['full'], require_snapshot=True) if latest_visit is None: return None latest_snapshot = self.storage.snapshot_get(latest_visit['snapshot']) method = getattr( self, '_try_get_%s_head' % latest_visit['type'], self._try_get_head_generic) rev_id = method(latest_snapshot) if rev_id is not None: return { 'origin_url': origin_url, 'revision_id': rev_id, } # could not find a head revision return None - # VCSs - - def _try_get_vcs_head(self, snapshot): - try: - branches = snapshot['branches'] - if branches[b'HEAD']['target_type'] == 'revision': - return branches[b'HEAD']['target'] - except KeyError: - return None - - _try_get_hg_head = _try_get_git_head = _try_get_vcs_head - # Tarballs _archive_filename_re = re.compile( rb'^' rb'(?P<pkgname>.*)[-_]' rb'(?P<version>[0-9]+(\.[0-9])*)' rb'(?P<preversion>[-+][a-zA-Z0-9.~]+?)?'
rb'(?P<extension>(\.[a-zA-Z0-9]+)+)' rb'$') @classmethod def _parse_version(cls, filename): """Extracts the release version from an archive filename, to get an ordering whose maximum is likely to be the last version of the software >>> OriginHeadIndexer._parse_version(b'foo') (-inf,) >>> OriginHeadIndexer._parse_version(b'foo.tar.gz') (-inf,) >>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1.tar.gz') (0, 0, 1, 0) >>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1-beta2.tar.gz') (0, 0, 1, -1, 'beta2') >>> OriginHeadIndexer._parse_version(b'gnu-hello-0.0.1+foobar.tar.gz') (0, 0, 1, 1, 'foobar') """ res = cls._archive_filename_re.match(filename) if res is None: return (float('-infinity'),) version = [int(n) for n in res.group('version').decode().split('.')] if res.group('preversion') is None: version.append(0) else: preversion = res.group('preversion').decode() if preversion.startswith('-'): version.append(-1) version.append(preversion[1:]) elif preversion.startswith('+'): version.append(1) version.append(preversion[1:]) else: assert False, res.group('preversion') return tuple(version) def _try_get_ftp_head(self, snapshot): archive_names = list(snapshot['branches']) max_archive_name = max(archive_names, key=self._parse_version) r = self._try_resolve_target(snapshot['branches'], max_archive_name) return r # Generic def _try_get_head_generic(self, snapshot): - # Works on 'deposit', 'svn', and 'pypi'. + # Works on 'deposit', 'pypi', and VCSs. try: branches = snapshot['branches'] except KeyError: return None else: return ( self._try_resolve_target(branches, b'HEAD') or self._try_resolve_target(branches, b'master') ) def _try_resolve_target(self, branches, target_name): try: target = branches[target_name] if target is None: return None while target['target_type'] == 'alias': target = branches[target['target']] if target is None: return None if target['target_type'] == 'revision': return target['target'] elif target['target_type'] == 'content': return None # TODO elif target['target_type'] == 'directory': return None # TODO elif target['target_type'] == 'release': return None # TODO else: assert False except KeyError: return None @click.command() @click.option('--origins', '-i', help='Origins to lookup, in the "type+url" format', multiple=True) def main(origins): rev_metadata_indexer = OriginHeadIndexer() rev_metadata_indexer.run(origins) if __name__ == '__main__': logging.basicConfig(level=logging.INFO) main() diff --git a/swh/indexer/sql/40-swh-func.sql b/swh/indexer/sql/40-swh-func.sql index 31ab57c..d2c62b7 100644 --- a/swh/indexer/sql/40-swh-func.sql +++ b/swh/indexer/sql/40-swh-func.sql @@ -1,454 +1,454 @@ -- Postgresql index helper function create or replace function hash_sha1(text) returns text language sql strict immutable as $$ select encode(public.digest($1, 'sha1'), 'hex') $$; comment on function hash_sha1(text) is 'Compute sha1 hash as text'; -- create a temporary table called tmp_TBLNAME, mimicking existing table -- TBLNAME -- -- Args: --- tblname: name of the table to mimick +-- tblname: name of the table to mimic create or replace function swh_mktemp(tblname regclass) returns void language plpgsql as $$ begin execute format(' create temporary table tmp_%1$I (like %1$I including defaults) on commit drop; alter table tmp_%1$I drop column if exists object_id; ', tblname); return; end $$; -- create a temporary table for content_mimetype tmp_content_mimetype, create or replace function swh_mktemp_content_mimetype() returns void language sql as $$ create temporary table tmp_content_mimetype
( like content_mimetype including defaults ) on commit drop; $$; comment on function swh_mktemp_content_mimetype() IS 'Helper table to add mimetype information'; -- add tmp_content_mimetype entries to content_mimetype, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- If filtering duplicates is in order, the call to -- swh_content_mimetype_missing must take place before calling this -- function. -- -- -- operates in bulk: 0. swh_mktemp(content_mimetype), 1. COPY to tmp_content_mimetype, -- 2. call this function create or replace function swh_content_mimetype_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then insert into content_mimetype (id, mimetype, encoding, indexer_configuration_id) select id, mimetype, encoding, indexer_configuration_id from tmp_content_mimetype tcm on conflict(id, indexer_configuration_id) do update set mimetype = excluded.mimetype, encoding = excluded.encoding; else insert into content_mimetype (id, mimetype, encoding, indexer_configuration_id) select id, mimetype, encoding, indexer_configuration_id from tmp_content_mimetype tcm on conflict(id, indexer_configuration_id) do nothing; end if; return; end $$; comment on function swh_content_mimetype_add(boolean) IS 'Add new content mimetypes'; -- add tmp_content_language entries to content_language, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- If filtering duplicates is in order, the call to -- swh_content_language_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_content_language, 2. call this function create or replace function swh_content_language_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then insert into content_language (id, lang, indexer_configuration_id) select id, lang, indexer_configuration_id from tmp_content_language tcl on conflict(id, indexer_configuration_id) do update set lang = excluded.lang; else insert into content_language (id, lang, indexer_configuration_id) select id, lang, indexer_configuration_id from tmp_content_language tcl on conflict(id, indexer_configuration_id) do nothing; end if; return; end $$; comment on function swh_content_language_add(boolean) IS 'Add new content languages'; -- create a temporary table for retrieving content_language create or replace function swh_mktemp_content_language() returns void language sql as $$ create temporary table tmp_content_language ( like content_language including defaults ) on commit drop; $$; comment on function swh_mktemp_content_language() is 'Helper table to add content language'; -- create a temporary table for content_ctags tmp_content_ctags, create or replace function swh_mktemp_content_ctags() returns void language sql as $$ create temporary table tmp_content_ctags ( like content_ctags including defaults ) on commit drop; $$; comment on function swh_mktemp_content_ctags() is 'Helper table to add content ctags'; -- add tmp_content_ctags entries to content_ctags, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- operates in bulk: 0. swh_mktemp(content_ctags), 1. COPY to tmp_content_ctags, -- 2. 
call this function create or replace function swh_content_ctags_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then delete from content_ctags where id in (select tmp.id from tmp_content_ctags tmp inner join indexer_configuration i on i.id=tmp.indexer_configuration_id); end if; insert into content_ctags (id, name, kind, line, lang, indexer_configuration_id) select id, name, kind, line, lang, indexer_configuration_id from tmp_content_ctags tct on conflict(id, hash_sha1(name), kind, line, lang, indexer_configuration_id) do nothing; return; end $$; comment on function swh_content_ctags_add(boolean) IS 'Add new ctags symbols per content'; create type content_ctags_signature as ( id sha1, name text, kind text, line bigint, lang ctags_languages, tool_id integer, tool_name text, tool_version text, tool_configuration jsonb ); -- Search within ctags content. -- create or replace function swh_content_ctags_search( expression text, l integer default 10, last_sha1 sha1 default '\x0000000000000000000000000000000000000000') returns setof content_ctags_signature language sql as $$ select c.id, name, kind, line, lang, i.id as tool_id, tool_name, tool_version, tool_configuration from content_ctags c inner join indexer_configuration i on i.id = c.indexer_configuration_id where hash_sha1(name) = hash_sha1(expression) and c.id > last_sha1 order by id limit l; $$; comment on function swh_content_ctags_search(text, integer, sha1) IS 'Equality search through ctags'' symbols'; -- create a temporary table for content_fossology_license tmp_content_fossology_license, create or replace function swh_mktemp_content_fossology_license() returns void language sql as $$ create temporary table tmp_content_fossology_license ( id sha1, license text, indexer_configuration_id integer ) on commit drop; $$; comment on function swh_mktemp_content_fossology_license() is 'Helper table to add content license'; -- add tmp_content_fossology_license entries to content_fossology_license, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- operates in bulk: 0. swh_mktemp(content_fossology_license), 1. COPY to -- tmp_content_fossology_license, 2. 
call this function create or replace function swh_content_fossology_license_add(conflict_update boolean) returns void language plpgsql as $$ begin -- insert unknown licenses first insert into fossology_license (name) select distinct license from tmp_content_fossology_license tmp where not exists (select 1 from fossology_license where name=tmp.license) on conflict(name) do nothing; if conflict_update then -- delete from content_fossology_license c -- using tmp_content_fossology_license tmp, indexer_configuration i -- where c.id = tmp.id and i.id=tmp.indexer_configuration_id delete from content_fossology_license where id in (select tmp.id from tmp_content_fossology_license tmp inner join indexer_configuration i on i.id=tmp.indexer_configuration_id); end if; insert into content_fossology_license (id, license_id, indexer_configuration_id) select tcl.id, (select id from fossology_license where name = tcl.license) as license, indexer_configuration_id from tmp_content_fossology_license tcl on conflict(id, license_id, indexer_configuration_id) do nothing; return; end $$; comment on function swh_content_fossology_license_add(boolean) IS 'Add new content licenses'; -- content_metadata functions -- add tmp_content_metadata entries to content_metadata, overwriting -- duplicates if conflict_update is true, skipping duplicates otherwise. -- -- If filtering duplicates is in order, the call to -- swh_content_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_content_metadata, 2. call this function create or replace function swh_content_metadata_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then insert into content_metadata (id, metadata, indexer_configuration_id) select id, metadata, indexer_configuration_id from tmp_content_metadata tcm on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata; else insert into content_metadata (id, metadata, indexer_configuration_id) select id, metadata, indexer_configuration_id from tmp_content_metadata tcm on conflict(id, indexer_configuration_id) do nothing; end if; return; end $$; comment on function swh_content_metadata_add(boolean) IS 'Add new content metadata'; -- create a temporary table for retrieving content_metadata create or replace function swh_mktemp_content_metadata() returns void language sql as $$ create temporary table tmp_content_metadata ( like content_metadata including defaults ) on commit drop; $$; comment on function swh_mktemp_content_metadata() is 'Helper table to add content metadata'; -- end content_metadata functions -- add tmp_revision_intrinsic_metadata entries to revision_intrinsic_metadata, -- overwriting duplicates if conflict_update is true, skipping duplicates -- otherwise. -- -- If filtering duplicates is in order, the call to -- swh_revision_intrinsic_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_revision_intrinsic_metadata, 2. 
call this function create or replace function swh_revision_intrinsic_metadata_add(conflict_update boolean) returns void language plpgsql as $$ begin if conflict_update then insert into revision_intrinsic_metadata (id, metadata, mappings, indexer_configuration_id) select id, metadata, mappings, indexer_configuration_id from tmp_revision_intrinsic_metadata tcm on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata, mappings = excluded.mappings; else insert into revision_intrinsic_metadata (id, metadata, mappings, indexer_configuration_id) select id, metadata, mappings, indexer_configuration_id from tmp_revision_intrinsic_metadata tcm on conflict(id, indexer_configuration_id) do nothing; end if; return; end $$; comment on function swh_revision_intrinsic_metadata_add(boolean) IS 'Add new revision intrinsic metadata'; -- create a temporary table for retrieving revision_intrinsic_metadata create or replace function swh_mktemp_revision_intrinsic_metadata() returns void language sql as $$ create temporary table tmp_revision_intrinsic_metadata ( like revision_intrinsic_metadata including defaults ) on commit drop; $$; comment on function swh_mktemp_revision_intrinsic_metadata() is 'Helper table to add revision intrinsic metadata'; -- create a temporary table for retrieving origin_intrinsic_metadata create or replace function swh_mktemp_origin_intrinsic_metadata() returns void language sql as $$ create temporary table tmp_origin_intrinsic_metadata ( like origin_intrinsic_metadata including defaults ) on commit drop; $$; comment on function swh_mktemp_origin_intrinsic_metadata() is 'Helper table to add origin intrinsic metadata'; create or replace function swh_mktemp_indexer_configuration() returns void language sql as $$ create temporary table tmp_indexer_configuration ( like indexer_configuration including defaults ) on commit drop; alter table tmp_indexer_configuration drop column id; $$; -- add tmp_indexer_configuration entries to indexer_configuration, -- skipping duplicates if any. -- -- operates in bulk: 0. create temporary tmp_indexer_configuration, 1. COPY to -- it, 2. call this function to insert and filtering out duplicates create or replace function swh_indexer_configuration_add() returns setof indexer_configuration language plpgsql as $$ begin insert into indexer_configuration(tool_name, tool_version, tool_configuration) select tool_name, tool_version, tool_configuration from tmp_indexer_configuration tmp on conflict(tool_name, tool_version, tool_configuration) do nothing; return query select id, tool_name, tool_version, tool_configuration from tmp_indexer_configuration join indexer_configuration using(tool_name, tool_version, tool_configuration); return; end $$; -- add tmp_origin_intrinsic_metadata entries to origin_intrinsic_metadata, -- overwriting duplicates if conflict_update is true, skipping duplicates -- otherwise. -- -- If filtering duplicates is in order, the call to -- swh_origin_intrinsic_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_origin_intrinsic_metadata, 2. 
call this function create or replace function swh_origin_intrinsic_metadata_add( conflict_update boolean) returns void language plpgsql as $$ begin perform swh_origin_intrinsic_metadata_compute_tsvector(); if conflict_update then insert into origin_intrinsic_metadata (id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings) select id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings from tmp_origin_intrinsic_metadata on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata, metadata_tsvector = excluded.metadata_tsvector, mappings = excluded.mappings, from_revision = excluded.from_revision; else insert into origin_intrinsic_metadata (id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings) select id, metadata, indexer_configuration_id, from_revision, metadata_tsvector, mappings from tmp_origin_intrinsic_metadata on conflict(id, indexer_configuration_id) do nothing; end if; return; end $$; comment on function swh_origin_intrinsic_metadata_add(boolean) IS 'Add new origin intrinsic metadata'; -- Compute the metadata_tsvector column in tmp_origin_intrinsic_metadata. -- -- It uses the "pg_catalog.simple" dictionary, as it has no stopword, -- so it should be suitable for proper names and non-English text. create or replace function swh_origin_intrinsic_metadata_compute_tsvector() returns void language plpgsql as $$ begin update tmp_origin_intrinsic_metadata set metadata_tsvector = to_tsvector('pg_catalog.simple', metadata); end $$; diff --git a/swh/indexer/storage/__init__.py b/swh/indexer/storage/__init__.py index a8d557f..8c61aff 100644 --- a/swh/indexer/storage/__init__.py +++ b/swh/indexer/storage/__init__.py @@ -1,936 +1,451 @@ -# Copyright (C) 2015-2018 The Software Heritage developers +# Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json import psycopg2 from collections import defaultdict -from swh.core.api import remote_api_endpoint from swh.storage.common import db_transaction_generator, db_transaction from swh.storage.exc import StorageDBError from .db import Db from . import converters INDEXER_CFG_KEY = 'indexer_storage' MAPPING_NAMES = ['codemeta', 'gemspec', 'maven', 'npm', 'pkg-info'] def get_indexer_storage(cls, args): """Get an indexer storage object of class `storage_class` with arguments `storage_args`. Args: cls (str): storage's class, either 'local' or 'remote' args (dict): dictionary of arguments passed to the storage class constructor Returns: an instance of swh.indexer's storage (either local or remote) Raises: ValueError if passed an unknown storage class. """ if cls == 'remote': from .api.client import RemoteStorage as IndexerStorage elif cls == 'local': from . import IndexerStorage elif cls == 'memory': from .in_memory import IndexerStorage else: raise ValueError('Unknown indexer storage class `%s`' % cls) return IndexerStorage(**args) def _check_id_duplicates(data): """ If any two dictionaries in `data` have the same id, raises a `ValueError`. Values associated to the key must be hashable. Args: data (List[dict]): List of dictionaries to be inserted >>> _check_id_duplicates([ ... {'id': 'foo', 'data': 'spam'}, ... {'id': 'bar', 'data': 'egg'}, ... ]) >>> _check_id_duplicates([ ... {'id': 'foo', 'data': 'spam'}, ... {'id': 'foo', 'data': 'egg'}, ... 
]) Traceback (most recent call last): ... ValueError: The same id is present more than once. """ if len({item['id'] for item in data}) < len(data): raise ValueError('The same id is present more than once.') class IndexerStorage: """SWH Indexer Storage """ def __init__(self, db, min_pool_conns=1, max_pool_conns=10): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection """ try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) def get_db(self): if self._db: return self._db return Db.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() - @remote_api_endpoint('check_config') @db_transaction() def check_config(self, *, check_write, db=None, cur=None): - """Check that the storage is configured and ready to go.""" # Check permissions on one of the tables if check_write: check = 'INSERT' else: check = 'SELECT' cur.execute( "select has_table_privilege(current_user, 'content_mimetype', %s)", # noqa (check,) ) return cur.fetchone()[0] - @remote_api_endpoint('content_mimetype/missing') @db_transaction_generator() def content_mimetype_missing(self, mimetypes, db=None, cur=None): - """Generate mimetypes missing from storage. - - Args: - mimetypes (iterable): iterable of dict with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute the - results - - Yields: - tuple (id, indexer_configuration_id): missing id - - """ for obj in db.content_mimetype_missing_from_list(mimetypes, cur): yield obj[0] def _content_get_range(self, content_type, start, end, indexer_configuration_id, limit=1000, with_textual_data=False, db=None, cur=None): - """Retrieve ids of type content_type within range [start, end] bound - by limit. - - Args: - **content_type** (str): content's type (mimetype, language, etc...) - **start** (bytes): Starting identifier range (expected smaller - than end) - **end** (bytes): Ending identifier range (expected larger - than start) - **indexer_configuration_id** (int): The tool used to index data - **limit** (int): Limit result (default to 1000) - **with_textual_data** (bool): Deal with only textual - content (True) or all - content (all contents by - defaults, False) - - Raises: - ValueError for; - - limit to None - - wrong content_type provided - - Returns: - a dict with keys: - - **ids** [bytes]: iterable of content ids within the range. - - **next** (Optional[bytes]): The next range of sha1 starts at - this sha1 if any - - """ if limit is None: raise ValueError('Development error: limit should not be None') if content_type not in db.content_indexer_names: err = 'Development error: Wrong type. Should be one of [%s]' % ( ','.join(db.content_indexer_names)) raise ValueError(err) ids = [] next_id = None for counter, obj in enumerate(db.content_get_range( content_type, start, end, indexer_configuration_id, limit=limit+1, with_textual_data=with_textual_data, cur=cur)): _id = obj[0] if counter >= limit: next_id = _id break ids.append(_id) return { 'ids': ids, 'next': next_id } - @remote_api_endpoint('content_mimetype/range') @db_transaction() def content_mimetype_get_range(self, start, end, indexer_configuration_id, limit=1000, db=None, cur=None): - """Retrieve mimetypes within range [start, end] bound by limit. 
- - Args: - **start** (bytes): Starting identifier range (expected smaller - than end) - **end** (bytes): Ending identifier range (expected larger - than start) - **indexer_configuration_id** (int): The tool used to index data - **limit** (int): Limit result (default to 1000) - - Raises: - ValueError for limit to None - - Returns: - a dict with keys: - - **ids** [bytes]: iterable of content ids within the range. - - **next** (Optional[bytes]): The next range of sha1 starts at - this sha1 if any - - """ return self._content_get_range('mimetype', start, end, indexer_configuration_id, limit=limit, db=db, cur=cur) - @remote_api_endpoint('content_mimetype/add') @db_transaction() def content_mimetype_add(self, mimetypes, conflict_update=False, db=None, cur=None): - """Add mimetypes not present in storage. - - Args: - mimetypes (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **mimetype** (bytes): raw content's mimetype - - **encoding** (bytes): raw content's encoding - - **indexer_configuration_id** (int): tool's id used to - compute the results - - **conflict_update** (bool): Flag to determine if we want to - overwrite (``True``) or skip duplicates (``False``, the - default) - - """ _check_id_duplicates(mimetypes) mimetypes.sort(key=lambda m: m['id']) db.mktemp_content_mimetype(cur) db.copy_to(mimetypes, 'tmp_content_mimetype', ['id', 'mimetype', 'encoding', 'indexer_configuration_id'], cur) db.content_mimetype_add_from_temp(conflict_update, cur) - @remote_api_endpoint('content_mimetype') @db_transaction_generator() def content_mimetype_get(self, ids, db=None, cur=None): - """Retrieve full content mimetype per ids. - - Args: - ids (iterable): sha1 identifier - - Yields: - mimetypes (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **mimetype** (bytes): raw content's mimetype - - **encoding** (bytes): raw content's encoding - - **tool** (dict): Tool used to compute the language - - """ for c in db.content_mimetype_get_from_list(ids, cur): yield converters.db_to_mimetype( dict(zip(db.content_mimetype_cols, c))) - @remote_api_endpoint('content_language/missing') @db_transaction_generator() def content_language_missing(self, languages, db=None, cur=None): - """List languages missing from storage. - - Args: - languages (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - an iterable of missing id for the tuple (id, - indexer_configuration_id) - - """ for obj in db.content_language_missing_from_list(languages, cur): yield obj[0] - @remote_api_endpoint('content_language') @db_transaction_generator() def content_language_get(self, ids, db=None, cur=None): - """Retrieve full content language per ids. - - Args: - ids (iterable): sha1 identifier - - Yields: - languages (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **lang** (bytes): raw content's language - - **tool** (dict): Tool used to compute the language - - """ for c in db.content_language_get_from_list(ids, cur): yield converters.db_to_language( dict(zip(db.content_language_cols, c))) - @remote_api_endpoint('content_language/add') @db_transaction() def content_language_add(self, languages, conflict_update=False, db=None, cur=None): - """Add languages not present in storage. 
- - Args: - languages (iterable): dictionaries with keys: - - - **id** (bytes): sha1 - - **lang** (bytes): language detected - - conflict_update (bool): Flag to determine if we want to - overwrite (true) or skip duplicates (false, the - default) - - """ _check_id_duplicates(languages) languages.sort(key=lambda m: m['id']) db.mktemp_content_language(cur) # empty language is mapped to 'unknown' db.copy_to( ({ 'id': l['id'], 'lang': 'unknown' if not l['lang'] else l['lang'], 'indexer_configuration_id': l['indexer_configuration_id'], } for l in languages), 'tmp_content_language', ['id', 'lang', 'indexer_configuration_id'], cur) db.content_language_add_from_temp(conflict_update, cur) - @remote_api_endpoint('content/ctags/missing') @db_transaction_generator() def content_ctags_missing(self, ctags, db=None, cur=None): - """List ctags missing from storage. - - Args: - ctags (iterable): dicts with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - an iterable of missing id for the tuple (id, - indexer_configuration_id) - - """ for obj in db.content_ctags_missing_from_list(ctags, cur): yield obj[0] - @remote_api_endpoint('content/ctags') @db_transaction_generator() def content_ctags_get(self, ids, db=None, cur=None): - """Retrieve ctags per id. - - Args: - ids (iterable): sha1 checksums - - Yields: - Dictionaries with keys: - - - **id** (bytes): content's identifier - - **name** (str): symbol's name - - **kind** (str): symbol's kind - - **lang** (str): language for that content - - **tool** (dict): tool used to compute the ctags' info - - - """ for c in db.content_ctags_get_from_list(ids, cur): yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, c))) - @remote_api_endpoint('content/ctags/add') @db_transaction() def content_ctags_add(self, ctags, conflict_update=False, db=None, cur=None): - """Add ctags not present in storage - - Args: - ctags (iterable): dictionaries with keys: - - - **id** (bytes): sha1 - - **ctags** ([list): List of dictionary with keys: name, kind, - line, lang - - """ _check_id_duplicates(ctags) ctags.sort(key=lambda m: m['id']) def _convert_ctags(__ctags): """Convert ctags dict to list of ctags. """ for ctags in __ctags: yield from converters.ctags_to_db(ctags) db.mktemp_content_ctags(cur) db.copy_to(list(_convert_ctags(ctags)), tblname='tmp_content_ctags', columns=['id', 'name', 'kind', 'line', 'lang', 'indexer_configuration_id'], cur=cur) db.content_ctags_add_from_temp(conflict_update, cur) - @remote_api_endpoint('content/ctags/search') @db_transaction_generator() def content_ctags_search(self, expression, limit=10, last_sha1=None, db=None, cur=None): - """Search through content's raw ctags symbols. - - Args: - expression (str): Expression to search for - limit (int): Number of rows to return (default to 10). - last_sha1 (str): Offset from which retrieving data (default to ''). - - Yields: - rows of ctags including id, name, lang, kind, line, etc... - - """ for obj in db.content_ctags_search(expression, last_sha1, limit, cur=cur): yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj))) - @remote_api_endpoint('content/fossology_license') @db_transaction_generator() def content_fossology_license_get(self, ids, db=None, cur=None): - """Retrieve licenses per id. 
- - Args: - ids (iterable): sha1 checksums - - Yields: - dict: ``{id: facts}`` where ``facts`` is a dict with the - following keys: - - - **licenses** ([str]): associated licenses for that content - - **tool** (dict): Tool used to compute the license - - """ d = defaultdict(list) for c in db.content_fossology_license_get_from_list(ids, cur): license = dict(zip(db.content_fossology_license_cols, c)) id_ = license['id'] d[id_].append(converters.db_to_fossology_license(license)) for id_, facts in d.items(): yield {id_: facts} - @remote_api_endpoint('content/fossology_license/add') @db_transaction() def content_fossology_license_add(self, licenses, conflict_update=False, db=None, cur=None): - """Add licenses not present in storage. - - Args: - licenses (iterable): dictionaries with keys: - - - **id**: sha1 - - **licenses** ([bytes]): List of licenses associated to sha1 - - **tool** (str): nomossa - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - Returns: - list: content_license entries which failed due to unknown licenses - - """ _check_id_duplicates(licenses) licenses.sort(key=lambda m: m['id']) db.mktemp_content_fossology_license(cur) db.copy_to( ({ 'id': sha1['id'], 'indexer_configuration_id': sha1['indexer_configuration_id'], 'license': license, } for sha1 in licenses for license in sha1['licenses']), tblname='tmp_content_fossology_license', columns=['id', 'license', 'indexer_configuration_id'], cur=cur) db.content_fossology_license_add_from_temp(conflict_update, cur) - @remote_api_endpoint('content/fossology_license/range') @db_transaction() def content_fossology_license_get_range( self, start, end, indexer_configuration_id, limit=1000, db=None, cur=None): - """Retrieve licenses within range [start, end] bound by limit. - - Args: - **start** (bytes): Starting identifier range (expected smaller - than end) - **end** (bytes): Ending identifier range (expected larger - than start) - **indexer_configuration_id** (int): The tool used to index data - **limit** (int): Limit result (default to 1000) - - Raises: - ValueError for limit to None - - Returns: - a dict with keys: - - **ids** [bytes]: iterable of content ids within the range. - - **next** (Optional[bytes]): The next range of sha1 starts at - this sha1 if any - - """ return self._content_get_range('fossology_license', start, end, indexer_configuration_id, limit=limit, with_textual_data=True, db=db, cur=cur) - @remote_api_endpoint('content_metadata/missing') @db_transaction_generator() def content_metadata_missing(self, metadata, db=None, cur=None): - """List metadata missing from storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - missing sha1s - - """ for obj in db.content_metadata_missing_from_list(metadata, cur): yield obj[0] - @remote_api_endpoint('content_metadata') @db_transaction_generator() def content_metadata_get(self, ids, db=None, cur=None): - """Retrieve metadata per id. 
- - Args: - ids (iterable): sha1 checksums - - Yields: - dictionaries with the following keys: - - id (bytes) - metadata (str): associated metadata - tool (dict): tool used to compute metadata - - """ for c in db.content_metadata_get_from_list(ids, cur): yield converters.db_to_metadata( dict(zip(db.content_metadata_cols, c))) - @remote_api_endpoint('content_metadata/add') @db_transaction() def content_metadata_add(self, metadata, conflict_update=False, db=None, cur=None): - """Add metadata not present in storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id**: sha1 - - **metadata**: arbitrary dict - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - """ _check_id_duplicates(metadata) metadata.sort(key=lambda m: m['id']) db.mktemp_content_metadata(cur) db.copy_to(metadata, 'tmp_content_metadata', ['id', 'metadata', 'indexer_configuration_id'], cur) db.content_metadata_add_from_temp(conflict_update, cur) - @remote_api_endpoint('revision_intrinsic_metadata/missing') @db_transaction_generator() def revision_intrinsic_metadata_missing(self, metadata, db=None, cur=None): - """List metadata missing from storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id** (bytes): sha1_git revision identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - missing ids - - """ for obj in db.revision_intrinsic_metadata_missing_from_list( metadata, cur): yield obj[0] - @remote_api_endpoint('revision_intrinsic_metadata') @db_transaction_generator() def revision_intrinsic_metadata_get(self, ids, db=None, cur=None): - """Retrieve revision metadata per id. - - Args: - ids (iterable): sha1 checksums - - Yields: - dictionaries with the following keys: - - - **id** (bytes) - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ for c in db.revision_intrinsic_metadata_get_from_list(ids, cur): yield converters.db_to_metadata( dict(zip(db.revision_intrinsic_metadata_cols, c))) - @remote_api_endpoint('revision_intrinsic_metadata/add') @db_transaction() def revision_intrinsic_metadata_add(self, metadata, conflict_update=False, db=None, cur=None): - """Add metadata not present in storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id**: sha1_git of revision - - **metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - """ _check_id_duplicates(metadata) metadata.sort(key=lambda m: m['id']) db.mktemp_revision_intrinsic_metadata(cur) db.copy_to(metadata, 'tmp_revision_intrinsic_metadata', ['id', 'metadata', 'mappings', 'indexer_configuration_id'], cur) db.revision_intrinsic_metadata_add_from_temp(conflict_update, cur) - @remote_api_endpoint('revision_intrinsic_metadata/delete') @db_transaction() def revision_intrinsic_metadata_delete(self, entries, db=None, cur=None): - """Remove revision metadata from the storage. 
- - Args: - entries (dict): dictionaries with the following keys: - - **id** (bytes): revision identifier - - **indexer_configuration_id** (int): tool used to compute - metadata - """ db.revision_intrinsic_metadata_delete(entries, cur) - @remote_api_endpoint('origin_intrinsic_metadata') @db_transaction_generator() def origin_intrinsic_metadata_get(self, ids, db=None, cur=None): - """Retrieve origin metadata per id. - - Args: - ids (iterable): origin identifiers - - Yields: - list: dictionaries with the following keys: - - - **id** (str): origin url - - **from_revision** (bytes): which revision this metadata - was extracted from - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ for c in db.origin_intrinsic_metadata_get_from_list(ids, cur): yield converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c))) - @remote_api_endpoint('origin_intrinsic_metadata/add') @db_transaction() def origin_intrinsic_metadata_add(self, metadata, conflict_update=False, db=None, cur=None): - """Add origin metadata not present in storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id**: origin urls - - **from_revision**: sha1 id of the revision used to generate - these metadata. - - **metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - """ _check_id_duplicates(metadata) metadata.sort(key=lambda m: m['id']) db.mktemp_origin_intrinsic_metadata(cur) db.copy_to(metadata, 'tmp_origin_intrinsic_metadata', ['id', 'metadata', 'indexer_configuration_id', 'from_revision', 'mappings'], cur) db.origin_intrinsic_metadata_add_from_temp(conflict_update, cur) - @remote_api_endpoint('origin_intrinsic_metadata/delete') @db_transaction() def origin_intrinsic_metadata_delete( self, entries, db=None, cur=None): - """Remove origin metadata from the storage. - - Args: - entries (dict): dictionaries with the following keys: - - **id** (str): origin urls - - **indexer_configuration_id** (int): tool used to compute - metadata - """ db.origin_intrinsic_metadata_delete(entries, cur) - @remote_api_endpoint('origin_intrinsic_metadata/search/fulltext') @db_transaction_generator() def origin_intrinsic_metadata_search_fulltext( self, conjunction, limit=100, db=None, cur=None): - """Returns the list of origins whose metadata contain all the terms. - - Args: - conjunction (List[str]): List of terms to be searched for. - limit (int): The maximum number of results to return - - Yields: - list: dictionaries with the following keys: - - - **id** (str): origin urls - - **from_revision**: sha1 id of the revision used to generate - these metadata. 
- - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ for c in db.origin_intrinsic_metadata_search_fulltext( conjunction, limit=limit, cur=cur): yield converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c))) - @remote_api_endpoint('origin_intrinsic_metadata/search/by_producer') @db_transaction() def origin_intrinsic_metadata_search_by_producer( self, page_token='', limit=100, ids_only=False, mappings=None, tool_ids=None, db=None, cur=None): - """Returns the list of origins whose metadata contain all the terms. - - Args: - page_token (str): Opaque token used for pagination. - limit (int): The maximum number of results to return - ids_only (bool): Determines whether only origin urls are - returned or the content as well - mappings (List[str]): Returns origins whose intrinsic metadata - were generated using at least one of these mappings. - - Returns: - dict: dict with the following keys: - - **next_page_token** (str, optional): opaque token to be used as - `page_token` for retrieving the next page. If absent, there is - no more pages to gather. - - **origins** (list): list of origin url (str) if `ids_only=True` - else dictionaries with the following keys: - - - **id** (str): origin urls - - **from_revision**: sha1 id of the revision used to generate - these metadata. - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ assert isinstance(page_token, str) - # we go to limit+1 to check wether we should add next_page_token in + # we go to limit+1 to check whether we should add next_page_token in # the response res = db.origin_intrinsic_metadata_search_by_producer( page_token, limit + 1, ids_only, mappings, tool_ids, cur) result = {} if ids_only: result['origins'] = [origin for (origin,) in res] if len(result['origins']) > limit: result['origins'][limit:] = [] result['next_page_token'] = result['origins'][-1] else: result['origins'] = [converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c))) for c in res] if len(result['origins']) > limit: result['origins'][limit:] = [] result['next_page_token'] = result['origins'][-1]['id'] return result - @remote_api_endpoint('origin_intrinsic_metadata/stats') @db_transaction() def origin_intrinsic_metadata_stats( self, db=None, cur=None): - """Returns counts of indexed metadata per origins, broken down - into metadata types. - - Returns: - dict: dictionary with keys: - - - total (int): total number of origins that were indexed - (possibly yielding an empty metadata dictionary) - - non_empty (int): total number of origins that we extracted - a non-empty metadata dictionary from - - per_mapping (dict): a dictionary with mapping names as - keys and number of origins whose indexing used this - mapping. Note that indexing a given origin may use - 0, 1, or many mappings.
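`origin_intrinsic_metadata_search_by_producer` above deliberately fetches `limit + 1` rows so it can tell whether a `next_page_token` is needed. A hedged client-side sketch of draining such a paginated endpoint (`storage` stands for any object implementing it):

```python
# Keep requesting pages until next_page_token stops coming back.
def iter_origins(storage, **kwargs):
    page_token = ''
    while True:
        result = storage.origin_intrinsic_metadata_search_by_producer(
            page_token=page_token, ids_only=True, **kwargs)
        yield from result['origins']
        page_token = result.get('next_page_token')
        if page_token is None:  # an absent token means no more pages
            return
```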
- """ mapping_names = [m for m in MAPPING_NAMES] select_parts = [] # Count rows for each mapping for mapping_name in mapping_names: select_parts.append(( "sum(case when (mappings @> ARRAY['%s']) " " then 1 else 0 end)" ) % mapping_name) # Total select_parts.append("sum(1)") # Rows whose metadata has at least one key that is not '@context' select_parts.append( "sum(case when ('{}'::jsonb @> (metadata - '@context')) " " then 0 else 1 end)") cur.execute('select ' + ', '.join(select_parts) + ' from origin_intrinsic_metadata') results = dict(zip(mapping_names + ['total', 'non_empty'], cur.fetchone())) return { 'total': results.pop('total'), 'non_empty': results.pop('non_empty'), 'per_mapping': results, } - @remote_api_endpoint('indexer_configuration/add') @db_transaction_generator() def indexer_configuration_add(self, tools, db=None, cur=None): - """Add new tools to the storage. - - Args: - tools ([dict]): List of dictionary representing tool to - insert in the db. Dictionary with the following keys: - - - **tool_name** (str): tool's name - - **tool_version** (str): tool's version - - **tool_configuration** (dict): tool's configuration - (free form dict) - - Returns: - List of dict inserted in the db (holding the id key as - well). The order of the list is not guaranteed to match - the order of the initial list. - - """ db.mktemp_indexer_configuration(cur) db.copy_to(tools, 'tmp_indexer_configuration', ['tool_name', 'tool_version', 'tool_configuration'], cur) tools = db.indexer_configuration_add_from_temp(cur) for line in tools: yield dict(zip(db.indexer_configuration_cols, line)) - @remote_api_endpoint('indexer_configuration/data') @db_transaction() def indexer_configuration_get(self, tool, db=None, cur=None): - """Retrieve tool information. - - Args: - tool (dict): Dictionary representing a tool with the - following keys: - - - **tool_name** (str): tool's name - - **tool_version** (str): tool's version - - **tool_configuration** (dict): tool's configuration - (free form dict) - - Returns: - The same dictionary with an `id` key, None otherwise. - - """ tool_conf = tool['tool_configuration'] if isinstance(tool_conf, dict): tool_conf = json.dumps(tool_conf) idx = db.indexer_configuration_get(tool['tool_name'], tool['tool_version'], tool_conf) if not idx: return None return dict(zip(db.indexer_configuration_cols, idx)) diff --git a/swh/indexer/storage/api/client.py b/swh/indexer/storage/api/client.py index ec4c234..0e62adc 100644 --- a/swh/indexer/storage/api/client.py +++ b/swh/indexer/storage/api/client.py @@ -1,17 +1,17 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.core.api import RPCClient from swh.storage.exc import StorageAPIError -from .. 
import IndexerStorage +from ..interface import IndexerStorageInterface class RemoteStorage(RPCClient): """Proxy to a remote storage API""" - backend_class = IndexerStorage + backend_class = IndexerStorageInterface api_exception = StorageAPIError diff --git a/swh/indexer/storage/api/server.py b/swh/indexer/storage/api/server.py index 7edfec6..2b71f2b 100644 --- a/swh/indexer/storage/api/server.py +++ b/swh/indexer/storage/api/server.py @@ -1,106 +1,107 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import logging from swh.core import config from swh.core.api import (RPCServerApp, error_handler, encode_data_server as encode_data) from swh.indexer.storage import ( - get_indexer_storage, INDEXER_CFG_KEY, IndexerStorage + get_indexer_storage, INDEXER_CFG_KEY ) +from swh.indexer.storage.interface import IndexerStorageInterface def get_storage(): global storage if not storage: storage = get_indexer_storage(**app.config[INDEXER_CFG_KEY]) return storage app = RPCServerApp(__name__, - backend_class=IndexerStorage, + backend_class=IndexerStorageInterface, backend_factory=get_storage) storage = None @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) @app.route('/') def index(): return 'SWH Indexer Storage API server' api_cfg = None def load_and_check_config(config_file, type='local'): """Check the minimal configuration is set to run the api or raise an error with an explanation. Args: config_file (str): Path to the configuration file to load type (str): configuration type. For 'local' type, more checks are done. Raises: Error if the setup is not as expected Returns: configuration as a dict """ if not config_file: raise EnvironmentError('Configuration file must be defined') if not os.path.exists(config_file): raise FileNotFoundError('Configuration file %s does not exist' % ( config_file, )) cfg = config.read(config_file) if 'indexer_storage' not in cfg: raise KeyError("Missing 'indexer_storage' configuration") if type == 'local': vcfg = cfg['indexer_storage'] cls = vcfg.get('cls') if cls != 'local': raise ValueError( "The indexer_storage backend can only be started with a " "'local' configuration") args = vcfg['args'] if not args.get('db'): raise ValueError( "Invalid configuration; missing 'db' config entry") return cfg def make_app_from_configfile(): """Run the WSGI app from the webserver, loading the configuration from a configuration file. SWH_CONFIG_FILENAME environment variable defines the configuration path to load. """ global api_cfg if not api_cfg: config_file = os.environ.get('SWH_CONFIG_FILENAME') api_cfg = load_and_check_config(config_file) app.config.update(api_cfg) handler = logging.StreamHandler() app.logger.addHandler(handler) return app if __name__ == '__main__': print('Deprecated. 
Use swh-indexer') diff --git a/swh/indexer/storage/db.py b/swh/indexer/storage/db.py index fb08d62..87ae355 100644 --- a/swh/indexer/storage/db.py +++ b/swh/indexer/storage/db.py @@ -1,455 +1,455 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.model import hashutil from swh.core.db import BaseDb from swh.core.db.db_utils import execute_values_generator, stored_procedure class Db(BaseDb): """Proxy to the SWH Indexer DB, with wrappers around stored procedures """ content_mimetype_hash_keys = ['id', 'indexer_configuration_id'] def _missing_from_list(self, table, data, hash_keys, cur=None): """Read from table the data with hash_keys that are missing. Args: table (str): Table name (e.g content_mimetype, content_language, etc...) data (dict): Dict of data to read from hash_keys ([str]): List of keys to read in the data dict. Yields: The data which is missing from the db. """ cur = self._cursor(cur) keys = ', '.join(hash_keys) equality = ' AND '.join( ('t.%s = c.%s' % (key, key)) for key in hash_keys ) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(%s) where not exists ( select 1 from %s c where %s ) """ % (keys, keys, table, equality), (tuple(m[k] for k in hash_keys) for m in data) ) def content_mimetype_missing_from_list(self, mimetypes, cur=None): """List missing mimetypes. """ yield from self._missing_from_list( 'content_mimetype', mimetypes, self.content_mimetype_hash_keys, cur=cur) content_mimetype_cols = [ 'id', 'mimetype', 'encoding', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_mimetype') def mktemp_content_mimetype(self, cur=None): pass def content_mimetype_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_mimetype_add(%s)", (conflict_update, )) def _convert_key(self, key, main_table='c'): """Convert keys according to specific use in the module. Args: key (str): Key expression to change according to the alias used in the query main_table (str): Alias to use for the main table. Default to c for content_{something}. Expected: Tables content_{something} being aliased as 'c' (something in {language, mimetype, ...}), table indexer_configuration being aliased as 'i'. """ if key == 'id': return '%s.id' % main_table elif key == 'tool_id': return 'i.id as tool_id' elif key == 'licenses': return ''' array(select name from fossology_license where id = ANY( array_agg(%s.license_id))) as licenses''' % main_table return key def _get_from_list(self, table, ids, cols, cur=None, id_col='id'): """Fetches entries from the `table` such that their `id` field (or whatever is given to `id_col`) is in `ids`. Returns the columns `cols`. - The `cur`sor is used to connect to the database. + The `cur` parameter is used to connect to the database. 
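For reference, `load_and_check_config` in the server module above accepts a configuration like the following, sketched here as the Python dict that `config.read()` would return (the file on disk is YAML; the connection string is an example value only):

```python
# Minimal configuration accepted by load_and_check_config for the
# 'local' type; the db value is illustrative.
cfg = {
    'indexer_storage': {
        'cls': 'local',
        'args': {
            'db': 'dbname=softwareheritage-indexer-dev',  # required when cls == 'local'
        },
    },
}
assert cfg['indexer_storage']['args'].get('db')  # the check performed above
```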
""" cur = self._cursor(cur) keys = map(self._convert_key, cols) query = """ select {keys} from (values %s) as t(id) inner join {table} c on c.{id_col}=t.id inner join indexer_configuration i on c.indexer_configuration_id=i.id; """.format( keys=', '.join(keys), id_col=id_col, table=table) yield from execute_values_generator( cur, query, ((_id,) for _id in ids) ) content_indexer_names = { 'mimetype': 'content_mimetype', 'fossology_license': 'content_fossology_license', } def content_get_range(self, content_type, start, end, indexer_configuration_id, limit=1000, with_textual_data=False, cur=None): """Retrieve contents with content_type, within range [start, end] bound by limit and associated to the given indexer configuration id. When asking to work on textual content, that filters on the mimetype table with any mimetype that is not binary. """ cur = self._cursor(cur) table = self.content_indexer_names[content_type] if with_textual_data: extra = """inner join content_mimetype cm on (t.id=cm.id and cm.mimetype like 'text/%%')""" else: extra = "" query = """select t.id from %s t inner join indexer_configuration ic on t.indexer_configuration_id=ic.id %s where ic.id=%%s and %%s <= t.id and t.id <= %%s order by t.indexer_configuration_id, t.id limit %%s""" % (table, extra) cur.execute(query, (indexer_configuration_id, start, end, limit)) yield from cur def content_mimetype_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'content_mimetype', ids, self.content_mimetype_cols, cur=cur) content_language_hash_keys = ['id', 'indexer_configuration_id'] def content_language_missing_from_list(self, languages, cur=None): """List missing languages. """ yield from self._missing_from_list( 'content_language', languages, self.content_language_hash_keys, cur=cur) content_language_cols = [ 'id', 'lang', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_language') def mktemp_content_language(self, cur=None): pass def content_language_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_language_add(%s)", (conflict_update, )) def content_language_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'content_language', ids, self.content_language_cols, cur=cur) content_ctags_hash_keys = ['id', 'indexer_configuration_id'] def content_ctags_missing_from_list(self, ctags, cur=None): """List missing ctags. 
""" yield from self._missing_from_list( 'content_ctags', ctags, self.content_ctags_hash_keys, cur=cur) content_ctags_cols = [ 'id', 'name', 'kind', 'line', 'lang', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_ctags') def mktemp_content_ctags(self, cur=None): pass def content_ctags_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_ctags_add(%s)", (conflict_update, )) def content_ctags_get_from_list(self, ids, cur=None): cur = self._cursor(cur) keys = map(self._convert_key, self.content_ctags_cols) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(id) inner join content_ctags c on c.id=t.id inner join indexer_configuration i on c.indexer_configuration_id=i.id order by line """ % ', '.join(keys), ((_id,) for _id in ids) ) def content_ctags_search(self, expression, last_sha1, limit, cur=None): cur = self._cursor(cur) if not last_sha1: query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s)""" % ( ','.join(self.content_ctags_cols)) cur.execute(query, (expression, limit)) else: if last_sha1 and isinstance(last_sha1, bytes): last_sha1 = '\\x%s' % hashutil.hash_to_hex(last_sha1) elif last_sha1: last_sha1 = '\\x%s' % last_sha1 query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s, %%s)""" % ( ','.join(self.content_ctags_cols)) cur.execute(query, (expression, limit, last_sha1)) yield from cur content_fossology_license_cols = [ 'id', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration', 'licenses'] @stored_procedure('swh_mktemp_content_fossology_license') def mktemp_content_fossology_license(self, cur=None): pass def content_fossology_license_add_from_temp(self, conflict_update, cur=None): """Add new licenses per content. """ self._cursor(cur).execute( "SELECT swh_content_fossology_license_add(%s)", (conflict_update, )) def content_fossology_license_get_from_list(self, ids, cur=None): """Retrieve licenses per id. """ cur = self._cursor(cur) keys = map(self._convert_key, self.content_fossology_license_cols) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(id) inner join content_fossology_license c on t.id=c.id inner join indexer_configuration i on i.id=c.indexer_configuration_id group by c.id, i.id, i.tool_name, i.tool_version, i.tool_configuration; """ % ', '.join(keys), ((_id,) for _id in ids) ) content_metadata_hash_keys = ['id', 'indexer_configuration_id'] def content_metadata_missing_from_list(self, metadata, cur=None): """List missing metadata. """ yield from self._missing_from_list( 'content_metadata', metadata, self.content_metadata_hash_keys, cur=cur) content_metadata_cols = [ 'id', 'metadata', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_content_metadata') def mktemp_content_metadata(self, cur=None): pass def content_metadata_add_from_temp(self, conflict_update, cur=None): self._cursor(cur).execute("SELECT swh_content_metadata_add(%s)", (conflict_update, )) def content_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'content_metadata', ids, self.content_metadata_cols, cur=cur) revision_intrinsic_metadata_hash_keys = [ 'id', 'indexer_configuration_id'] def revision_intrinsic_metadata_missing_from_list( self, metadata, cur=None): """List missing metadata. 
""" yield from self._missing_from_list( 'revision_intrinsic_metadata', metadata, self.revision_intrinsic_metadata_hash_keys, cur=cur) revision_intrinsic_metadata_cols = [ 'id', 'metadata', 'mappings', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_revision_intrinsic_metadata') def mktemp_revision_intrinsic_metadata(self, cur=None): pass def revision_intrinsic_metadata_add_from_temp( self, conflict_update, cur=None): self._cursor(cur).execute( "SELECT swh_revision_intrinsic_metadata_add(%s)", (conflict_update, )) def revision_intrinsic_metadata_delete( self, entries, cur=None): cur = self._cursor(cur) cur.execute( "DELETE from revision_intrinsic_metadata " "WHERE (id, indexer_configuration_id) IN " " (VALUES %s)" % (', '.join('%s' for _ in entries)), tuple((e['id'], e['indexer_configuration_id']) for e in entries),) def revision_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'revision_intrinsic_metadata', ids, self.revision_intrinsic_metadata_cols, cur=cur) origin_intrinsic_metadata_cols = [ 'id', 'metadata', 'from_revision', 'mappings', 'tool_id', 'tool_name', 'tool_version', 'tool_configuration'] origin_intrinsic_metadata_regconfig = 'pg_catalog.simple' """The dictionary used to normalize 'metadata' and queries. 'pg_catalog.simple' provides no stopword, so it should be suitable for proper names and non-English content. When updating this value, make sure to add a new index on origin_intrinsic_metadata.metadata.""" @stored_procedure('swh_mktemp_origin_intrinsic_metadata') def mktemp_origin_intrinsic_metadata(self, cur=None): pass def origin_intrinsic_metadata_add_from_temp( self, conflict_update, cur=None): cur = self._cursor(cur) cur.execute( "SELECT swh_origin_intrinsic_metadata_add(%s)", (conflict_update, )) def origin_intrinsic_metadata_delete( self, entries, cur=None): cur = self._cursor(cur) cur.execute( "DELETE from origin_intrinsic_metadata " "WHERE (id, indexer_configuration_id) IN" " (VALUES %s)" % (', '.join('%s' for _ in entries)), tuple((e['id'], e['indexer_configuration_id']) for e in entries),) def origin_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( 'origin_intrinsic_metadata', ids, self.origin_intrinsic_metadata_cols, cur=cur, id_col='id') def origin_intrinsic_metadata_search_fulltext(self, terms, *, limit, cur): regconfig = self.origin_intrinsic_metadata_regconfig tsquery_template = ' && '.join("plainto_tsquery('%s', %%s)" % regconfig for _ in terms) tsquery_args = [(term,) for term in terms] keys = (self._convert_key(col, 'oim') for col in self.origin_intrinsic_metadata_cols) query = ("SELECT {keys} FROM origin_intrinsic_metadata AS oim " "INNER JOIN indexer_configuration AS i " "ON oim.indexer_configuration_id=i.id " "JOIN LATERAL (SELECT {tsquery_template}) AS s(tsq) ON true " "WHERE oim.metadata_tsvector @@ tsq " "ORDER BY ts_rank(oim.metadata_tsvector, tsq, 1) DESC " "LIMIT %s;" ).format(keys=', '.join(keys), tsquery_template=tsquery_template) cur.execute(query, tsquery_args + [limit]) yield from cur def origin_intrinsic_metadata_search_by_producer( self, last, limit, ids_only, mappings, tool_ids, cur): if ids_only: keys = 'oim.id' else: keys = ', '.join((self._convert_key(col, 'oim') for col in self.origin_intrinsic_metadata_cols)) query_parts = [ "SELECT %s" % keys, "FROM origin_intrinsic_metadata AS oim", "INNER JOIN indexer_configuration AS i", "ON oim.indexer_configuration_id=i.id", ] args = [] where = [] if last: where.append('oim.id > 
%s') args.append(last) if mappings is not None: where.append('oim.mappings && %s') args.append(mappings) if tool_ids is not None: where.append('oim.indexer_configuration_id = ANY(%s)') args.append(tool_ids) if where: query_parts.append('WHERE') query_parts.append(' AND '.join(where)) if limit: query_parts.append('LIMIT %s') args.append(limit) cur.execute(' '.join(query_parts), args) yield from cur indexer_configuration_cols = ['id', 'tool_name', 'tool_version', 'tool_configuration'] @stored_procedure('swh_mktemp_indexer_configuration') def mktemp_indexer_configuration(self, cur=None): pass def indexer_configuration_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("SELECT %s from swh_indexer_configuration_add()" % ( ','.join(self.indexer_configuration_cols), )) yield from cur def indexer_configuration_get(self, tool_name, tool_version, tool_configuration, cur=None): cur = self._cursor(cur) cur.execute('''select %s from indexer_configuration where tool_name=%%s and tool_version=%%s and tool_configuration=%%s''' % ( ','.join(self.indexer_configuration_cols)), (tool_name, tool_version, tool_configuration)) return cur.fetchone() diff --git a/swh/indexer/storage/in_memory.py b/swh/indexer/storage/in_memory.py index 0aed233..74a41a5 100644 --- a/swh/indexer/storage/in_memory.py +++ b/swh/indexer/storage/in_memory.py @@ -1,848 +1,421 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import bisect from collections import defaultdict, Counter import itertools import json import operator import math import re from . import MAPPING_NAMES SHA1_DIGEST_SIZE = 160 def _transform_tool(tool): return { 'id': tool['id'], 'name': tool['tool_name'], 'version': tool['tool_version'], 'configuration': tool['tool_configuration'], } class SubStorage: """Implements common missing/get/add logic for each indexer type.""" def __init__(self, tools): self._tools = tools self._sorted_ids = [] self._data = {} # map (id_, tool_id) -> metadata_dict self._tools_per_id = defaultdict(set) # map id_ -> Set[tool_id] def missing(self, ids): """List data missing from storage. Args: data (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ for id_ in ids: tool_id = id_['indexer_configuration_id'] id_ = id_['id'] if tool_id not in self._tools_per_id.get(id_, set()): yield id_ def get(self, ids): """Retrieve data per id. Args: ids (iterable): sha1 checksums Yields: dict: dictionaries with the following keys: - **id** (bytes) - **tool** (dict): tool used to compute metadata - arbitrary data (as provided to `add`) """ for id_ in ids: for tool_id in self._tools_per_id.get(id_, set()): key = (id_, tool_id) yield { 'id': id_, 'tool': _transform_tool(self._tools[tool_id]), **self._data[key], } def get_all(self): yield from self.get(self._sorted_ids) def get_range(self, start, end, indexer_configuration_id, limit): """Retrieve data within range [start, end] bound by limit. 
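`origin_intrinsic_metadata_search_by_producer` above assembles its WHERE clause conditionally, keeping SQL fragments and bound arguments in lockstep. The same pattern in isolation, with a simplified query and invented values:

```python
# Toy version of the conditional query assembly used above; psycopg2
# would do the actual %s binding.
def build_query(last=None, mappings=None, limit=None):
    parts = ['SELECT id FROM origin_intrinsic_metadata']
    where, args = [], []
    if last:
        where.append('id > %s')
        args.append(last)
    if mappings is not None:
        where.append('mappings && %s')
        args.append(mappings)
    if where:
        parts.append('WHERE ' + ' AND '.join(where))
    if limit:
        parts.append('LIMIT %s')
        args.append(limit)
    return ' '.join(parts), args


assert build_query(last='abc', limit=5)[1] == ['abc', 5]
```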
Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result Raises: ValueError if limit is None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ if limit is None: raise ValueError('Development error: limit should not be None') from_index = bisect.bisect_left(self._sorted_ids, start) to_index = bisect.bisect_right(self._sorted_ids, end, lo=from_index) if to_index - from_index >= limit: return { 'ids': self._sorted_ids[from_index:from_index+limit], 'next': self._sorted_ids[from_index+limit], } else: return { 'ids': self._sorted_ids[from_index:to_index], 'next': None, } def add(self, data, conflict_update): """Add data not present in storage. Args: data (iterable): dictionaries with keys: - **id**: sha1 - **indexer_configuration_id**: tool used to compute the results - arbitrary data conflict_update (bool): Flag to determine if we want to overwrite (true) or skip duplicates (false) """ data = list(data) if len({x['id'] for x in data}) < len(data): # For "exception-compatibility" with the pgsql backend raise ValueError('The same id is present more than once.') for item in data: item = item.copy() tool_id = item.pop('indexer_configuration_id') id_ = item.pop('id') data = item if not conflict_update and \ tool_id in self._tools_per_id.get(id_, set()): # Duplicate, should not be updated continue key = (id_, tool_id) self._data[key] = data self._tools_per_id[id_].add(tool_id) if id_ not in self._sorted_ids: bisect.insort(self._sorted_ids, id_) def add_merge(self, new_data, conflict_update, merged_key): for new_item in new_data: id_ = new_item['id'] tool_id = new_item['indexer_configuration_id'] if conflict_update: all_subitems = [] else: existing = list(self.get([id_])) all_subitems = [ old_subitem for existing_item in existing if existing_item['tool']['id'] == tool_id for old_subitem in existing_item[merged_key] ] for new_subitem in new_item[merged_key]: if new_subitem not in all_subitems: all_subitems.append(new_subitem) self.add([ { 'id': id_, 'indexer_configuration_id': tool_id, merged_key: all_subitems, } ], conflict_update=True) if id_ not in self._sorted_ids: bisect.insort(self._sorted_ids, id_) def delete(self, entries): for entry in entries: (id_, tool_id) = (entry['id'], entry['indexer_configuration_id']) key = (id_, tool_id) if tool_id in self._tools_per_id[id_]: self._tools_per_id[id_].remove(tool_id) if key in self._data: del self._data[key] class IndexerStorage: """In-memory SWH indexer storage.""" def __init__(self): self._tools = {} self._mimetypes = SubStorage(self._tools) self._languages = SubStorage(self._tools) self._content_ctags = SubStorage(self._tools) self._licenses = SubStorage(self._tools) self._content_metadata = SubStorage(self._tools) self._revision_intrinsic_metadata = SubStorage(self._tools) self._origin_intrinsic_metadata = SubStorage(self._tools) def check_config(self, *, check_write): return True def content_mimetype_missing(self, mimetypes): - """Generate mimetypes missing from storage. 
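`SubStorage.get_range` above pages through a sorted id list with `bisect`; the core slicing logic, replayed on toy data (real ids are 20-byte sha1s):

```python
import bisect

sorted_ids = [b'\x01', b'\x03', b'\x05', b'\x07']
start, end, limit = b'\x02', b'\x07', 2

lo = bisect.bisect_left(sorted_ids, start)
hi = bisect.bisect_right(sorted_ids, end, lo=lo)
if hi - lo >= limit:
    ids, next_ = sorted_ids[lo:lo + limit], sorted_ids[lo + limit]
else:
    ids, next_ = sorted_ids[lo:hi], None

assert ids == [b'\x03', b'\x05'] and next_ == b'\x07'
```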
- - Args: - mimetypes (iterable): iterable of dict with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute the - results - - Yields: - tuple (id, indexer_configuration_id): missing id - - """ yield from self._mimetypes.missing(mimetypes) def content_mimetype_get_range( self, start, end, indexer_configuration_id, limit=1000): - """Retrieve mimetypes within range [start, end] bound by limit. - - Args: - **start** (bytes): Starting identifier range (expected smaller - than end) - **end** (bytes): Ending identifier range (expected larger - than start) - **indexer_configuration_id** (int): The tool used to index data - **limit** (int): Limit result (default to 1000) - - Raises: - ValueError for limit to None - - Returns: - a dict with keys: - - **ids** [bytes]: iterable of content ids within the range. - - **next** (Optional[bytes]): The next range of sha1 starts at - this sha1 if any - - """ return self._mimetypes.get_range( start, end, indexer_configuration_id, limit) def content_mimetype_add(self, mimetypes, conflict_update=False): - """Add mimetypes not present in storage. - - Args: - mimetypes (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **mimetype** (bytes): raw content's mimetype - - **encoding** (bytes): raw content's encoding - - **indexer_configuration_id** (int): tool's id used to - compute the results - - **conflict_update** (bool): Flag to determine if we want to - overwrite (``True``) or skip duplicates (``False``, the - default) - - """ if not all(isinstance(x['id'], bytes) for x in mimetypes): raise TypeError('identifiers must be bytes.') self._mimetypes.add(mimetypes, conflict_update) - def content_mimetype_get(self, ids, db=None, cur=None): - """Retrieve full content mimetype per ids. - - Args: - ids (iterable): sha1 identifier - - Yields: - mimetypes (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **mimetype** (bytes): raw content's mimetype - - **encoding** (bytes): raw content's encoding - - **tool** (dict): Tool used to compute the language - - """ + def content_mimetype_get(self, ids): yield from self._mimetypes.get(ids) def content_language_missing(self, languages): - """List languages missing from storage. - - Args: - languages (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - an iterable of missing id for the tuple (id, - indexer_configuration_id) - - """ yield from self._languages.missing(languages) def content_language_get(self, ids): - """Retrieve full content language per ids. - - Args: - ids (iterable): sha1 identifier - - Yields: - languages (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **lang** (bytes): raw content's language - - **tool** (dict): Tool used to compute the language - - """ yield from self._languages.get(ids) def content_language_add(self, languages, conflict_update=False): - """Add languages not present in storage. - - Args: - languages (iterable): dictionaries with keys: - - - **id** (bytes): sha1 - - **lang** (bytes): language detected - - conflict_update (bool): Flag to determine if we want to - overwrite (true) or skip duplicates (false, the - default) - - """ if not all(isinstance(x['id'], bytes) for x in languages): raise TypeError('identifiers must be bytes.') self._languages.add(languages, conflict_update) def content_ctags_missing(self, ctags): - """List ctags missing from storage. 
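A hedged end-to-end sketch against the in-memory backend shown in this diff, assuming the module path swh.indexer.storage.in_memory; the tool and content values are made up:

```python
from swh.indexer.storage.in_memory import IndexerStorage

storage = IndexerStorage()
tool = storage.indexer_configuration_add([{
    'tool_name': 'file', 'tool_version': '5.22',
    'tool_configuration': {}}])[0]
storage.content_mimetype_add([{
    'id': b'\x01' * 20,          # ids must be bytes, as checked above
    'mimetype': b'text/plain',
    'encoding': b'utf-8',
    'indexer_configuration_id': tool['id'],
}])
print(list(storage.content_mimetype_get([b'\x01' * 20])))
```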
- - Args: - ctags (iterable): dicts with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - an iterable of missing id for the tuple (id, - indexer_configuration_id) - - """ yield from self._content_ctags.missing(ctags) def content_ctags_get(self, ids): - """Retrieve ctags per id. - - Args: - ids (iterable): sha1 checksums - - Yields: - Dictionaries with keys: - - - **id** (bytes): content's identifier - - **name** (str): symbol's name - - **kind** (str): symbol's kind - - **lang** (str): language for that content - - **tool** (dict): tool used to compute the ctags' info - - - """ for item in self._content_ctags.get(ids): for item_ctags_item in item['ctags']: yield { 'id': item['id'], 'tool': item['tool'], **item_ctags_item } def content_ctags_add(self, ctags, conflict_update=False): - """Add ctags not present in storage - - Args: - ctags (iterable): dictionaries with keys: - - - **id** (bytes): sha1 - - **ctags** ([list): List of dictionary with keys: name, kind, - line, lang - - **indexer_configuration_id**: tool used to compute the - results - - """ if not all(isinstance(x['id'], bytes) for x in ctags): raise TypeError('identifiers must be bytes.') self._content_ctags.add_merge(ctags, conflict_update, 'ctags') def content_ctags_search(self, expression, - limit=10, last_sha1=None, db=None, cur=None): - """Search through content's raw ctags symbols. - - Args: - expression (str): Expression to search for - limit (int): Number of rows to return (default to 10). - last_sha1 (str): Offset from which retrieving data (default to ''). - - Yields: - rows of ctags including id, name, lang, kind, line, etc... - - """ + limit=10, last_sha1=None): nb_matches = 0 for ((id_, tool_id), item) in \ sorted(self._content_ctags._data.items()): if id_ <= (last_sha1 or bytes(0 for _ in range(SHA1_DIGEST_SIZE))): continue for ctags_item in item['ctags']: if ctags_item['name'] != expression: continue nb_matches += 1 yield { 'id': id_, 'tool': _transform_tool(self._tools[tool_id]), **ctags_item } if nb_matches >= limit: return def content_fossology_license_get(self, ids): - """Retrieve licenses per id. - - Args: - ids (iterable): sha1 checksums - - Yields: - dict: ``{id: facts}`` where ``facts`` is a dict with the - following keys: - - - **licenses** ([str]): associated licenses for that content - - **tool** (dict): Tool used to compute the license - - """ # Rewrites the output of SubStorage.get from the old format to # the new one. SubStorage.get should be updated once all other # *_get methods use the new format. # See: https://forge.softwareheritage.org/T1433 res = {} for d in self._licenses.get(ids): res.setdefault(d.pop('id'), []).append(d) for (id_, facts) in res.items(): yield {id_: facts} def content_fossology_license_add(self, licenses, conflict_update=False): - """Add licenses not present in storage. 
- - Args: - licenses (iterable): dictionaries with keys: - - - **id**: sha1 - - **licenses** ([bytes]): List of licenses associated to sha1 - - **tool** (str): nomossa - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - Returns: - list: content_license entries which failed due to unknown licenses - - """ if not all(isinstance(x['id'], bytes) for x in licenses): raise TypeError('identifiers must be bytes.') self._licenses.add_merge(licenses, conflict_update, 'licenses') def content_fossology_license_get_range( self, start, end, indexer_configuration_id, limit=1000): - """Retrieve licenses within range [start, end] bound by limit. - - Args: - **start** (bytes): Starting identifier range (expected smaller - than end) - **end** (bytes): Ending identifier range (expected larger - than start) - **indexer_configuration_id** (int): The tool used to index data - **limit** (int): Limit result (default to 1000) - - Raises: - ValueError for limit to None - - Returns: - a dict with keys: - - **ids** [bytes]: iterable of content ids within the range. - - **next** (Optional[bytes]): The next range of sha1 starts at - this sha1 if any - - """ return self._licenses.get_range( start, end, indexer_configuration_id, limit) def content_metadata_missing(self, metadata): - """List metadata missing from storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id** (bytes): sha1 identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - missing sha1s - - """ yield from self._content_metadata.missing(metadata) def content_metadata_get(self, ids): - """Retrieve metadata per id. - - Args: - ids (iterable): sha1 checksums - - Yields: - dictionaries with the following keys: - - - **id** (bytes) - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - """ yield from self._content_metadata.get(ids) def content_metadata_add(self, metadata, conflict_update=False): - """Add metadata not present in storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id**: sha1 - - **metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute the - results - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - """ if not all(isinstance(x['id'], bytes) for x in metadata): raise TypeError('identifiers must be bytes.') self._content_metadata.add(metadata, conflict_update) def revision_intrinsic_metadata_missing(self, metadata): - """List metadata missing from storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id** (bytes): sha1_git revision identifier - - **indexer_configuration_id** (int): tool used to compute - the results - - Yields: - missing ids - - """ yield from self._revision_intrinsic_metadata.missing(metadata) def revision_intrinsic_metadata_get(self, ids): - """Retrieve revision metadata per id. - - Args: - ids (iterable): sha1 checksums - - Yields: - dictionaries with the following keys: - - - **id** (bytes) - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ yield from self._revision_intrinsic_metadata.get(ids) def revision_intrinsic_metadata_add(self, metadata, conflict_update=False): - """Add metadata not present in storage. 
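`add_merge` (used by the ctags and license endpoints above) unions new sub-items with what is already stored, instead of replacing or skipping whole records. A hedged sketch of that behavior on the in-memory backend, with invented values:

```python
from swh.indexer.storage.in_memory import IndexerStorage

storage = IndexerStorage()
tool = storage.indexer_configuration_add([{
    'tool_name': 'nomos', 'tool_version': '3.1',
    'tool_configuration': {}}])[0]
sha1 = b'\x02' * 20
for found in (['GPL-3.0+'], ['MIT']):
    storage.content_fossology_license_add([{
        'id': sha1,
        'licenses': found,
        'indexer_configuration_id': tool['id'],
    }])
# Even without conflict_update, the second call merged instead of skipping:
print(next(storage.content_fossology_license_get([sha1])))
```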
- - Args: - metadata (iterable): dictionaries with keys: - - - **id**: sha1_git of revision - - **metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - """ if not all(isinstance(x['id'], bytes) for x in metadata): raise TypeError('identifiers must be bytes.') self._revision_intrinsic_metadata.add(metadata, conflict_update) def revision_intrinsic_metadata_delete(self, entries): - """Remove revision metadata from the storage. - - Args: - entries (dict): dictionaries with the following keys: - - **revision** (int): origin identifier - - **id** (int): tool used to compute metadata - """ self._revision_intrinsic_metadata.delete(entries) def origin_intrinsic_metadata_get(self, ids): - """Retrieve origin metadata per id. - - Args: - ids (iterable): origin identifiers - - Yields: - list: dictionaries with the following keys: - - - **id** (str): origin url - - **from_revision** (bytes): which revision this metadata - was extracted from - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ yield from self._origin_intrinsic_metadata.get(ids) def origin_intrinsic_metadata_add(self, metadata, conflict_update=False): - """Add origin metadata not present in storage. - - Args: - metadata (iterable): dictionaries with keys: - - - **id**: origin url - - **from_revision**: sha1 id of the revision used to generate - these metadata. - - **metadata**: arbitrary dict - - **indexer_configuration_id**: tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - conflict_update: Flag to determine if we want to overwrite (true) - or skip duplicates (false, the default) - - """ self._origin_intrinsic_metadata.add(metadata, conflict_update) def origin_intrinsic_metadata_delete(self, entries): - """Remove origin metadata from the storage. - - Args: - entries (dict): dictionaries with the following keys: - - **id** (str): origin url - - **indexer_configuration_id** (int): tool used to compute - metadata - """ self._origin_intrinsic_metadata.delete(entries) def origin_intrinsic_metadata_search_fulltext( self, conjunction, limit=100): - """Returns the list of origins whose metadata contain all the terms. - - Args: - conjunction (List[str]): List of terms to be searched for. 
- limit (int): The maximum number of results to return - - Yields: - list: dictionaries with the following keys: - - - **id** (str): origin url - - **from_revision** (bytes): which revision this metadata - was extracted from - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ # A very crude fulltext search implementation, but that's enough # to work on English metadata tokens_re = re.compile('[a-zA-Z0-9]+') search_tokens = list(itertools.chain( *map(tokens_re.findall, conjunction))) def rank(data): # Tokenize the metadata text = json.dumps(data['metadata']) text_tokens = tokens_re.findall(text) text_token_occurences = Counter(text_tokens) # Count the number of occurrences of search tokens in the text score = 0 for search_token in search_tokens: if text_token_occurences[search_token] == 0: # Search token is not in the text. return 0 score += text_token_occurences[search_token] # Normalize according to the text's length return score / math.log(len(text_tokens)) results = [(rank(data), data) for data in self._origin_intrinsic_metadata.get_all()] results = [(rank_, data) for (rank_, data) in results if rank_ > 0] results.sort(key=operator.itemgetter(0), # Don't try to order 'data' reverse=True) for (rank_, result) in results[:limit]: yield result def origin_intrinsic_metadata_search_by_producer( self, page_token='', limit=100, ids_only=False, - mappings=None, tool_ids=None, - db=None, cur=None): - """Returns the list of origins whose metadata contain all the terms. - - Args: - page_token (str): Opaque token used for pagination. - limit (int): The maximum number of results to return - ids_only (bool): Determines whether only origin ids are returned - or the content as well - mappings (List[str]): Returns origins whose intrinsic metadata - were generated using at least one of these mappings. - - Returns: - dict: dict with the following keys: - - **next_page_token** (str, optional): opaque token to be used as - `page_token` for retrieveing the next page. - - **origins** (list): list of origin url (str) if `ids_only=True` - else dictionaries with the following keys: - - - **id** (str): origin urls - - **from_revision**: sha1 id of the revision used to generate - these metadata. - - **metadata** (str): associated metadata - - **tool** (dict): tool used to compute metadata - - **mappings** (List[str]): list of mappings used to translate - these metadata - - """ + mappings=None, tool_ids=None): assert isinstance(page_token, str) nb_results = 0 if mappings is not None: mappings = frozenset(mappings) if tool_ids is not None: tool_ids = frozenset(tool_ids) origins = [] - # we go to limit+1 to check wether we should add next_page_token in + # we go to limit+1 to check whether we should add next_page_token in # the response for entry in self._origin_intrinsic_metadata.get_all(): if entry['id'] <= page_token: continue if nb_results >= (limit + 1): break if mappings is not None and mappings.isdisjoint(entry['mappings']): continue if tool_ids is not None and entry['tool']['id'] not in tool_ids: continue origins.append(entry) nb_results += 1 result = {} if len(origins) > limit: origins = origins[:limit] result['next_page_token'] = origins[-1]['id'] if ids_only: origins = [origin['id'] for origin in origins] result['origins'] = origins return result def origin_intrinsic_metadata_stats(self): - """Returns statistics on stored intrinsic metadata. 
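The `rank()` function above scores a document only if every search token occurs in it, then normalizes the occurrence count by the log of the document length. The same arithmetic replayed on a toy metadata document:

```python
import math
import re
from collections import Counter

tokens_re = re.compile('[a-zA-Z0-9]+')
text = '{"name": "parser", "keywords": ["parser"]}'  # toy metadata JSON
text_tokens = tokens_re.findall(text)
occurrences = Counter(text_tokens)

search_tokens = ['parser']
# The real rank() returns 0 as soon as any search token is missing.
score = sum(occurrences[t] for t in search_tokens)
print(score / math.log(len(text_tokens)))  # occurrence count, length-normalized
```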
- - Returns: - dict: dictionary with keys: - - - total (int): total number of origins that were indexed - (possibly yielding an empty metadata dictionary) - - non_empty (int): total number of origins that we extracted - a non-empty metadata dictionary from - - per_mapping (dict): a dictionary with mapping names as - keys and number of origins whose indexing used this - mapping. Note that indexing a given origin may use - 0, 1, or many mappings. - """ mapping_count = {m: 0 for m in MAPPING_NAMES} total = non_empty = 0 for data in self._origin_intrinsic_metadata.get_all(): total += 1 if set(data['metadata']) - {'@context'}: non_empty += 1 for mapping in data['mappings']: mapping_count[mapping] += 1 return { 'per_mapping': mapping_count, 'total': total, 'non_empty': non_empty } def indexer_configuration_add(self, tools): - """Add new tools to the storage. - - Args: - tools ([dict]): List of dictionary representing tool to - insert in the db. Dictionary with the following keys: - - - **tool_name** (str): tool's name - - **tool_version** (str): tool's version - - **tool_configuration** (dict): tool's configuration - (free form dict) - - Returns: - list: List of dict inserted in the db (holding the id key as - well). The order of the list is not guaranteed to match - the order of the initial list. - - """ inserted = [] for tool in tools: tool = tool.copy() id_ = self._tool_key(tool) tool['id'] = id_ self._tools[id_] = tool inserted.append(tool) return inserted def indexer_configuration_get(self, tool): - """Retrieve tool information. - - Args: - tool (dict): Dictionary representing a tool with the - following keys: - - - **tool_name** (str): tool's name - - **tool_version** (str): tool's version - - **tool_configuration** (dict): tool's configuration - (free form dict) - - Returns: - The same dictionary with an `id` key, None otherwise. - - """ return self._tools.get(self._tool_key(tool)) def _tool_key(self, tool): return hash((tool['tool_name'], tool['tool_version'], json.dumps(tool['tool_configuration'], sort_keys=True))) diff --git a/swh/indexer/storage/__init__.py b/swh/indexer/storage/interface.py similarity index 54% copy from swh/indexer/storage/__init__.py copy to swh/indexer/storage/interface.py index a8d557f..a884f1c 100644 --- a/swh/indexer/storage/__init__.py +++ b/swh/indexer/storage/interface.py @@ -1,936 +1,600 @@ -# Copyright (C) 2015-2018 The Software Heritage developers +# Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import json -import psycopg2 - -from collections import defaultdict - from swh.core.api import remote_api_endpoint -from swh.storage.common import db_transaction_generator, db_transaction -from swh.storage.exc import StorageDBError -from .db import Db - -from . import converters - - -INDEXER_CFG_KEY = 'indexer_storage' - - -MAPPING_NAMES = ['codemeta', 'gemspec', 'maven', 'npm', 'pkg-info'] - - -def get_indexer_storage(cls, args): - """Get an indexer storage object of class `storage_class` with - arguments `storage_args`. - - Args: - cls (str): storage's class, either 'local' or 'remote' - args (dict): dictionary of arguments passed to the - storage class constructor - - Returns: - an instance of swh.indexer's storage (either local or remote) - Raises: - ValueError if passed an unknown storage class. 
- - """ - if cls == 'remote': - from .api.client import RemoteStorage as IndexerStorage - elif cls == 'local': - from . import IndexerStorage - elif cls == 'memory': - from .in_memory import IndexerStorage - else: - raise ValueError('Unknown indexer storage class `%s`' % cls) - - return IndexerStorage(**args) - - -def _check_id_duplicates(data): - """ - If any two dictionaries in `data` have the same id, raises - a `ValueError`. - - Values associated to the key must be hashable. - - Args: - data (List[dict]): List of dictionaries to be inserted - - >>> _check_id_duplicates([ - ... {'id': 'foo', 'data': 'spam'}, - ... {'id': 'bar', 'data': 'egg'}, - ... ]) - >>> _check_id_duplicates([ - ... {'id': 'foo', 'data': 'spam'}, - ... {'id': 'foo', 'data': 'egg'}, - ... ]) - Traceback (most recent call last): - ... - ValueError: The same id is present more than once. - """ - if len({item['id'] for item in data}) < len(data): - raise ValueError('The same id is present more than once.') - - -class IndexerStorage: - """SWH Indexer Storage - - """ - def __init__(self, db, min_pool_conns=1, max_pool_conns=10): - """ - Args: - db_conn: either a libpq connection string, or a psycopg2 connection - - """ - try: - if isinstance(db, psycopg2.extensions.connection): - self._pool = None - self._db = Db(db) - else: - self._pool = psycopg2.pool.ThreadedConnectionPool( - min_pool_conns, max_pool_conns, db - ) - self._db = None - except psycopg2.OperationalError as e: - raise StorageDBError(e) - - def get_db(self): - if self._db: - return self._db - return Db.from_pool(self._pool) - - def put_db(self, db): - if db is not self._db: - db.put_conn() +class IndexerStorageInterface: @remote_api_endpoint('check_config') - @db_transaction() - def check_config(self, *, check_write, db=None, cur=None): + def check_config(self, *, check_write): """Check that the storage is configured and ready to go.""" - # Check permissions on one of the tables - if check_write: - check = 'INSERT' - else: - check = 'SELECT' - - cur.execute( - "select has_table_privilege(current_user, 'content_mimetype', %s)", # noqa - (check,) - ) - return cur.fetchone()[0] + ... @remote_api_endpoint('content_mimetype/missing') - @db_transaction_generator() - def content_mimetype_missing(self, mimetypes, db=None, cur=None): + def content_mimetype_missing(self, mimetypes): """Generate mimetypes missing from storage. Args: mimetypes (iterable): iterable of dict with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: tuple (id, indexer_configuration_id): missing id """ - for obj in db.content_mimetype_missing_from_list(mimetypes, cur): - yield obj[0] + ... def _content_get_range(self, content_type, start, end, indexer_configuration_id, limit=1000, - with_textual_data=False, - db=None, cur=None): + with_textual_data=False): """Retrieve ids of type content_type within range [start, end] bound by limit. Args: **content_type** (str): content's type (mimetype, language, etc...) **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) **with_textual_data** (bool): Deal with only textual content (True) or all content (all contents by defaults, False) Raises: ValueError for; - limit to None - wrong content_type provided Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. 
- **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ - if limit is None: - raise ValueError('Development error: limit should not be None') - if content_type not in db.content_indexer_names: - err = 'Development error: Wrong type. Should be one of [%s]' % ( - ','.join(db.content_indexer_names)) - raise ValueError(err) - - ids = [] - next_id = None - for counter, obj in enumerate(db.content_get_range( - content_type, start, end, indexer_configuration_id, - limit=limit+1, with_textual_data=with_textual_data, cur=cur)): - _id = obj[0] - if counter >= limit: - next_id = _id - break - - ids.append(_id) - - return { - 'ids': ids, - 'next': next_id - } + ... @remote_api_endpoint('content_mimetype/range') - @db_transaction() def content_mimetype_get_range(self, start, end, indexer_configuration_id, - limit=1000, db=None, cur=None): + limit=1000): """Retrieve mimetypes within range [start, end] bound by limit. Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) Raises: ValueError if limit is None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. - **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ - return self._content_get_range('mimetype', start, end, - indexer_configuration_id, limit=limit, - db=db, cur=cur) + ... @remote_api_endpoint('content_mimetype/add') - @db_transaction() - def content_mimetype_add(self, mimetypes, conflict_update=False, db=None, - cur=None): + def content_mimetype_add(self, mimetypes, conflict_update=False): """Add mimetypes not present in storage. Args: mimetypes (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **mimetype** (bytes): raw content's mimetype - **encoding** (bytes): raw content's encoding - **indexer_configuration_id** (int): tool's id used to compute the results conflict_update (bool): Flag to determine if we want to overwrite (``True``) or skip duplicates (``False``, the default) """ - _check_id_duplicates(mimetypes) - mimetypes.sort(key=lambda m: m['id']) - db.mktemp_content_mimetype(cur) - db.copy_to(mimetypes, 'tmp_content_mimetype', - ['id', 'mimetype', 'encoding', 'indexer_configuration_id'], - cur) - db.content_mimetype_add_from_temp(conflict_update, cur) + ... @remote_api_endpoint('content_mimetype') - @db_transaction_generator() - def content_mimetype_get(self, ids, db=None, cur=None): + def content_mimetype_get(self, ids): """Retrieve full content mimetype per ids. Args: ids (iterable): sha1 identifiers Yields: mimetypes (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **mimetype** (bytes): raw content's mimetype - **encoding** (bytes): raw content's encoding - **tool** (dict): Tool used to compute the mimetype """ - for c in db.content_mimetype_get_from_list(ids, cur): - yield converters.db_to_mimetype( - dict(zip(db.content_mimetype_cols, c))) + ... @remote_api_endpoint('content_language/missing') - @db_transaction_generator() - def content_language_missing(self, languages, db=None, cur=None): + def content_language_missing(self, languages): """List languages missing from storage.
Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: an iterable of missing ids for the tuple (id, indexer_configuration_id) """ - for obj in db.content_language_missing_from_list(languages, cur): - yield obj[0] + ... @remote_api_endpoint('content_language') - @db_transaction_generator() - def content_language_get(self, ids, db=None, cur=None): + def content_language_get(self, ids): """Retrieve full content language per ids. Args: ids (iterable): sha1 identifiers Yields: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **lang** (bytes): raw content's language - **tool** (dict): Tool used to compute the language """ - for c in db.content_language_get_from_list(ids, cur): - yield converters.db_to_language( - dict(zip(db.content_language_cols, c))) + ... @remote_api_endpoint('content_language/add') - @db_transaction() - def content_language_add(self, languages, conflict_update=False, db=None, - cur=None): + def content_language_add(self, languages, conflict_update=False): """Add languages not present in storage. Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 - **lang** (bytes): language detected - **indexer_configuration_id** (int): tool used to compute the results conflict_update (bool): Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ - _check_id_duplicates(languages) - languages.sort(key=lambda m: m['id']) - db.mktemp_content_language(cur) - # empty language is mapped to 'unknown' - db.copy_to( - ({ - 'id': l['id'], - 'lang': 'unknown' if not l['lang'] else l['lang'], - 'indexer_configuration_id': l['indexer_configuration_id'], - } for l in languages), - 'tmp_content_language', - ['id', 'lang', 'indexer_configuration_id'], cur) - - db.content_language_add_from_temp(conflict_update, cur) + ... @remote_api_endpoint('content/ctags/missing') - @db_transaction_generator() - def content_ctags_missing(self, ctags, db=None, cur=None): + def content_ctags_missing(self, ctags): """List ctags missing from storage. Args: ctags (iterable): dicts with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: an iterable of missing ids for the tuple (id, indexer_configuration_id) """ - for obj in db.content_ctags_missing_from_list(ctags, cur): - yield obj[0] + ... @remote_api_endpoint('content/ctags') - @db_transaction_generator() - def content_ctags_get(self, ids, db=None, cur=None): + def content_ctags_get(self, ids): """Retrieve ctags per id. Args: ids (iterable): sha1 checksums Yields: Dictionaries with keys: - **id** (bytes): content's identifier - **name** (str): symbol's name - **kind** (str): symbol's kind - **lang** (str): language for that content - **tool** (dict): tool used to compute the ctags' info """ - for c in db.content_ctags_get_from_list(ids, cur): - yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, c))) + ... @remote_api_endpoint('content/ctags/add') - @db_transaction() - def content_ctags_add(self, ctags, conflict_update=False, db=None, - cur=None): + def content_ctags_add(self, ctags, conflict_update=False): """Add ctags not present in storage. Args: ctags (iterable): dictionaries with keys: - **id** (bytes): sha1 - **ctags** (list): List of dictionaries with keys: name, kind, line, lang """ - _check_id_duplicates(ctags) - ctags.sort(key=lambda m: m['id']) - - def _convert_ctags(__ctags): - """Convert ctags dict to list of ctags.
- - """ - for ctags in __ctags: - yield from converters.ctags_to_db(ctags) - - db.mktemp_content_ctags(cur) - db.copy_to(list(_convert_ctags(ctags)), - tblname='tmp_content_ctags', - columns=['id', 'name', 'kind', 'line', - 'lang', 'indexer_configuration_id'], - cur=cur) - - db.content_ctags_add_from_temp(conflict_update, cur) + ... @remote_api_endpoint('content/ctags/search') - @db_transaction_generator() def content_ctags_search(self, expression, - limit=10, last_sha1=None, db=None, cur=None): + limit=10, last_sha1=None): """Search through content's raw ctags symbols. Args: expression (str): Expression to search for limit (int): Number of rows to return (default to 10). last_sha1 (str): Offset from which retrieving data (default to ''). Yields: rows of ctags including id, name, lang, kind, line, etc... """ - for obj in db.content_ctags_search(expression, last_sha1, limit, - cur=cur): - yield converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj))) + ... @remote_api_endpoint('content/fossology_license') - @db_transaction_generator() - def content_fossology_license_get(self, ids, db=None, cur=None): + def content_fossology_license_get(self, ids): """Retrieve licenses per id. Args: ids (iterable): sha1 checksums Yields: dict: ``{id: facts}`` where ``facts`` is a dict with the following keys: - **licenses** ([str]): associated licenses for that content - **tool** (dict): Tool used to compute the license """ - d = defaultdict(list) - for c in db.content_fossology_license_get_from_list(ids, cur): - license = dict(zip(db.content_fossology_license_cols, c)) - - id_ = license['id'] - d[id_].append(converters.db_to_fossology_license(license)) - - for id_, facts in d.items(): - yield {id_: facts} + ... @remote_api_endpoint('content/fossology_license/add') - @db_transaction() - def content_fossology_license_add(self, licenses, conflict_update=False, - db=None, cur=None): + def content_fossology_license_add(self, licenses, conflict_update=False): """Add licenses not present in storage. Args: licenses (iterable): dictionaries with keys: - **id**: sha1 - **licenses** ([bytes]): List of licenses associated to sha1 - **tool** (str): nomossa conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) Returns: list: content_license entries which failed due to unknown licenses """ - _check_id_duplicates(licenses) - licenses.sort(key=lambda m: m['id']) - db.mktemp_content_fossology_license(cur) - db.copy_to( - ({ - 'id': sha1['id'], - 'indexer_configuration_id': sha1['indexer_configuration_id'], - 'license': license, - } for sha1 in licenses - for license in sha1['licenses']), - tblname='tmp_content_fossology_license', - columns=['id', 'license', 'indexer_configuration_id'], - cur=cur) - db.content_fossology_license_add_from_temp(conflict_update, cur) + ... @remote_api_endpoint('content/fossology_license/range') - @db_transaction() def content_fossology_license_get_range( self, start, end, indexer_configuration_id, - limit=1000, db=None, cur=None): + limit=1000): """Retrieve licenses within range [start, end] bound by limit. Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **indexer_configuration_id** (int): The tool used to index data **limit** (int): Limit result (default to 1000) Raises: ValueError for limit to None Returns: a dict with keys: - **ids** [bytes]: iterable of content ids within the range. 
- **next** (Optional[bytes]): The next range of sha1 starts at this sha1 if any """ - return self._content_get_range('fossology_license', start, end, - indexer_configuration_id, limit=limit, - with_textual_data=True, db=db, cur=cur) + ... @remote_api_endpoint('content_metadata/missing') - @db_transaction_generator() - def content_metadata_missing(self, metadata, db=None, cur=None): + def content_metadata_missing(self, metadata): """List metadata missing from storage. Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ - for obj in db.content_metadata_missing_from_list(metadata, cur): - yield obj[0] + ... @remote_api_endpoint('content_metadata') - @db_transaction_generator() - def content_metadata_get(self, ids, db=None, cur=None): + def content_metadata_get(self, ids): """Retrieve metadata per id. Args: ids (iterable): sha1 checksums Yields: dictionaries with the following keys: id (bytes) metadata (str): associated metadata tool (dict): tool used to compute metadata """ - for c in db.content_metadata_get_from_list(ids, cur): - yield converters.db_to_metadata( - dict(zip(db.content_metadata_cols, c))) + ... @remote_api_endpoint('content_metadata/add') - @db_transaction() - def content_metadata_add(self, metadata, conflict_update=False, db=None, - cur=None): + def content_metadata_add(self, metadata, conflict_update=False): """Add metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: sha1 - **metadata**: arbitrary dict conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ - _check_id_duplicates(metadata) - metadata.sort(key=lambda m: m['id']) - - db.mktemp_content_metadata(cur) - - db.copy_to(metadata, 'tmp_content_metadata', - ['id', 'metadata', 'indexer_configuration_id'], - cur) - db.content_metadata_add_from_temp(conflict_update, cur) + ... @remote_api_endpoint('revision_intrinsic_metadata/missing') - @db_transaction_generator() - def revision_intrinsic_metadata_missing(self, metadata, db=None, cur=None): + def revision_intrinsic_metadata_missing(self, metadata): """List metadata missing from storage. Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1_git revision identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing ids """ - for obj in db.revision_intrinsic_metadata_missing_from_list( - metadata, cur): - yield obj[0] + ... @remote_api_endpoint('revision_intrinsic_metadata') - @db_transaction_generator() - def revision_intrinsic_metadata_get(self, ids, db=None, cur=None): + def revision_intrinsic_metadata_get(self, ids): """Retrieve revision metadata per id. Args: ids (iterable): sha1 checksums Yields: - dictionaries with the following keys: + : dictionaries with the following keys: - **id** (bytes) - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ - for c in db.revision_intrinsic_metadata_get_from_list(ids, cur): - yield converters.db_to_metadata( - dict(zip(db.revision_intrinsic_metadata_cols, c))) + ... 
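# The *_get_range endpoints above all share the same pagination contract:
# each call returns {'ids': [...], 'next': sha1-or-None}, and feeding
# 'next' back as the new start walks the whole range without duplicates.
# A minimal client-side sketch of that contract (illustration only, not
# part of this patch); `iter_range_ids` is a hypothetical helper, and
# `storage`/`tool_id` are assumed to exist:

def iter_range_ids(get_range, start, end, tool_id, batch=1000):
    """Yield every content id in [start, end], one page at a time."""
    while start is not None:
        page = get_range(start, end,
                         indexer_configuration_id=tool_id, limit=batch)
        yield from page['ids']   # ids within the current page
        start = page['next']     # None once the range is exhausted

# e.g.: list(iter_range_ids(storage.content_mimetype_get_range,
#                           b'\x00' * 20, b'\xff' * 20, tool_id))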
@remote_api_endpoint('revision_intrinsic_metadata/add') - @db_transaction() - def revision_intrinsic_metadata_add(self, metadata, conflict_update=False, - db=None, cur=None): + def revision_intrinsic_metadata_add(self, metadata, conflict_update=False): """Add metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: sha1_git of revision - **metadata**: arbitrary dict - **indexer_configuration_id**: tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ - _check_id_duplicates(metadata) - metadata.sort(key=lambda m: m['id']) - - db.mktemp_revision_intrinsic_metadata(cur) - - db.copy_to(metadata, 'tmp_revision_intrinsic_metadata', - ['id', 'metadata', 'mappings', - 'indexer_configuration_id'], - cur) - db.revision_intrinsic_metadata_add_from_temp(conflict_update, cur) + ... @remote_api_endpoint('revision_intrinsic_metadata/delete') - @db_transaction() - def revision_intrinsic_metadata_delete(self, entries, db=None, cur=None): + def revision_intrinsic_metadata_delete(self, entries): """Remove revision metadata from the storage. Args: entries (dict): dictionaries with the following keys: + - **id** (bytes): revision identifier - **indexer_configuration_id** (int): tool used to compute metadata """ - db.revision_intrinsic_metadata_delete(entries, cur) + ... @remote_api_endpoint('origin_intrinsic_metadata') - @db_transaction_generator() - def origin_intrinsic_metadata_get(self, ids, db=None, cur=None): + def origin_intrinsic_metadata_get(self, ids): """Retrieve origin metadata per id. Args: ids (iterable): origin identifiers Yields: list: dictionaries with the following keys: - **id** (str): origin url - **from_revision** (bytes): which revision this metadata was extracted from - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ - for c in db.origin_intrinsic_metadata_get_from_list(ids, cur): - yield converters.db_to_metadata( - dict(zip(db.origin_intrinsic_metadata_cols, c))) + ... @remote_api_endpoint('origin_intrinsic_metadata/add') - @db_transaction() def origin_intrinsic_metadata_add(self, metadata, - conflict_update=False, db=None, - cur=None): + conflict_update=False): """Add origin metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: origin urls - **from_revision**: sha1 id of the revision used to generate these metadata. - **metadata**: arbitrary dict - **indexer_configuration_id**: tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata conflict_update: Flag to determine if we want to overwrite (true) or skip duplicates (false, the default) """ - _check_id_duplicates(metadata) - metadata.sort(key=lambda m: m['id']) - - db.mktemp_origin_intrinsic_metadata(cur) - - db.copy_to(metadata, 'tmp_origin_intrinsic_metadata', - ['id', 'metadata', - 'indexer_configuration_id', - 'from_revision', 'mappings'], - cur) - db.origin_intrinsic_metadata_add_from_temp(conflict_update, cur) + ... @remote_api_endpoint('origin_intrinsic_metadata/delete') - @db_transaction() def origin_intrinsic_metadata_delete( - self, entries, db=None, cur=None): + self, entries): """Remove origin metadata from the storage. 
Args: entries (dict): dictionaries with the following keys: + - **id** (str): origin urls - **indexer_configuration_id** (int): tool used to compute metadata """ - db.origin_intrinsic_metadata_delete(entries, cur) + ... @remote_api_endpoint('origin_intrinsic_metadata/search/fulltext') - @db_transaction_generator() def origin_intrinsic_metadata_search_fulltext( - self, conjunction, limit=100, db=None, cur=None): + self, conjunction, limit=100): """Returns the list of origins whose metadata contain all the terms. Args: conjunction (List[str]): List of terms to be searched for. limit (int): The maximum number of results to return Yields: list: dictionaries with the following keys: - **id** (str): origin urls - **from_revision**: sha1 id of the revision used to generate these metadata. - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ - for c in db.origin_intrinsic_metadata_search_fulltext( - conjunction, limit=limit, cur=cur): - yield converters.db_to_metadata( - dict(zip(db.origin_intrinsic_metadata_cols, c))) + ... @remote_api_endpoint('origin_intrinsic_metadata/search/by_producer') - @db_transaction() def origin_intrinsic_metadata_search_by_producer( self, page_token='', limit=100, ids_only=False, - mappings=None, tool_ids=None, - db=None, cur=None): + mappings=None, tool_ids=None): """Returns the list of origins whose metadata was generated using at least one of the given mappings or tools. Args: page_token (str): Opaque token used for pagination. limit (int): The maximum number of results to return ids_only (bool): Determines whether only origin urls are returned or the content as well mappings (List[str]): Returns origins whose intrinsic metadata were generated using at least one of these mappings. tool_ids (List[int]): Returns origins whose intrinsic metadata were generated using at least one of these tools. Returns: dict: dict with the following keys: - **next_page_token** (str, optional): opaque token to be used as `page_token` for retrieving the next page. If absent, there are no more pages to gather. - **origins** (list): list of origin url (str) if `ids_only=True` else dictionaries with the following keys: - **id** (str): origin urls - **from_revision**: sha1 id of the revision used to generate these metadata. - **metadata** (str): associated metadata - **tool** (dict): tool used to compute metadata - **mappings** (List[str]): list of mappings used to translate these metadata """ - assert isinstance(page_token, str) - # we go to limit+1 to check whether we should add next_page_token in - # the response - res = db.origin_intrinsic_metadata_search_by_producer( - page_token, limit + 1, ids_only, mappings, tool_ids, cur) - result = {} - if ids_only: - result['origins'] = [origin for (origin,) in res] - if len(result['origins']) > limit: - result['origins'][limit:] = [] - result['next_page_token'] = result['origins'][-1] - else: - result['origins'] = [converters.db_to_metadata( - dict(zip(db.origin_intrinsic_metadata_cols, c))) for c in res] - if len(result['origins']) > limit: - result['origins'][limit:] = [] - result['next_page_token'] = result['origins'][-1]['id'] - return result + ... @remote_api_endpoint('origin_intrinsic_metadata/stats') - @db_transaction() def origin_intrinsic_metadata_stats( - self, db=None, cur=None): + self): """Returns counts of indexed metadata per origin, broken down into metadata types.
Returns: dict: dictionary with keys: - total (int): total number of origins that were indexed (possibly yielding an empty metadata dictionary) - non_empty (int): total number of origins that we extracted a non-empty metadata dictionary from - per_mapping (dict): a dictionary with mapping names as keys and, as values, the number of origins whose indexing used this mapping. Note that indexing a given origin may use 0, 1, or many mappings. """ - mapping_names = [m for m in MAPPING_NAMES] - select_parts = [] - - # Count rows for each mapping - for mapping_name in mapping_names: - select_parts.append(( - "sum(case when (mappings @> ARRAY['%s']) " - " then 1 else 0 end)" - ) % mapping_name) - - # Total - select_parts.append("sum(1)") - - # Rows whose metadata has at least one key that is not '@context' - select_parts.append( - "sum(case when ('{}'::jsonb @> (metadata - '@context')) " - " then 0 else 1 end)") - cur.execute('select ' + ', '.join(select_parts) - + ' from origin_intrinsic_metadata') - results = dict(zip(mapping_names + ['total', 'non_empty'], - cur.fetchone())) - return { - 'total': results.pop('total'), - 'non_empty': results.pop('non_empty'), - 'per_mapping': results, - } + ... @remote_api_endpoint('indexer_configuration/add') - @db_transaction_generator() - def indexer_configuration_add(self, tools, db=None, cur=None): + def indexer_configuration_add(self, tools): """Add new tools to the storage. Args: tools ([dict]): List of dictionaries representing the tools to insert in the db, each with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: List of dicts inserted in the db (each holding the id key as well). The order of the list is not guaranteed to match the order of the initial list. """ - db.mktemp_indexer_configuration(cur) - db.copy_to(tools, 'tmp_indexer_configuration', - ['tool_name', 'tool_version', 'tool_configuration'], - cur) - - tools = db.indexer_configuration_add_from_temp(cur) - for line in tools: - yield dict(zip(db.indexer_configuration_cols, line)) + ... @remote_api_endpoint('indexer_configuration/data') - @db_transaction() - def indexer_configuration_get(self, tool, db=None, cur=None): + def indexer_configuration_get(self, tool): """Retrieve tool information. Args: tool (dict): Dictionary representing a tool with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: The same dictionary augmented with an `id` key if the tool exists, None otherwise. """ - tool_conf = tool['tool_configuration'] - if isinstance(tool_conf, dict): - tool_conf = json.dumps(tool_conf) - idx = db.indexer_configuration_get(tool['tool_name'], - tool['tool_version'], - tool_conf) - if not idx: - return None - return dict(zip(db.indexer_configuration_cols, idx)) + ...
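Taken together, the factory and the endpoints above can be exercised end to end against the in-memory backend (the shared test suite below runs against both backends). A minimal sketch, assuming the in-memory backend; the tool values and the fake sha1 are made up for illustration:

    from swh.indexer.storage import get_indexer_storage

    storage = get_indexer_storage('memory', {})

    # Register a tool; the returned rows carry the generated 'id'.
    tool = list(storage.indexer_configuration_add([{
        'tool_name': 'file',
        'tool_version': '5.22',
        'tool_configuration': {'command_line': 'file --mime <filepath>'},
    }]))[0]

    # Index one (fake) content and read it back.
    sha1 = b'\x01' * 20  # made-up sha1, for illustration only
    storage.content_mimetype_add([{
        'id': sha1,
        'mimetype': 'text/plain',
        'encoding': 'utf-8',
        'indexer_configuration_id': tool['id'],
    }])
    print(list(storage.content_mimetype_get([sha1])))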
diff --git a/swh/indexer/tests/conftest.py b/swh/indexer/tests/conftest.py index 78f1975..3501188 100644 --- a/swh/indexer/tests/conftest.py +++ b/swh/indexer/tests/conftest.py @@ -1,71 +1,81 @@ +# Copyright (C) 2019-2020 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + from datetime import timedelta from unittest.mock import patch import pytest from swh.objstorage import get_objstorage from swh.scheduler.tests.conftest import * # noqa -from swh.storage.in_memory import Storage - -from swh.indexer.storage.in_memory import IndexerStorage +from swh.storage import get_storage +from swh.indexer.storage import get_indexer_storage from .utils import fill_storage, fill_obj_storage TASK_NAMES = ['revision_intrinsic_metadata', 'origin_intrinsic_metadata'] @pytest.fixture def indexer_scheduler(swh_scheduler): for taskname in TASK_NAMES: swh_scheduler.create_task_type({ 'type': taskname, 'description': 'The {} indexer testing task'.format(taskname), 'backend_name': 'swh.indexer.tests.tasks.{}'.format(taskname), 'default_interval': timedelta(days=1), 'min_interval': timedelta(hours=6), 'max_interval': timedelta(days=12), 'num_retries': 3, }) return swh_scheduler @pytest.fixture def idx_storage(): - """An instance of swh.indexer.storage.in_memory.IndexerStorage that - gets injected into all indexers classes.""" - idx_storage = IndexerStorage() + """An instance of in-memory indexer storage that gets injected into all + indexer classes. + + """ + idx_storage = get_indexer_storage('memory', {}) with patch('swh.indexer.storage.in_memory.IndexerStorage') \ as idx_storage_mock: idx_storage_mock.return_value = idx_storage yield idx_storage @pytest.fixture def storage(): - """An instance of swh.storage.in_memory.Storage that gets injected - into all indexers classes.""" - storage = Storage() + """An instance of in-memory storage that gets injected into all indexer + classes. + + """ + storage = get_storage('memory') fill_storage(storage) - with patch('swh.storage.in_memory.Storage') as storage_mock: + with patch('swh.storage.in_memory.InMemoryStorage') as storage_mock: storage_mock.return_value = storage yield storage @pytest.fixture def obj_storage(): - """An instance of swh.objstorage.objstorage_in_memory.InMemoryObjStorage - that gets injected into all indexers classes.""" + """An instance of in-memory objstorage that gets injected into all indexer + classes.
+ + """ objstorage = get_objstorage('memory', {}) fill_obj_storage(objstorage) with patch.dict('swh.objstorage._STORAGE_CLASSES', {'memory': lambda: objstorage}): yield objstorage @pytest.fixture(scope='session') # type: ignore # expected redefinition def celery_includes(): return [ 'swh.indexer.tests.tasks', 'swh.indexer.tasks', ] diff --git a/swh/indexer/tests/storage/test_server.py b/swh/indexer/tests/storage/test_server.py index f4055bb..9130e1f 100644 --- a/swh/indexer/tests/storage/test_server.py +++ b/swh/indexer/tests/storage/test_server.py @@ -1,123 +1,123 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import pytest import yaml from swh.indexer.storage.api.server import load_and_check_config def prepare_config_file(tmpdir, content, name='config.yml'): """Prepare configuration file in `$tmpdir/name` with content `content`. Args: tmpdir (LocalPath): root directory content (str/dict): Content of the file either as string or as a dict. If a dict, converts the dict into a yaml string. name (str): configuration filename Returns path (str) of the configuration file prepared. """ config_path = tmpdir / name if isinstance(content, dict): # convert if needed content = yaml.dump(content) config_path.write_text(content, encoding='utf-8') # pytest on python3.5 does not support LocalPath manipulation, so # convert path to string return str(config_path) def test_load_and_check_config_no_configuration(): - """Inexistant configuration files raises""" + """Inexistent configuration files raises""" with pytest.raises(EnvironmentError) as e: load_and_check_config(None) assert e.value.args[0] == 'Configuration file must be defined' - config_path = '/indexer/inexistant/config.yml' + config_path = '/indexer/inexistent/config.yml' with pytest.raises(FileNotFoundError) as e: load_and_check_config(config_path) assert e.value.args[0] == 'Configuration file %s does not exist' % ( config_path, ) def test_load_and_check_config_wrong_configuration(tmpdir): """Wrong configuration raises""" config_path = prepare_config_file(tmpdir, 'something: useless') with pytest.raises(KeyError) as e: load_and_check_config(config_path) assert e.value.args[0] == 'Missing \'%indexer_storage\' configuration' def test_load_and_check_config_remote_config_local_type_raise(tmpdir): """'local' configuration without 'local' storage raises""" config = { 'indexer_storage': { 'cls': 'remote', 'args': {} } } config_path = prepare_config_file(tmpdir, config) with pytest.raises(ValueError) as e: load_and_check_config(config_path, type='local') assert ( e.value.args[0] == "The indexer_storage backend can only be started with a 'local' " "configuration" ) def test_load_and_check_config_local_incomplete_configuration(tmpdir): """Incomplete 'local' configuration should raise""" config = { 'indexer_storage': { 'cls': 'local', 'args': { } } } config_path = prepare_config_file(tmpdir, config) with pytest.raises(ValueError) as e: load_and_check_config(config_path) assert ( e.value.args[0] == "Invalid configuration; missing 'db' config entry" ) def test_load_and_check_config_local_config_fine(tmpdir): """'Remote configuration is fine""" config = { 'indexer_storage': { 'cls': 'local', 'args': { 'db': 'db', } } } config_path = prepare_config_file(tmpdir, config) cfg = load_and_check_config(config_path, type='local') assert cfg == config def 
test_load_and_check_config_remote_config_fine(tmpdir): """'Remote configuration is fine""" config = { 'indexer_storage': { 'cls': 'remote', 'args': {} } } config_path = prepare_config_file(tmpdir, config) cfg = load_and_check_config(config_path, type='any') assert cfg == config diff --git a/swh/indexer/tests/storage/test_storage.py b/swh/indexer/tests/storage/test_storage.py index c6dbc94..3218dc2 100644 --- a/swh/indexer/tests/storage/test_storage.py +++ b/swh/indexer/tests/storage/test_storage.py @@ -1,1855 +1,1888 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import inspect import threading + import pytest + from swh.model.hashutil import hash_to_bytes +from swh.indexer.storage.interface import IndexerStorageInterface + def prepare_mimetypes_from(fossology_licenses): """Fossology license needs some consistent data in db to run. """ mimetypes = [] for c in fossology_licenses: mimetypes.append({ 'id': c['id'], 'mimetype': 'text/plain', 'encoding': 'utf-8', 'indexer_configuration_id': c['indexer_configuration_id'], }) return mimetypes def endpoint(storage, endpoint_type, endpoint_name): return getattr(storage, endpoint_type + '_' + endpoint_name) class StorageETypeTester: """Base class for testing a series of common behaviour between a bunch of endpoint types supported by an IndexerStorage. This is supposed to be inherited with the following class attributes: - endpoint_type - tool_name - example_data See below for example usage. """ def test_missing(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool_id = data.tools[self.tool_name]['id'] # given 2 (hopefully) unknown objects query = [ { 'id': data.sha1_1, 'indexer_configuration_id': tool_id, }, { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, }] # we expect these are both returned by the xxx_missing endpoint actual_missing = endpoint(storage, etype, 'missing')(query) assert list(actual_missing) == [ data.sha1_1, data.sha1_2, ] # now, when we add one of them endpoint(storage, etype, 'add')([{ 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool_id, }]) # we expect only the other one returned actual_missing = endpoint(storage, etype, 'missing')(query) assert list(actual_missing) == [data.sha1_1] def test_add__drop_duplicate(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool_id = data.tools[self.tool_name]['id'] # add the first object data_v1 = { 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool_id, } endpoint(storage, etype, 'add')([data_v1]) # should be able to retrieve it actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v1 = [{ 'id': data.sha1_2, **self.example_data[0], 'tool': data.tools[self.tool_name], }] assert actual_data == expected_data_v1 # now if we add a modified version of the same object (same id) data_v2 = data_v1.copy() data_v2.update(self.example_data[1]) endpoint(storage, etype, 'add')([data_v2]) # we expect to retrieve the original data, not the modified one actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) assert actual_data == expected_data_v1 def test_add__update_in_place_duplicate( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = 
self.endpoint_type tool = data.tools[self.tool_name] data_v1 = { 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool['id'], } # given endpoint(storage, etype, 'add')([data_v1]) # when actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v1 = [{ 'id': data.sha1_2, **self.example_data[0], 'tool': tool, }] # then assert actual_data == expected_data_v1 # given data_v2 = data_v1.copy() data_v2.update(self.example_data[1]) endpoint(storage, etype, 'add')([data_v2], conflict_update=True) actual_data = list(endpoint(storage, etype, 'get')([data.sha1_2])) expected_data_v2 = [{ 'id': data.sha1_2, **self.example_data[1], 'tool': tool, }] # data did change as the v2 was used to overwrite v1 assert actual_data == expected_data_v2 def test_add__update_in_place_deadlock( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] hashes = [ hash_to_bytes( '34973274ccef6ab4dfaaf86599792fa9c3fe4{:03d}'.format(i)) for i in range(1000)] data_v1 = [ { 'id': hash_, **self.example_data[0], 'indexer_configuration_id': tool['id'], } for hash_ in hashes ] data_v2 = [ { 'id': hash_, **self.example_data[1], 'indexer_configuration_id': tool['id'], } for hash_ in hashes ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given endpoint(storage, etype, 'add')(data_v1) # when actual_data = list(endpoint(storage, etype, 'get')(hashes)) expected_data_v1 = [ { 'id': hash_, **self.example_data[0], 'tool': tool, } for hash_ in hashes ] # then assert actual_data == expected_data_v1 # given def f1(): endpoint(storage, etype, 'add')(data_v2a, conflict_update=True) def f2(): endpoint(storage, etype, 'add')(data_v2b, conflict_update=True) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = sorted(endpoint(storage, etype, 'get')(hashes), key=lambda x: x['id']) expected_data_v2 = [ { 'id': hash_, **self.example_data[1], 'tool': tool, } for hash_ in hashes ] assert actual_data == expected_data_v2 def test_add__duplicate_twice(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] data_rev1 = { 'id': data.revision_id_2, **self.example_data[0], 'indexer_configuration_id': tool['id'] } data_rev2 = { 'id': data.revision_id_2, **self.example_data[1], 'indexer_configuration_id': tool['id'] } # when endpoint(storage, etype, 'add')([data_rev1]) with pytest.raises(ValueError): endpoint(storage, etype, 'add')( [data_rev2, data_rev2], conflict_update=True) # then actual_data = list(endpoint(storage, etype, 'get')( [data.revision_id_2, data.revision_id_1])) expected_data = [{ 'id': data.revision_id_2, **self.example_data[0], 'tool': tool, }] assert actual_data == expected_data def test_get(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] query = [data.sha1_2, data.sha1_1] data1 = { 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool['id'], } # when endpoint(storage, etype, 'add')([data1]) # then actual_data = list(endpoint(storage, etype, 'get')(query)) # then expected_data = [{ 'id': data.sha1_2, **self.example_data[0], 'tool': tool, }] assert actual_data == expected_data class 
TestIndexerStorageContentMimetypes(StorageETypeTester): """Test Indexer Storage content_mimetype related methods """ endpoint_type = 'content_mimetype' tool_name = 'file' example_data = [ { 'mimetype': 'text/plain', 'encoding': 'utf-8', }, { 'mimetype': 'text/html', 'encoding': 'us-ascii', }, ] def test_generate_content_mimetype_get_range_limit_none( self, swh_indexer_storage): """mimetype_get_range call with wrong limit input should fail""" storage = swh_indexer_storage with pytest.raises(ValueError) as e: storage.content_mimetype_get_range( start=None, end=None, indexer_configuration_id=None, limit=None) assert e.value.args == ( 'Development error: limit should not be None',) def test_generate_content_mimetype_get_range_no_limit( self, swh_indexer_storage_with_data): """mimetype_get_range returns mimetypes within range provided""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes # All ids from the db content_ids = sorted([c['id'] for c in mimetypes]) start = content_ids[0] end = content_ids[-1] # retrieve mimetypes tool_id = mimetypes[0]['indexer_configuration_id'] actual_result = storage.content_mimetype_get_range( start, end, indexer_configuration_id=tool_id) actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert len(mimetypes) == len(actual_ids) assert actual_next is None assert content_ids == actual_ids def test_generate_content_mimetype_get_range_limit( self, swh_indexer_storage_with_data): """mimetype_get_range paginates results if limit exceeded""" storage, data = swh_indexer_storage_with_data indexer_configuration_id = data.tools['file']['id'] # input the list of sha1s we want from storage content_ids = sorted( [c['id'] for c in data.mimetypes]) mimetypes = list(storage.content_mimetype_get(content_ids)) assert len(mimetypes) == len(data.mimetypes) start = content_ids[0] end = content_ids[-1] # retrieve mimetypes limited to 10 results actual_result = storage.content_mimetype_get_range( start, end, indexer_configuration_id=indexer_configuration_id, limit=10) assert actual_result assert set(actual_result.keys()) == {'ids', 'next'} actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert len(actual_ids) == 10 assert actual_next is not None assert actual_next == content_ids[10] expected_mimetypes = content_ids[:10] assert expected_mimetypes == actual_ids # retrieve next part actual_result = storage.content_mimetype_get_range( start=end, end=end, indexer_configuration_id=indexer_configuration_id) assert set(actual_result.keys()) == {'ids', 'next'} actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert actual_next is None expected_mimetypes = [content_ids[-1]] assert expected_mimetypes == actual_ids class TestIndexerStorageContentLanguage(StorageETypeTester): """Test Indexer Storage content_language related methods """ endpoint_type = 'content_language' tool_name = 'pygments' example_data = [ { 'lang': 'haskell', }, { 'lang': 'common-lisp', }, ] class TestIndexerStorageContentCTags(StorageETypeTester): """Test Indexer Storage content_ctags related methods """ endpoint_type = 'content_ctags' tool_name = 'universal-ctags' example_data = [ { 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 119, 'lang': 'OCaml', }] }, { 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Python', }, { 'name': 'main', 'kind': 'function', 'line': 119, 'lang': 'Python', }] }, ] # the following tests are disabled because CTAGS behaves differently @pytest.mark.skip def 
test_add__drop_duplicate(self): pass @pytest.mark.skip def test_add__update_in_place_duplicate(self): pass @pytest.mark.skip def test_add__update_in_place_deadlock(self): pass @pytest.mark.skip def test_add__duplicate_twice(self): pass @pytest.mark.skip def test_get(self): pass def test_content_ctags_search(self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # 1. given tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag1 = { 'id': data.sha1_1, 'indexer_configuration_id': tool_id, 'ctags': [ { 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', }, { 'name': 'hello', 'kind': 'variable', 'line': 210, 'lang': 'Python', }, ] } ctag2 = { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [ { 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, { 'name': 'result', 'kind': 'variable', 'line': 120, 'lang': 'C', }, ] } storage.content_ctags_add([ctag1, ctag2]) # 1. when actual_ctags = list(storage.content_ctags_search('hello', limit=1)) # 1. then assert actual_ctags == [ { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', } ] # 2. when actual_ctags = list(storage.content_ctags_search( 'hello', limit=1, last_sha1=ctag1['id'])) # 2. then assert actual_ctags == [ { 'id': ctag2['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', } ] # 3. when actual_ctags = list(storage.content_ctags_search('hello')) # 3. then assert actual_ctags == [ { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'function', 'line': 133, 'lang': 'Python', }, { 'id': ctag1['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 210, 'lang': 'Python', }, { 'id': ctag2['id'], 'tool': tool, 'name': 'hello', 'kind': 'variable', 'line': 100, 'lang': 'C', }, ] # 4. when actual_ctags = list(storage.content_ctags_search('counter')) # then assert actual_ctags == [{ 'id': ctag1['id'], 'tool': tool, 'name': 'counter', 'kind': 'variable', 'line': 119, 'lang': 'Python', }] # 5. 
when actual_ctags = list(storage.content_ctags_search('result', limit=1)) # then assert actual_ctags == [{ 'id': ctag2['id'], 'tool': tool, 'name': 'result', 'kind': 'variable', 'line': 120, 'lang': 'C', }] def test_content_ctags_search_no_result(self, swh_indexer_storage): storage = swh_indexer_storage actual_ctags = list(storage.content_ctags_search('counter')) assert not actual_ctags def test_content_ctags_add__add_new_ctags_added( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag_v1 = { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given storage.content_ctags_add([ctag_v1]) storage.content_ctags_add([ctag_v1]) # conflict does nothing # when actual_ctags = list(storage.content_ctags_get([data.sha1_2])) # then expected_ctags = [{ 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, }] assert actual_ctags == expected_ctags # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) storage.content_ctags_add([ctag_v2]) expected_ctags = [ { 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, }, { 'id': data.sha1_2, 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', 'tool': tool, } ] actual_ctags = list(storage.content_ctags_get( [data.sha1_2])) assert actual_ctags == expected_ctags def test_content_ctags_add__update_in_place( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool = data.tools['universal-ctags'] tool_id = tool['id'] ctag_v1 = { 'id': data.sha1_2, 'indexer_configuration_id': tool_id, 'ctags': [{ 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }] } # given storage.content_ctags_add([ctag_v1]) # when actual_ctags = list(storage.content_ctags_get( [data.sha1_2])) # then expected_ctags = [ { 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool } ] assert actual_ctags == expected_ctags # given ctag_v2 = ctag_v1.copy() ctag_v2.update({ 'ctags': [ { 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', }, { 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', } ] }) storage.content_ctags_add([ctag_v2], conflict_update=True) actual_ctags = list(storage.content_ctags_get( [data.sha1_2])) # ctag did change as the v2 was used to overwrite v1 expected_ctags = [ { 'id': data.sha1_2, 'name': 'done', 'kind': 'variable', 'line': 100, 'lang': 'Scheme', 'tool': tool, }, { 'id': data.sha1_2, 'name': 'defn', 'kind': 'function', 'line': 120, 'lang': 'Scheme', 'tool': tool, } ] assert actual_ctags == expected_ctags class TestIndexerStorageContentMetadata(StorageETypeTester): """Test Indexer Storage content_metadata related methods """ tool_name = 'swh-metadata-detector' endpoint_type = 'content_metadata' example_data = [ { 'metadata': { 'other': {}, 'codeRepository': { 'type': 'git', 'url': 'https://github.com/moranegg/metadata_test' }, 'description': 'Simple package.json test for indexer', 'name': 'test_metadata', 'version': '0.0.1' }, }, { 'metadata': { 'other': {}, 'name': 'test_metadata', 'version': '0.0.1' }, }, ] class TestIndexerStorageRevisionIntrinsicMetadata(StorageETypeTester): """Test Indexer Storage revision_intrinsic_metadata related methods """ tool_name = 
'swh-metadata-detector' endpoint_type = 'revision_intrinsic_metadata' example_data = [ { 'metadata': { 'other': {}, 'codeRepository': { 'type': 'git', 'url': 'https://github.com/moranegg/metadata_test' }, 'description': 'Simple package.json test for indexer', 'name': 'test_metadata', 'version': '0.0.1' }, 'mappings': ['mapping1'], }, { 'metadata': { 'other': {}, 'name': 'test_metadata', 'version': '0.0.1' }, 'mappings': ['mapping2'], }, ] def test_revision_intrinsic_metadata_delete( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] query = [data.sha1_2, data.sha1_1] data1 = { 'id': data.sha1_2, **self.example_data[0], 'indexer_configuration_id': tool['id'], } # when endpoint(storage, etype, 'add')([data1]) endpoint(storage, etype, 'delete')([ { 'id': data.sha1_2, 'indexer_configuration_id': tool['id'], } ]) # then actual_data = list(endpoint(storage, etype, 'get')(query)) # then assert not actual_data def test_revision_intrinsic_metadata_delete_nonexisting( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] endpoint(storage, etype, 'delete')([ { 'id': data.sha1_2, 'indexer_configuration_id': tool['id'], } ]) class TestIndexerStorageContentFossologyLicence: def test_content_fossology_license_add__new_license_added( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool = data.tools['nomos'] tool_id = tool['id'] license_v1 = { 'id': data.sha1_1, 'licenses': ['Apache-2.0'], 'indexer_configuration_id': tool_id, } # given storage.content_fossology_license_add([license_v1]) # conflict does nothing storage.content_fossology_license_add([license_v1]) # when actual_licenses = list(storage.content_fossology_license_get( [data.sha1_1])) # then expected_license = { data.sha1_1: [{ 'licenses': ['Apache-2.0'], 'tool': tool, }] } assert actual_licenses == [expected_license] # given license_v2 = license_v1.copy() license_v2.update({ 'licenses': ['BSD-2-Clause'], }) storage.content_fossology_license_add([license_v2]) actual_licenses = list(storage.content_fossology_license_get( [data.sha1_1])) expected_license = { data.sha1_1: [{ 'licenses': ['Apache-2.0', 'BSD-2-Clause'], 'tool': tool }] } # license did not change as the v2 was dropped. 
assert actual_licenses == [expected_license] def test_generate_content_fossology_license_get_range_limit_none( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data """license_get_range call with wrong limit input should fail""" with pytest.raises(ValueError) as e: storage.content_fossology_license_get_range( start=None, end=None, indexer_configuration_id=None, limit=None) assert e.value.args == ( 'Development error: limit should not be None',) def test_generate_content_fossology_license_get_range_no_limit( self, swh_indexer_storage_with_data): """license_get_range returns licenses within range provided""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from(fossology_licenses) storage.content_mimetype_add(mimetypes, conflict_update=True) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db content_ids = sorted([c['id'] for c in fossology_licenses]) start = content_ids[0] end = content_ids[-1] # retrieve fossology_licenses tool_id = fossology_licenses[0]['indexer_configuration_id'] actual_result = storage.content_fossology_license_get_range( start, end, indexer_configuration_id=tool_id) actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert len(fossology_licenses) == len(actual_ids) assert actual_next is None assert content_ids == actual_ids def test_generate_content_fossology_license_get_range_no_limit_with_filter( self, swh_indexer_storage_with_data): """This filters non textual, then returns results within range""" storage, data = swh_indexer_storage_with_data fossology_licenses = data.fossology_licenses mimetypes = data.mimetypes # craft some consistent mimetypes _mimetypes = prepare_mimetypes_from(fossology_licenses) # add binary mimetypes which will get filtered out in results for m in mimetypes: _mimetypes.append({ 'mimetype': 'binary', **m, }) storage.content_mimetype_add(_mimetypes, conflict_update=True) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db content_ids = sorted([c['id'] for c in fossology_licenses]) start = content_ids[0] end = content_ids[-1] # retrieve fossology_licenses tool_id = fossology_licenses[0]['indexer_configuration_id'] actual_result = storage.content_fossology_license_get_range( start, end, indexer_configuration_id=tool_id) actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert len(fossology_licenses) == len(actual_ids) assert actual_next is None assert content_ids == actual_ids def test_generate_fossology_license_get_range_limit( self, swh_indexer_storage_with_data): """fossology_license_get_range paginates results if limit exceeded""" storage, data = swh_indexer_storage_with_data fossology_licenses = data.fossology_licenses # craft some consistent mimetypes mimetypes = prepare_mimetypes_from(fossology_licenses) # add fossology_licenses to storage storage.content_mimetype_add(mimetypes, conflict_update=True) storage.content_fossology_license_add(fossology_licenses) # input the list of sha1s we want from storage content_ids = sorted([c['id'] for c in fossology_licenses]) start = content_ids[0] end = content_ids[-1] # retrieve fossology_licenses limited to 3 results limited_results = len(fossology_licenses) - 1 tool_id = fossology_licenses[0]['indexer_configuration_id'] actual_result = storage.content_fossology_license_get_range( start, end, 
indexer_configuration_id=tool_id, limit=limited_results) actual_ids = actual_result['ids'] actual_next = actual_result['next'] assert limited_results == len(actual_ids) assert actual_next is not None assert actual_next == content_ids[-1] expected_fossology_licenses = content_ids[:-1] assert expected_fossology_licenses == actual_ids # retrieve next part actual_results2 = storage.content_fossology_license_get_range( start=end, end=end, indexer_configuration_id=tool_id) actual_ids2 = actual_results2['ids'] actual_next2 = actual_results2['next'] assert actual_next2 is None expected_fossology_licenses2 = [content_ids[-1]] assert expected_fossology_licenses2 == actual_ids2 class TestIndexerStorageOriginIntrinsicMetadata: def test_origin_intrinsic_metadata_get( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] metadata = { 'version': None, 'name': None, } metadata_rev = { 'id': data.revision_id_2, 'metadata': metadata, 'mappings': ['mapping1'], 'indexer_configuration_id': tool_id, } metadata_origin = { 'id': data.origin_url_1, 'metadata': metadata, 'indexer_configuration_id': tool_id, 'mappings': ['mapping1'], 'from_revision': data.revision_id_2, } # when storage.revision_intrinsic_metadata_add([metadata_rev]) storage.origin_intrinsic_metadata_add([metadata_origin]) # then actual_metadata = list(storage.origin_intrinsic_metadata_get( [data.origin_url_1, 'no://where'])) expected_metadata = [{ 'id': data.origin_url_1, 'metadata': metadata, 'tool': data.tools['swh-metadata-detector'], 'from_revision': data.revision_id_2, 'mappings': ['mapping1'], }] assert actual_metadata == expected_metadata def test_origin_intrinsic_metadata_delete( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] metadata = { 'version': None, 'name': None, } metadata_rev = { 'id': data.revision_id_2, 'metadata': metadata, 'mappings': ['mapping1'], 'indexer_configuration_id': tool_id, } metadata_origin = { 'id': data.origin_url_1, 'metadata': metadata, 'indexer_configuration_id': tool_id, 'mappings': ['mapping1'], 'from_revision': data.revision_id_2, } metadata_origin2 = metadata_origin.copy() metadata_origin2['id'] = data.origin_url_2 # when storage.revision_intrinsic_metadata_add([metadata_rev]) storage.origin_intrinsic_metadata_add([ metadata_origin, metadata_origin2]) storage.origin_intrinsic_metadata_delete([ { 'id': data.origin_url_1, 'indexer_configuration_id': tool_id } ]) # then actual_metadata = list(storage.origin_intrinsic_metadata_get( [data.origin_url_1, data.origin_url_2, 'no://where'])) for item in actual_metadata: item['indexer_configuration_id'] = item.pop('tool')['id'] assert actual_metadata == [metadata_origin2] def test_origin_intrinsic_metadata_delete_nonexisting( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data tool_id = data.tools['swh-metadata-detector']['id'] storage.origin_intrinsic_metadata_delete([ { 'id': data.origin_url_1, 'indexer_configuration_id': tool_id } ]) def test_origin_intrinsic_metadata_add_drop_duplicate( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] metadata_v1 = { 'version': None, 'name': None, } metadata_rev_v1 = { 'id': data.revision_id_1, 'metadata': metadata_v1.copy(), 'mappings': [], 'indexer_configuration_id': tool_id, } metadata_origin_v1 = { 'id': 
data.origin_url_1, 'metadata': metadata_v1.copy(), 'indexer_configuration_id': tool_id, 'mappings': [], 'from_revision': data.revision_id_1, } # given storage.revision_intrinsic_metadata_add([metadata_rev_v1]) storage.origin_intrinsic_metadata_add([metadata_origin_v1]) # when actual_metadata = list(storage.origin_intrinsic_metadata_get( [data.origin_url_1, 'no://where'])) expected_metadata_v1 = [{ 'id': data.origin_url_1, 'metadata': metadata_v1, 'tool': data.tools['swh-metadata-detector'], 'from_revision': data.revision_id_1, 'mappings': [], }] assert actual_metadata == expected_metadata_v1 # given metadata_v2 = metadata_v1.copy() metadata_v2.update({ 'name': 'test_metadata', 'author': 'MG', }) metadata_rev_v2 = metadata_rev_v1.copy() metadata_origin_v2 = metadata_origin_v1.copy() metadata_rev_v2['metadata'] = metadata_v2 metadata_origin_v2['metadata'] = metadata_v2 storage.revision_intrinsic_metadata_add([metadata_rev_v2]) storage.origin_intrinsic_metadata_add([metadata_origin_v2]) # then actual_metadata = list(storage.origin_intrinsic_metadata_get( [data.origin_url_1])) # metadata did not change as the v2 was dropped. assert actual_metadata == expected_metadata_v1 def test_origin_intrinsic_metadata_add_update_in_place_duplicate( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] metadata_v1 = { 'version': None, 'name': None, } metadata_rev_v1 = { 'id': data.revision_id_2, 'metadata': metadata_v1, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata_origin_v1 = { 'id': data.origin_url_1, 'metadata': metadata_v1.copy(), 'indexer_configuration_id': tool_id, 'mappings': [], 'from_revision': data.revision_id_2, } # given storage.revision_intrinsic_metadata_add([metadata_rev_v1]) storage.origin_intrinsic_metadata_add([metadata_origin_v1]) # when actual_metadata = list(storage.origin_intrinsic_metadata_get( [data.origin_url_1])) # then expected_metadata_v1 = [{ 'id': data.origin_url_1, 'metadata': metadata_v1, 'tool': data.tools['swh-metadata-detector'], 'from_revision': data.revision_id_2, 'mappings': [], }] assert actual_metadata == expected_metadata_v1 # given metadata_v2 = metadata_v1.copy() metadata_v2.update({ 'name': 'test_update_duplicated_metadata', 'author': 'MG', }) metadata_rev_v2 = metadata_rev_v1.copy() metadata_origin_v2 = metadata_origin_v1.copy() metadata_rev_v2['metadata'] = metadata_v2 metadata_origin_v2 = { 'id': data.origin_url_1, 'metadata': metadata_v2.copy(), 'indexer_configuration_id': tool_id, 'mappings': ['npm'], 'from_revision': data.revision_id_1, } storage.revision_intrinsic_metadata_add( [metadata_rev_v2], conflict_update=True) storage.origin_intrinsic_metadata_add( [metadata_origin_v2], conflict_update=True) actual_metadata = list(storage.origin_intrinsic_metadata_get( [data.origin_url_1])) expected_metadata_v2 = [{ 'id': data.origin_url_1, 'metadata': metadata_v2, 'tool': data.tools['swh-metadata-detector'], 'from_revision': data.revision_id_1, 'mappings': ['npm'], }] # metadata did change as the v2 was used to overwrite v1 assert actual_metadata == expected_metadata_v2 def test_origin_intrinsic_metadata_add__update_in_place_deadlock( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] ids = list(range(10)) example_data1 = { 'metadata': { 'version': None, 'name': None, }, 'mappings': [], } example_data2 = { 'metadata': { 'version': 'v1.1.1', 'name': 'foo', }, 
'mappings': [], } metadata_rev_v1 = { 'id': data.revision_id_2, 'metadata': { 'version': None, 'name': None, }, 'mappings': [], 'indexer_configuration_id': tool_id, } data_v1 = [ { 'id': 'file:///tmp/origin%d' % id_, 'from_revision': data.revision_id_2, **example_data1, 'indexer_configuration_id': tool_id, } for id_ in ids ] data_v2 = [ { 'id': 'file:///tmp/origin%d' % id_, 'from_revision': data.revision_id_2, **example_data2, 'indexer_configuration_id': tool_id, } for id_ in ids ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given storage.revision_intrinsic_metadata_add([metadata_rev_v1]) storage.origin_intrinsic_metadata_add(data_v1) # when origins = ['file:///tmp/origin%d' % i for i in ids] actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v1 = [ { 'id': 'file:///tmp/origin%d' % id_, 'from_revision': data.revision_id_2, **example_data1, 'tool': data.tools['swh-metadata-detector'], } for id_ in ids ] # then assert actual_data == expected_data_v1 # given def f1(): storage.origin_intrinsic_metadata_add( data_v2a, conflict_update=True) def f2(): storage.origin_intrinsic_metadata_add( data_v2b, conflict_update=True) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v2 = [ { 'id': 'file:///tmp/origin%d' % id_, 'from_revision': data.revision_id_2, **example_data2, 'tool': data.tools['swh-metadata-detector'], } for id_ in ids ] assert len(actual_data) == len(expected_data_v2) assert sorted(actual_data, key=lambda x: x['id']) == expected_data_v2 def test_origin_intrinsic_metadata_add__duplicate_twice( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] metadata = { 'developmentStatus': None, 'name': None, } metadata_rev = { 'id': data.revision_id_2, 'metadata': metadata, 'mappings': ['mapping1'], 'indexer_configuration_id': tool_id, } metadata_origin = { 'id': data.origin_url_1, 'metadata': metadata, 'indexer_configuration_id': tool_id, 'mappings': ['mapping1'], 'from_revision': data.revision_id_2, } # when storage.revision_intrinsic_metadata_add([metadata_rev]) with pytest.raises(ValueError): storage.origin_intrinsic_metadata_add([ metadata_origin, metadata_origin]) def test_origin_intrinsic_metadata_search_fulltext( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] metadata1 = { 'author': 'John Doe', } metadata1_rev = { 'id': data.revision_id_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata1_origin = { 'id': data.origin_url_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, 'from_revision': data.revision_id_1, } metadata2 = { 'author': 'Jane Doe', } metadata2_rev = { 'id': data.revision_id_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata2_origin = { 'id': data.origin_url_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, 'from_revision': data.revision_id_2, } # when storage.revision_intrinsic_metadata_add([metadata1_rev]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.revision_intrinsic_metadata_add([metadata2_rev]) storage.origin_intrinsic_metadata_add([metadata2_origin]) 
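        # Note on the assertions below: the fulltext search endpoint only
        # matches an origin if *all* the given terms occur in its metadata
        # (AND semantics). This is why searching for both 'John' and 'Jane'
        # returns nothing: each term matches one origin, but no single
        # origin's metadata contains both.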
# then search = storage.origin_intrinsic_metadata_search_fulltext assert set([res['id'] for res in search(['Doe'])]) \ == set([data.origin_url_1, data.origin_url_2]) assert [res['id'] for res in search(['John', 'Doe'])] \ == [data.origin_url_1] assert [res['id'] for res in search(['John'])] \ == [data.origin_url_1] assert not list(search(['John', 'Jane'])) def test_origin_intrinsic_metadata_search_fulltext_rank( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data # given tool_id = data.tools['swh-metadata-detector']['id'] # The following authors have "Random Person" to add some more content # to the JSON data, to work around normalization quirks when there # are few words (rank/(1+ln(nb_words)) is very sensitive to nb_words # for small values of nb_words). metadata1 = { 'author': [ 'Random Person', 'John Doe', 'Jane Doe', ] } metadata1_rev = { 'id': data.revision_id_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata1_origin = { 'id': data.origin_url_1, 'metadata': metadata1, 'mappings': [], 'indexer_configuration_id': tool_id, 'from_revision': data.revision_id_1, } metadata2 = { 'author': [ 'Random Person', 'Jane Doe', ] } metadata2_rev = { 'id': data.revision_id_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, } metadata2_origin = { 'id': data.origin_url_2, 'metadata': metadata2, 'mappings': [], 'indexer_configuration_id': tool_id, 'from_revision': data.revision_id_2, } # when storage.revision_intrinsic_metadata_add([metadata1_rev]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.revision_intrinsic_metadata_add([metadata2_rev]) storage.origin_intrinsic_metadata_add([metadata2_origin]) # then search = storage.origin_intrinsic_metadata_search_fulltext assert [res['id'] for res in search(['Doe'])] \ == [data.origin_url_1, data.origin_url_2] assert [res['id'] for res in search(['Doe'], limit=1)] \ == [data.origin_url_1] assert [res['id'] for res in search(['John'])] \ == [data.origin_url_1] assert [res['id'] for res in search(['Jane'])] \ == [data.origin_url_2, data.origin_url_1] assert [res['id'] for res in search(['John', 'Jane'])] \ == [data.origin_url_1] def _fill_origin_intrinsic_metadata( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data tool1_id = data.tools['swh-metadata-detector']['id'] tool2_id = data.tools['swh-metadata-detector2']['id'] metadata1 = { '@context': 'foo', 'author': 'John Doe', } metadata1_rev = { 'id': data.revision_id_1, 'metadata': metadata1, 'mappings': ['npm'], 'indexer_configuration_id': tool1_id, } metadata1_origin = { 'id': data.origin_url_1, 'metadata': metadata1, 'mappings': ['npm'], 'indexer_configuration_id': tool1_id, 'from_revision': data.revision_id_1, } metadata2 = { '@context': 'foo', 'author': 'Jane Doe', } metadata2_rev = { 'id': data.revision_id_2, 'metadata': metadata2, 'mappings': ['npm', 'gemspec'], 'indexer_configuration_id': tool2_id, } metadata2_origin = { 'id': data.origin_url_2, 'metadata': metadata2, 'mappings': ['npm', 'gemspec'], 'indexer_configuration_id': tool2_id, 'from_revision': data.revision_id_2, } metadata3 = { '@context': 'foo', } metadata3_rev = { 'id': data.revision_id_3, 'metadata': metadata3, 'mappings': ['npm', 'gemspec'], 'indexer_configuration_id': tool2_id, } metadata3_origin = { 'id': data.origin_url_3, 'metadata': metadata3, 'mappings': ['pkg-info'], 'indexer_configuration_id': tool2_id, 'from_revision': data.revision_id_3, } 
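        # Fixture summary: three origins indexed by two different tools
        # (origin 1 by tool1; origins 2 and 3 by tool2), with distinct
        # mapping sets, and origin 3 carrying no metadata besides @context.
        # The search_by_producer and stats tests below depend on this exact
        # distribution.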
storage.revision_intrinsic_metadata_add([metadata1_rev]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.revision_intrinsic_metadata_add([metadata2_rev]) storage.origin_intrinsic_metadata_add([metadata2_origin]) storage.revision_intrinsic_metadata_add([metadata3_rev]) storage.origin_intrinsic_metadata_add([metadata3_origin]) def test_origin_intrinsic_metadata_search_by_producer( self, swh_indexer_storage_with_data): storage, data = swh_indexer_storage_with_data self._fill_origin_intrinsic_metadata( swh_indexer_storage_with_data) tool1 = data.tools['swh-metadata-detector'] tool2 = data.tools['swh-metadata-detector2'] endpoint = storage.origin_intrinsic_metadata_search_by_producer # test pagination # no 'page_token' param, return all origins result = endpoint(ids_only=True) assert result['origins'] \ == [data.origin_url_1, data.origin_url_2, data.origin_url_3] assert 'next_page_token' not in result # 'page_token' is < than origin_1, return everything result = endpoint(page_token=data.origin_url_1[:-1], ids_only=True) assert result['origins'] \ == [data.origin_url_1, data.origin_url_2, data.origin_url_3] assert 'next_page_token' not in result # 'page_token' is origin_3, return nothing result = endpoint(page_token=data.origin_url_3, ids_only=True) assert not result['origins'] assert 'next_page_token' not in result # test limit argument result = endpoint(page_token=data.origin_url_1[:-1], limit=2, ids_only=True) assert result['origins'] == [data.origin_url_1, data.origin_url_2] assert result['next_page_token'] == result['origins'][-1] result = endpoint(page_token=data.origin_url_1, limit=2, ids_only=True) assert result['origins'] == [data.origin_url_2, data.origin_url_3] assert 'next_page_token' not in result result = endpoint(page_token=data.origin_url_2, limit=2, ids_only=True) assert result['origins'] == [data.origin_url_3] assert 'next_page_token' not in result # test mappings filtering result = endpoint(mappings=['npm'], ids_only=True) assert result['origins'] == [data.origin_url_1, data.origin_url_2] assert 'next_page_token' not in result result = endpoint(mappings=['npm', 'gemspec'], ids_only=True) assert result['origins'] == [data.origin_url_1, data.origin_url_2] assert 'next_page_token' not in result result = endpoint(mappings=['gemspec'], ids_only=True) assert result['origins'] == [data.origin_url_2] assert 'next_page_token' not in result result = endpoint(mappings=['pkg-info'], ids_only=True) assert result['origins'] == [data.origin_url_3] assert 'next_page_token' not in result result = endpoint(mappings=['foobar'], ids_only=True) assert not result['origins'] assert 'next_page_token' not in result # test pagination + mappings result = endpoint(mappings=['npm'], limit=1, ids_only=True) assert result['origins'] == [data.origin_url_1] assert result['next_page_token'] == result['origins'][-1] # test tool filtering result = endpoint(tool_ids=[tool1['id']], ids_only=True) assert result['origins'] == [data.origin_url_1] assert 'next_page_token' not in result result = endpoint(tool_ids=[tool2['id']], ids_only=True) assert sorted(result['origins']) \ == [data.origin_url_2, data.origin_url_3] assert 'next_page_token' not in result result = endpoint(tool_ids=[tool1['id'], tool2['id']], ids_only=True) assert sorted(result['origins']) \ == [data.origin_url_1, data.origin_url_2, data.origin_url_3] assert 'next_page_token' not in result # test ids_only=False assert endpoint(mappings=['gemspec'])['origins'] \ == [{ 'id': data.origin_url_2, 'metadata': { '@context': 'foo', 'author': 
                'Jane Doe',
            },
            'mappings': ['npm', 'gemspec'],
            'tool': tool2,
            'from_revision': data.revision_id_2,
        }]

    def test_origin_intrinsic_metadata_stats(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        self._fill_origin_intrinsic_metadata(
            swh_indexer_storage_with_data)

        result = storage.origin_intrinsic_metadata_stats()

        assert result == {
            'per_mapping': {
                'gemspec': 1,
                'npm': 2,
                'pkg-info': 1,
                'codemeta': 0,
                'maven': 0,
            },
            'total': 3,
            'non_empty': 2,
        }


class TestIndexerStorageIndexerConfiguration:
    def test_indexer_configuration_add(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'some-unknown-tool',
            'tool_version': 'some-version',
            'tool_configuration': {"debian-package": "some-package"},
        }
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool is None  # does not exist

        # add it
        actual_tools = list(storage.indexer_configuration_add([tool]))

        assert len(actual_tools) == 1
        actual_tool = actual_tools[0]
        assert actual_tool is not None  # now it exists
        new_id = actual_tool.pop('id')
        assert actual_tool == tool

        actual_tools2 = list(storage.indexer_configuration_add([tool]))
        actual_tool2 = actual_tools2[0]
        assert actual_tool2 is not None  # now it exists
        new_id2 = actual_tool2.pop('id')

        assert new_id == new_id2
        assert actual_tool == actual_tool2

    def test_indexer_configuration_add_multiple(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'some-unknown-tool',
            'tool_version': 'some-version',
            'tool_configuration': {"debian-package": "some-package"},
        }
        actual_tools = list(storage.indexer_configuration_add([tool]))
        assert len(actual_tools) == 1

        new_tools = [tool, {
            'tool_name': 'yet-another-tool',
            'tool_version': 'version',
            'tool_configuration': {},
        }]

        actual_tools = list(storage.indexer_configuration_add(new_tools))
        assert len(actual_tools) == 2

        # order not guaranteed, so we iterate over results to check
        for tool in actual_tools:
            _id = tool.pop('id')
            assert _id is not None
            assert tool in new_tools

    def test_indexer_configuration_get_missing(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'unknown-tool',
            'tool_version': '3.1.0rc2-31-ga2cbb8c',
            'tool_configuration': {"command_line": "nomossa "},
        }
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool is None

    def test_indexer_configuration_get(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'nomos',
            'tool_version': '3.1.0rc2-31-ga2cbb8c',
            'tool_configuration': {"command_line": "nomossa "},
        }
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool

        expected_tool = tool.copy()
        del actual_tool['id']
        assert expected_tool == actual_tool

    def test_indexer_configuration_metadata_get_missing_context(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'swh-metadata-translator',
            'tool_version': '0.0.1',
            'tool_configuration': {"context": "unknown-context"},
        }
        actual_tool = storage.indexer_configuration_get(tool)
        assert actual_tool is None

    def test_indexer_configuration_metadata_get(
            self, swh_indexer_storage_with_data):
        storage, data = swh_indexer_storage_with_data
        tool = {
            'tool_name': 'swh-metadata-translator',
            'tool_version': '0.0.1',
            'tool_configuration': {"type": "local", "context": "NpmMapping"},
        }
        storage.indexer_configuration_add([tool])
        actual_tool = storage.indexer_configuration_get(tool)
        assert
actual_tool expected_tool = tool.copy() expected_tool['id'] = actual_tool['id'] assert expected_tool == actual_tool class TestIndexerStorageMisc: """Misc endpoints tests for the IndexerStorage. """ def test_check_config(self, swh_indexer_storage): storage = swh_indexer_storage assert storage.check_config(check_write=True) assert storage.check_config(check_write=False) + + def test_types(self, swh_indexer_storage): + """Checks all methods of StorageInterface are implemented by this + backend, and that they have the same signature.""" + # Create an instance of the protocol (which cannot be instantiated + # directly, so this creates a subclass, then instantiates it) + interface = type('_', (IndexerStorageInterface,), {})() + + assert 'content_mimetype_add' in dir(interface) + + missing_methods = [] + + for meth_name in dir(interface): + if meth_name.startswith('_'): + continue + interface_meth = getattr(interface, meth_name) + try: + concrete_meth = getattr(swh_indexer_storage, meth_name) + except AttributeError: + missing_methods.append(meth_name) + continue + + expected_signature = inspect.signature(interface_meth) + actual_signature = inspect.signature(concrete_meth) + + assert expected_signature == actual_signature, meth_name + + assert missing_methods == [] diff --git a/swh/indexer/tests/test_cli.py b/swh/indexer/tests/test_cli.py index 8dad2c7..6a60799 100644 --- a/swh/indexer/tests/test_cli.py +++ b/swh/indexer/tests/test_cli.py @@ -1,361 +1,360 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from functools import reduce import re import tempfile from unittest.mock import patch from click.testing import CliRunner from swh.journal.tests.utils import FakeKafkaMessage, MockedKafkaConsumer from swh.model.hashutil import hash_to_bytes from swh.indexer.cli import cli CLI_CONFIG = ''' scheduler: cls: foo args: {} storage: cls: memory - args: {} indexer_storage: cls: memory args: {} ''' def fill_idx_storage(idx_storage, nb_rows): tools = [ { 'tool_name': 'tool %d' % i, 'tool_version': '0.0.1', 'tool_configuration': {}, } for i in range(2) ] tools = idx_storage.indexer_configuration_add(tools) origin_metadata = [ { 'id': 'file://dev/%04d' % origin_id, 'from_revision': hash_to_bytes('abcd{:0>4}'.format(origin_id)), 'indexer_configuration_id': tools[origin_id % 2]['id'], 'metadata': {'name': 'origin %d' % origin_id}, 'mappings': ['mapping%d' % (origin_id % 10)] } for origin_id in range(nb_rows) ] revision_metadata = [ { 'id': hash_to_bytes('abcd{:0>4}'.format(origin_id)), 'indexer_configuration_id': tools[origin_id % 2]['id'], 'metadata': {'name': 'origin %d' % origin_id}, 'mappings': ['mapping%d' % (origin_id % 10)] } for origin_id in range(nb_rows) ] idx_storage.revision_intrinsic_metadata_add(revision_metadata) idx_storage.origin_intrinsic_metadata_add(origin_metadata) return [tool['id'] for tool in tools] def _origins_in_task_args(tasks): """Returns the set of origins contained in the arguments of the provided tasks (assumed to be of type index-origin-metadata).""" return reduce( set.union, (set(task['arguments']['args'][0]) for task in tasks), set() ) def _assert_tasks_for_origins(tasks, origins): expected_kwargs = {"policy_update": "update-dups"} assert {task['type'] for task in tasks} == {'index-origin-metadata'} assert all(len(task['arguments']['args']) == 1 for task in tasks) for task in 
tasks:
        assert task['arguments']['kwargs'] == expected_kwargs, task
    assert _origins_in_task_args(tasks) == set([
        'file://dev/%04d' % i for i in origins])


def invoke(scheduler, catch_exceptions, args):
    runner = CliRunner()
    with patch('swh.indexer.cli.get_scheduler') as get_scheduler_mock, \
            tempfile.NamedTemporaryFile('a', suffix='.yml') as config_fd:
        config_fd.write(CLI_CONFIG)
        config_fd.seek(0)
        get_scheduler_mock.return_value = scheduler
        result = runner.invoke(cli, ['-C' + config_fd.name] + args)
    if not catch_exceptions and result.exception:
        print(result.output)
        raise result.exception
    return result


def test_mapping_list(indexer_scheduler):
    result = invoke(indexer_scheduler, False, [
        'mapping', 'list',
    ])
    expected_output = '\n'.join([
        'codemeta', 'gemspec', 'maven', 'npm', 'pkg-info', '',
    ])
    assert result.exit_code == 0, result.output
    assert result.output == expected_output


def test_mapping_list_terms(indexer_scheduler):
    result = invoke(indexer_scheduler, False, [
        'mapping', 'list-terms',
    ])
    assert result.exit_code == 0, result.output
    assert re.search(r'http://schema.org/url:\n.*npm', result.output)
    assert re.search(r'http://schema.org/url:\n.*codemeta', result.output)
    assert re.search(
        r'https://codemeta.github.io/terms/developmentStatus:\n\tcodemeta',
        result.output)


def test_mapping_list_terms_exclude(indexer_scheduler):
    result = invoke(indexer_scheduler, False, [
        'mapping', 'list-terms', '--exclude-mapping', 'codemeta'
    ])
    assert result.exit_code == 0, result.output
    assert re.search(r'http://schema.org/url:\n.*npm', result.output)
    assert not re.search(r'http://schema.org/url:\n.*codemeta', result.output)
    assert not re.search(
        r'https://codemeta.github.io/terms/developmentStatus:\n\tcodemeta',
        result.output)


@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3)
@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
def test_origin_metadata_reindex_empty_db(
        indexer_scheduler, idx_storage, storage):
    result = invoke(indexer_scheduler, False, [
        'schedule', 'reindex_origin_metadata',
    ])
    expected_output = (
        'Nothing to do (no origin metadata matched the criteria).\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 0


@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3)
@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
def test_origin_metadata_reindex_divisor(
        indexer_scheduler, idx_storage, storage):
    """Tests the re-indexing when origin_batch_size*task_batch_size is a
    divisor of nb_origins."""
    fill_idx_storage(idx_storage, 90)

    result = invoke(indexer_scheduler, False, [
        'schedule', 'reindex_origin_metadata',
    ])

    # Check the output
    expected_output = (
        'Scheduled 3 tasks (30 origins).\n'
        'Scheduled 6 tasks (60 origins).\n'
        'Scheduled 9 tasks (90 origins).\n'
        'Done.\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    # Check scheduled tasks
    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 9
    _assert_tasks_for_origins(tasks, range(90))


@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3)
@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
def test_origin_metadata_reindex_dry_run(
        indexer_scheduler, idx_storage, storage):
    """Tests that the --dry-run flag reports the scheduling plan but does
    not actually schedule any task."""
    fill_idx_storage(idx_storage, 90)

    result = invoke(indexer_scheduler, False, [
        'schedule', '--dry-run', 'reindex_origin_metadata',
    ])

    # Check the output
    expected_output = (
        'Scheduled 3 tasks (30 origins).\n'
        'Scheduled 6 tasks (60 origins).\n'
        'Scheduled 9 tasks (90 origins).\n'
        'Done.\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    # Check scheduled tasks
    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 0


@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3)
@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
def test_origin_metadata_reindex_nondivisor(
        indexer_scheduler, idx_storage, storage):
    """Tests the re-indexing when neither origin_batch_size nor
    task_batch_size is a divisor of nb_origins."""
    fill_idx_storage(idx_storage, 70)

    result = invoke(indexer_scheduler, False, [
        'schedule', 'reindex_origin_metadata',
        '--batch-size', '20',
    ])

    # Check the output
    expected_output = (
        'Scheduled 3 tasks (60 origins).\n'
        'Scheduled 4 tasks (70 origins).\n'
        'Done.\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    # Check scheduled tasks
    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 4
    _assert_tasks_for_origins(tasks, range(70))


@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3)
@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
def test_origin_metadata_reindex_filter_one_mapping(
        indexer_scheduler, idx_storage, storage):
    """Tests the re-indexing when filtering on a single metadata
    mapping."""
    fill_idx_storage(idx_storage, 110)

    result = invoke(indexer_scheduler, False, [
        'schedule', 'reindex_origin_metadata',
        '--mapping', 'mapping1',
    ])

    # Check the output
    expected_output = (
        'Scheduled 2 tasks (11 origins).\n'
        'Done.\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    # Check scheduled tasks
    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 2
    _assert_tasks_for_origins(
        tasks,
        [1, 11, 21, 31, 41, 51, 61, 71, 81, 91, 101])


@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3)
@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
def test_origin_metadata_reindex_filter_two_mappings(
        indexer_scheduler, idx_storage, storage):
    """Tests the re-indexing when filtering on either of two metadata
    mappings."""
    fill_idx_storage(idx_storage, 110)

    result = invoke(indexer_scheduler, False, [
        'schedule', 'reindex_origin_metadata',
        '--mapping', 'mapping1', '--mapping', 'mapping2',
    ])

    # Check the output
    expected_output = (
        'Scheduled 3 tasks (22 origins).\n'
        'Done.\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    # Check scheduled tasks
    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 3
    _assert_tasks_for_origins(
        tasks,
        [1, 11, 21, 31, 41, 51, 61, 71, 81, 91, 101,
         2, 12, 22, 32, 42, 52, 62, 72, 82, 92, 102])


@patch('swh.scheduler.cli.utils.TASK_BATCH_SIZE', 3)
@patch('swh.scheduler.cli_utils.TASK_BATCH_SIZE', 3)
def test_origin_metadata_reindex_filter_one_tool(
        indexer_scheduler, idx_storage, storage):
    """Tests the re-indexing when filtering on a single indexer tool."""
    tool_ids = fill_idx_storage(idx_storage, 110)

    result = invoke(indexer_scheduler, False, [
        'schedule', 'reindex_origin_metadata',
        '--tool-id', str(tool_ids[0]),
    ])

    # Check the output
    expected_output = (
        'Scheduled 3 tasks (30 origins).\n'
        'Scheduled 6 tasks (55 origins).\n'
        'Done.\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    # Check scheduled tasks
    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 6
    _assert_tasks_for_origins(
        tasks,
        [x*2 for x in range(55)])


def test_journal_client(storage, indexer_scheduler):
    """Test the 'swh indexer journal-client' cli tool."""
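    # This test runs entirely offline: FakeKafkaMessage and
    # MockedKafkaConsumer (from swh.journal.tests.utils) stand in for a real
    # Kafka consumer, and patching swh.journal.client.Consumer below makes
    # the CLI use them, so the --broker address (192.0.2.1, a TEST-NET-1
    # address) is never actually contacted.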
    message = FakeKafkaMessage('swh.journal.objects.origin_visit', 'bogus', {
        'status': 'full',
        'origin': {
            'url': 'file://dev/0000',
        }
    })

    consumer = MockedKafkaConsumer([message])

    with patch('swh.journal.client.Consumer', return_value=consumer):
        result = invoke(indexer_scheduler, False, [
            'journal-client',
            '--max-messages', '1',
            '--broker', '192.0.2.1',
            '--prefix', 'swh.journal.objects',
            '--group-id', 'test-consumer',
        ])

    # Check the output
    expected_output = (
        'Processed 1 messages.\n'
        'Done.\n'
    )
    assert result.exit_code == 0, result.output
    assert result.output == expected_output

    # Check scheduled tasks
    tasks = indexer_scheduler.search_tasks()
    assert len(tasks) == 1
    _assert_tasks_for_origins(
        tasks,
        [0])
diff --git a/swh/indexer/tests/test_ctags.py b/swh/indexer/tests/test_ctags.py
index c0dff65..6ce1627 100644
--- a/swh/indexer/tests/test_ctags.py
+++ b/swh/indexer/tests/test_ctags.py
@@ -1,184 +1,184 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import json
import unittest

from unittest.mock import patch

import pytest

import swh.indexer.ctags
from swh.indexer.ctags import (
    CtagsIndexer, run_ctags
)

from swh.indexer.tests.utils import (
    CommonContentIndexerTest,
    SHA1_TO_CTAGS, BASE_TEST_CONFIG, OBJ_STORAGE_DATA, fill_storage,
    fill_obj_storage, filter_dict,
)


class BasicTest(unittest.TestCase):
-    @patch('swh.indexer.ctags.subprocess')
+    @patch("swh.indexer.ctags.subprocess")
    def test_run_ctags(self, mock_subprocess):
        """Computing ctags from raw content should return results
        """
        output0 = """
{"name":"defun","kind":"function","line":1,"language":"scheme"}
{"name":"name","kind":"symbol","line":5,"language":"else"}"""
        output1 = """
{"name":"let","kind":"var","line":10,"language":"something"}"""

        expected_result0 = [
            {
                'name': 'defun',
                'kind': 'function',
                'line': 1,
                'lang': 'scheme'
            },
            {
                'name': 'name',
                'kind': 'symbol',
                'line': 5,
                'lang': 'else'
            }
        ]

        expected_result1 = [
            {
                'name': 'let',
                'kind': 'var',
                'line': 10,
                'lang': 'something'
            }
        ]
        for path, lang, intermediary_result, expected_result in [
                (b'some/path', 'lisp', output0, expected_result0),
                (b'some/path/2', 'markdown', output1, expected_result1)
        ]:
            mock_subprocess.check_output.return_value = intermediary_result
            actual_result = list(run_ctags(path, lang=lang))
            self.assertEqual(actual_result, expected_result)


class InjectCtagsIndexer:
    """Override ctags computations.

    """
    def compute_ctags(self, path, lang):
        """Inject fake ctags given path (sha1 identifier).
""" return { 'lang': lang, **SHA1_TO_CTAGS.get(path) } CONFIG = { **BASE_TEST_CONFIG, 'tools': { 'name': 'universal-ctags', 'version': '~git7859817b', 'configuration': { 'command_line': '''ctags --fields=+lnz --sort=no ''' ''' --links=no ''', 'max_content_size': 1000, }, }, 'languages': { 'python': 'python', 'haskell': 'haskell', 'bar': 'bar', }, 'workdir': '/tmp', } class TestCtagsIndexer(CommonContentIndexerTest, unittest.TestCase): """Ctags indexer test scenarios: - Known sha1s in the input list have their data indexed - Unknown sha1 in the input list are not indexed """ legacy_get_format = True def get_indexer_results(self, ids): yield from self.idx_storage.content_ctags_get(ids) def setUp(self): super().setUp() self.indexer = CtagsIndexer(config=CONFIG) self.indexer.catch_exceptions = False self.idx_storage = self.indexer.idx_storage fill_storage(self.indexer.storage) fill_obj_storage(self.indexer.objstorage) # Prepare test input self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5' self.id1 = 'd4c647f0fc257591cc9ba1722484229780d1c607' self.id2 = '688a5ef812c53907562fe379d4b3851e69c7cb15' tool = {k.replace('tool_', ''): v for (k, v) in self.indexer.tool.items()} self.expected_results = { self.id0: { 'id': self.id0, 'tool': tool, **SHA1_TO_CTAGS[self.id0][0], }, self.id1: { 'id': self.id1, 'tool': tool, **SHA1_TO_CTAGS[self.id1][0], }, self.id2: { 'id': self.id2, 'tool': tool, **SHA1_TO_CTAGS[self.id2][0], } } self._set_mocks() def _set_mocks(self): def find_ctags_for_content(raw_content): for (sha1, ctags) in SHA1_TO_CTAGS.items(): if OBJ_STORAGE_DATA[sha1] == raw_content: return ctags else: raise ValueError(('%r not found in objstorage, can\'t mock ' 'its ctags.') % raw_content) def fake_language(raw_content, *args, **kwargs): ctags = find_ctags_for_content(raw_content) return {'lang': ctags[0]['lang']} self._real_compute_language = swh.indexer.ctags.compute_language swh.indexer.ctags.compute_language = fake_language def fake_check_output(cmd, *args, **kwargs): print(cmd) id_ = cmd[-1].split('/')[-1] return '\n'.join( json.dumps({'language': ctag['lang'], **ctag}) for ctag in SHA1_TO_CTAGS[id_]) self._real_check_output = swh.indexer.ctags.subprocess.check_output swh.indexer.ctags.subprocess.check_output = fake_check_output def tearDown(self): swh.indexer.ctags.compute_language = self._real_compute_language swh.indexer.ctags.subprocess.check_output = self._real_check_output super().tearDown() def test_ctags_w_no_tool(): with pytest.raises(ValueError): CtagsIndexer(config=filter_dict(CONFIG, 'tools')) diff --git a/swh/indexer/tests/test_fossology_license.py b/swh/indexer/tests/test_fossology_license.py index cd6030b..8b15b90 100644 --- a/swh/indexer/tests/test_fossology_license.py +++ b/swh/indexer/tests/test_fossology_license.py @@ -1,181 +1,181 @@ # Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest import pytest from unittest.mock import patch from typing import Any, Dict from swh.indexer import fossology_license from swh.indexer.fossology_license import ( FossologyLicenseIndexer, FossologyLicenseRangeIndexer, compute_license ) from swh.indexer.tests.utils import ( SHA1_TO_LICENSES, CommonContentIndexerTest, CommonContentIndexerRangeTest, BASE_TEST_CONFIG, fill_storage, fill_obj_storage, filter_dict, ) class BasicTest(unittest.TestCase): - 
@patch('swh.indexer.fossology_license.subprocess')
+    @patch("swh.indexer.fossology_license.subprocess")
    def test_compute_license(self, mock_subprocess):
        """Computing licenses from raw content should return results
        """
        for path, intermediary_result, output in [
                (b'some/path', None, []),
                (b'some/path/2', [], []),
                (b'other/path', ' contains license(s) GPL,AGPL',
                 ['GPL', 'AGPL'])]:
            mock_subprocess.check_output.return_value = intermediary_result

            actual_result = compute_license(path, log=None)
            self.assertEqual(actual_result, {
                'licenses': output,
                'path': path,
            })


def mock_compute_license(path, log=None):
    """path is the content identifier

    """
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    # path is something like /tmp/tmpXXX/ so we keep only the sha1 part
    path = path.split('/')[-1]
    return {
        'licenses': SHA1_TO_LICENSES.get(path)
    }


CONFIG = {
    **BASE_TEST_CONFIG,
    'workdir': '/tmp',
    'tools': {
        'name': 'nomos',
        'version': '3.1.0rc2-31-ga2cbb8c',
        'configuration': {
            'command_line': 'nomossa ',
        },
    },
}  # type: Dict[str, Any]

RANGE_CONFIG = dict(list(CONFIG.items()) + [('write_batch_size', 100)])


class TestFossologyLicenseIndexer(CommonContentIndexerTest,
                                  unittest.TestCase):
    """Fossology license indexer test scenarios:

    - Known sha1s in the input list have their data indexed
    - Unknown sha1 in the input list are not indexed

    """
    def get_indexer_results(self, ids):
        yield from self.idx_storage.content_fossology_license_get(ids)

    def setUp(self):
        super().setUp()
        # replace actual license computation with a mock
        self.orig_compute_license = fossology_license.compute_license
        fossology_license.compute_license = mock_compute_license

        self.indexer = FossologyLicenseIndexer(CONFIG)
        self.indexer.catch_exceptions = False
        self.idx_storage = self.indexer.idx_storage
        fill_storage(self.indexer.storage)
        fill_obj_storage(self.indexer.objstorage)

        self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5'
        self.id1 = '688a5ef812c53907562fe379d4b3851e69c7cb15'
        self.id2 = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'  # empty content

        tool = {k.replace('tool_', ''): v
                for (k, v) in self.indexer.tool.items()}
        # then
        self.expected_results = {
            self.id0: {
                'tool': tool,
                'licenses': SHA1_TO_LICENSES[self.id0],
            },
            self.id1: {
                'tool': tool,
                'licenses': SHA1_TO_LICENSES[self.id1],
            },
            self.id2: {
                'tool': tool,
                'licenses': SHA1_TO_LICENSES[self.id2],
            }
        }

    def tearDown(self):
        super().tearDown()
        fossology_license.compute_license = self.orig_compute_license


class TestFossologyLicenseRangeIndexer(
        CommonContentIndexerRangeTest, unittest.TestCase):
    """Range Fossology License Indexer tests.
- new data within range are indexed - no data outside a range are indexed - with filtering existing indexed data prior to compute new index - without filtering existing indexed data prior to compute new index """ def setUp(self): super().setUp() # replace actual license computation with a mock self.orig_compute_license = fossology_license.compute_license fossology_license.compute_license = mock_compute_license self.indexer = FossologyLicenseRangeIndexer(config=RANGE_CONFIG) self.indexer.catch_exceptions = False fill_storage(self.indexer.storage) fill_obj_storage(self.indexer.objstorage) self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5' self.id1 = '02fb2c89e14f7fab46701478c83779c7beb7b069' self.id2 = '103bc087db1d26afc3a0283f38663d081e9b01e6' tool_id = self.indexer.tool['id'] self.expected_results = { self.id0: { 'id': self.id0, 'indexer_configuration_id': tool_id, 'licenses': SHA1_TO_LICENSES[self.id0] }, self.id1: { 'id': self.id1, 'indexer_configuration_id': tool_id, 'licenses': SHA1_TO_LICENSES[self.id1] }, self.id2: { 'id': self.id2, 'indexer_configuration_id': tool_id, 'licenses': SHA1_TO_LICENSES[self.id2] } } def tearDown(self): super().tearDown() fossology_license.compute_license = self.orig_compute_license def test_fossology_w_no_tool(): with pytest.raises(ValueError): FossologyLicenseIndexer(config=filter_dict(CONFIG, 'tools')) def test_fossology_range_w_no_tool(): with pytest.raises(ValueError): FossologyLicenseRangeIndexer(config=filter_dict(RANGE_CONFIG, 'tools')) diff --git a/swh/indexer/tests/test_metadata.py b/swh/indexer/tests/test_metadata.py index ccdd7e7..20fe1cc 100644 --- a/swh/indexer/tests/test_metadata.py +++ b/swh/indexer/tests/test_metadata.py @@ -1,1211 +1,1287 @@ # Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json import unittest import attr from hypothesis import given, strategies, settings, HealthCheck from swh.model.hashutil import hash_to_bytes from swh.indexer.codemeta import CODEMETA_TERMS, CROSSWALK_TABLE from swh.indexer.codemeta import merge_documents from swh.indexer.metadata_dictionary import MAPPINGS from swh.indexer.metadata_dictionary.base import merge_values from swh.indexer.metadata_dictionary.maven import MavenMapping from swh.indexer.metadata_dictionary.npm import NpmMapping from swh.indexer.metadata_dictionary.ruby import GemspecMapping from swh.indexer.metadata_detector import ( detect_metadata ) from swh.indexer.metadata import ( ContentMetadataIndexer, RevisionMetadataIndexer ) from .utils import ( BASE_TEST_CONFIG, fill_obj_storage, fill_storage, YARN_PARSER_METADATA, json_document_strategy, xml_document_strategy, ) TRANSLATOR_TOOL = { 'name': 'swh-metadata-translator', 'version': '0.0.2', 'configuration': { 'type': 'local', 'context': 'NpmMapping' } } class ContentMetadataTestIndexer(ContentMetadataIndexer): """Specific Metadata whose configuration is enough to satisfy the indexing tests. """ def parse_config_file(self, *args, **kwargs): assert False, 'should not be called; the rev indexer configures it.' 
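# Illustrative sketch (not part of the original patch): the test indexer
# above is meant to be instantiated with an explicit config dict rather
# than a configuration file, which is why its parse_config_file() override
# asserts it is never called. Mirroring test_index_content_metadata_npm
# below:
#
#     config = {**BASE_TEST_CONFIG, 'tools': [TRANSLATOR_TOOL]}
#     metadata_indexer = ContentMetadataTestIndexer(config=config)
#     metadata_indexer.run(sha1s, policy_update='ignore-dups')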
REVISION_METADATA_CONFIG = { **BASE_TEST_CONFIG, 'tools': TRANSLATOR_TOOL, } class Metadata(unittest.TestCase): """ Tests metadata_mock_tool tool for Metadata detection """ def setUp(self): """ shows the entire diff in the results """ self.maxDiff = None self.npm_mapping = MAPPINGS['NpmMapping']() self.codemeta_mapping = MAPPINGS['CodemetaMapping']() self.maven_mapping = MAPPINGS['MavenMapping']() self.pkginfo_mapping = MAPPINGS['PythonPkginfoMapping']() self.gemspec_mapping = MAPPINGS['GemspecMapping']() def test_crosstable(self): self.assertEqual(CROSSWALK_TABLE['NodeJS'], { 'repository': 'http://schema.org/codeRepository', 'os': 'http://schema.org/operatingSystem', 'cpu': 'http://schema.org/processorRequirements', 'engines': 'http://schema.org/processorRequirements', 'author': 'http://schema.org/author', 'author.email': 'http://schema.org/email', 'author.name': 'http://schema.org/name', 'contributor': 'http://schema.org/contributor', 'keywords': 'http://schema.org/keywords', 'license': 'http://schema.org/license', 'version': 'http://schema.org/version', 'description': 'http://schema.org/description', 'name': 'http://schema.org/name', 'bugs': 'https://codemeta.github.io/terms/issueTracker', 'homepage': 'http://schema.org/url' }) def test_merge_values(self): self.assertEqual( merge_values('a', 'b'), ['a', 'b']) self.assertEqual( merge_values(['a', 'b'], 'c'), ['a', 'b', 'c']) self.assertEqual( merge_values('a', ['b', 'c']), ['a', 'b', 'c']) self.assertEqual( merge_values({'@list': ['a']}, {'@list': ['b']}), {'@list': ['a', 'b']}) self.assertEqual( merge_values({'@list': ['a', 'b']}, {'@list': ['c']}), {'@list': ['a', 'b', 'c']}) with self.assertRaises(ValueError): merge_values({'@list': ['a']}, 'b') with self.assertRaises(ValueError): merge_values('a', {'@list': ['b']}) with self.assertRaises(ValueError): merge_values({'@list': ['a']}, ['b']) with self.assertRaises(ValueError): merge_values(['a'], {'@list': ['b']}) self.assertEqual( merge_values('a', None), 'a') self.assertEqual( merge_values(['a', 'b'], None), ['a', 'b']) self.assertEqual( merge_values(None, ['b', 'c']), ['b', 'c']) self.assertEqual( merge_values({'@list': ['a']}, None), {'@list': ['a']}) self.assertEqual( merge_values(None, {'@list': ['a']}), {'@list': ['a']}) def test_compute_metadata_none(self): """ testing content empty content is empty should return None """ # given content = b"" # None if no metadata was found or an error occurred declared_metadata = None # when result = self.npm_mapping.translate(content) # then self.assertEqual(declared_metadata, result) def test_compute_metadata_npm(self): """ testing only computation of metadata with hard_mapping_npm """ # given content = b""" { "name": "test_metadata", "version": "0.0.2", "description": "Simple package.json test for indexer", "repository": { "type": "git", "url": "https://github.com/moranegg/metadata_test" }, "author": { "email": "moranegg@example.com", "name": "Morane G" } } """ declared_metadata = { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'test_metadata', 'version': '0.0.2', 'description': 'Simple package.json test for indexer', 'codeRepository': 'git+https://github.com/moranegg/metadata_test', 'author': [{ 'type': 'Person', 'name': 'Morane G', 'email': 'moranegg@example.com', }], } # when result = self.npm_mapping.translate(content) # then self.assertEqual(declared_metadata, result) def test_merge_documents(self): """ Test the creation of a coherent minimal metadata set """ # given metadata_list = [{ 
'@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'test_1', 'version': '0.0.2', 'description': 'Simple package.json test for indexer', 'codeRepository': 'git+https://github.com/moranegg/metadata_test', }, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'test_0_1', 'version': '0.0.2', 'description': 'Simple package.json test for indexer', 'codeRepository': 'git+https://github.com/moranegg/metadata_test' }, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'test_metadata', 'version': '0.0.2', - 'author': 'moranegg', + 'author': { + 'type': 'Person', + 'name': 'moranegg', + }, }] # when results = merge_documents(metadata_list) # then expected_results = { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', "version": '0.0.2', "description": 'Simple package.json test for indexer', "name": ['test_1', 'test_0_1', 'test_metadata'], - "author": ['moranegg'], + "author": [{ + 'type': 'Person', + 'name': 'moranegg' + }], "codeRepository": 'git+https://github.com/moranegg/metadata_test', } self.assertEqual(expected_results, results) + def test_merge_documents_ids(self): + # given + metadata_list = [{ + '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', + 'id': 'http://example.org/test1', + 'name': 'test_1', + }, { + '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', + 'id': 'http://example.org/test2', + 'name': 'test_2', + }] + + # when + results = merge_documents(metadata_list) + + # then + expected_results = { + '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', + 'id': 'http://example.org/test1', + 'schema:sameAs': 'http://example.org/test2', + "name": ['test_1', 'test_2'] + } + self.assertEqual(expected_results, results) + + def test_merge_documents_duplicate_ids(self): + # given + metadata_list = [{ + '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', + 'id': 'http://example.org/test1', + 'name': 'test_1', + }, { + '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', + 'id': 'http://example.org/test1', + 'name': 'test_1b', + }, { + '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', + 'id': 'http://example.org/test2', + 'name': 'test_2', + }] + + # when + results = merge_documents(metadata_list) + + # then + expected_results = { + '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', + 'id': 'http://example.org/test1', + 'schema:sameAs': 'http://example.org/test2', + "name": ['test_1', 'test_1b', 'test_2'] + } + self.assertEqual(expected_results, results) + def test_index_content_metadata_npm(self): """ testing NPM with package.json - one sha1 uses a file that can't be translated to metadata and should return None in the translated metadata """ # given sha1s = [ hash_to_bytes('26a9f72a7c87cc9205725cfd879f514ff4f3d8d5'), hash_to_bytes('d4c647f0fc257591cc9ba1722484229780d1c607'), hash_to_bytes('02fb2c89e14f7fab46701478c83779c7beb7b069'), ] # this metadata indexer computes only metadata for package.json # in npm context with a hard mapping config = BASE_TEST_CONFIG.copy() config['tools'] = [TRANSLATOR_TOOL] metadata_indexer = ContentMetadataTestIndexer(config=config) fill_obj_storage(metadata_indexer.objstorage) fill_storage(metadata_indexer.storage) # when metadata_indexer.run(sha1s, policy_update='ignore-dups') results = list(metadata_indexer.idx_storage.content_metadata_get( sha1s)) expected_results = [{ 'metadata': { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'codeRepository': 'git+https://github.com/moranegg/metadata_test', 
'description': 'Simple package.json test for indexer', 'name': 'test_metadata', 'version': '0.0.1' }, 'id': hash_to_bytes('26a9f72a7c87cc9205725cfd879f514ff4f3d8d5'), }, { 'metadata': { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'issueTracker': 'https://github.com/npm/npm/issues', 'author': [{ 'type': 'Person', 'name': 'Isaac Z. Schlueter', 'email': 'i@izs.me', 'url': 'http://blog.izs.me', }], 'codeRepository': 'git+https://github.com/npm/npm', 'description': 'a package manager for JavaScript', 'license': 'https://spdx.org/licenses/Artistic-2.0', 'version': '5.0.3', 'name': 'npm', 'keywords': [ 'install', 'modules', 'package manager', 'package.json' ], 'url': 'https://docs.npmjs.com/' }, 'id': hash_to_bytes('d4c647f0fc257591cc9ba1722484229780d1c607') }] for result in results: del result['tool'] # The assertion below returns False sometimes because of nested lists self.assertEqual(expected_results, results) def test_npm_bugs_normalization(self): # valid dictionary package_json = b"""{ "name": "foo", "bugs": { "url": "https://github.com/owner/project/issues", "email": "foo@example.com" } }""" result = self.npm_mapping.translate(package_json) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'foo', 'issueTracker': 'https://github.com/owner/project/issues', 'type': 'SoftwareSourceCode', }) # "invalid" dictionary package_json = b"""{ "name": "foo", "bugs": { "email": "foo@example.com" } }""" result = self.npm_mapping.translate(package_json) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'foo', 'type': 'SoftwareSourceCode', }) # string package_json = b"""{ "name": "foo", "bugs": "https://github.com/owner/project/issues" }""" result = self.npm_mapping.translate(package_json) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'foo', 'issueTracker': 'https://github.com/owner/project/issues', 'type': 'SoftwareSourceCode', }) def test_npm_repository_normalization(self): # normal package_json = b"""{ "name": "foo", "repository": { "type" : "git", "url" : "https://github.com/npm/cli.git" } }""" result = self.npm_mapping.translate(package_json) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'foo', 'codeRepository': 'git+https://github.com/npm/cli.git', 'type': 'SoftwareSourceCode', }) # missing url package_json = b"""{ "name": "foo", "repository": { "type" : "git" } }""" result = self.npm_mapping.translate(package_json) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'foo', 'type': 'SoftwareSourceCode', }) # github shortcut package_json = b"""{ "name": "foo", "repository": "github:npm/cli" }""" result = self.npm_mapping.translate(package_json) expected_result = { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'foo', 'codeRepository': 'git+https://github.com/npm/cli.git', 'type': 'SoftwareSourceCode', } self.assertEqual(result, expected_result) # github shortshortcut package_json = b"""{ "name": "foo", "repository": "npm/cli" }""" result = self.npm_mapping.translate(package_json) self.assertEqual(result, expected_result) # gitlab shortcut package_json = b"""{ "name": "foo", "repository": "gitlab:user/repo" }""" result = self.npm_mapping.translate(package_json) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'name': 'foo', 'codeRepository': 'git+https://gitlab.com/user/repo.git', 
'type': 'SoftwareSourceCode', }) def test_detect_metadata_package_json(self): # given df = [{ 'sha1_git': b'abc', 'name': b'index.js', 'target': b'abc', 'length': 897, 'status': 'visible', 'type': 'file', 'perms': 33188, 'dir_id': b'dir_a', 'sha1': b'bcd' }, { 'sha1_git': b'aab', 'name': b'package.json', 'target': b'aab', 'length': 712, 'status': 'visible', 'type': 'file', 'perms': 33188, 'dir_id': b'dir_a', 'sha1': b'cde' }] # when results = detect_metadata(df) expected_results = { 'NpmMapping': [ b'cde' ] } # then self.assertEqual(expected_results, results) def test_compute_metadata_valid_codemeta(self): raw_content = ( b"""{ "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "@type": "SoftwareSourceCode", "identifier": "CodeMeta", "description": "CodeMeta is a concept vocabulary that can be used to standardize the exchange of software metadata across repositories and organizations.", "name": "CodeMeta: Minimal metadata schemas for science software and code, in JSON-LD", "codeRepository": "https://github.com/codemeta/codemeta", "issueTracker": "https://github.com/codemeta/codemeta/issues", "license": "https://spdx.org/licenses/Apache-2.0", "version": "2.0", "author": [ { "@type": "Person", "givenName": "Carl", "familyName": "Boettiger", "email": "cboettig@gmail.com", "@id": "http://orcid.org/0000-0002-1642-628X" }, { "@type": "Person", "givenName": "Matthew B.", "familyName": "Jones", "email": "jones@nceas.ucsb.edu", "@id": "http://orcid.org/0000-0003-0077-4738" } ], "maintainer": { "@type": "Person", "givenName": "Carl", "familyName": "Boettiger", "email": "cboettig@gmail.com", "@id": "http://orcid.org/0000-0002-1642-628X" }, "contIntegration": "https://travis-ci.org/codemeta/codemeta", "developmentStatus": "active", "downloadUrl": "https://github.com/codemeta/codemeta/archive/2.0.zip", "funder": { "@id": "https://doi.org/10.13039/100000001", "@type": "Organization", "name": "National Science Foundation" }, "funding":"1549758; Codemeta: A Rosetta Stone for Metadata in Scientific Software", "keywords": [ "metadata", "software" ], "version":"2.0", "dateCreated":"2017-06-05", "datePublished":"2017-06-05", "programmingLanguage": "JSON-LD" }""") # noqa expected_result = { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "type": "SoftwareSourceCode", "identifier": "CodeMeta", "description": "CodeMeta is a concept vocabulary that can " "be used to standardize the exchange of software metadata " "across repositories and organizations.", "name": "CodeMeta: Minimal metadata schemas for science " "software and code, in JSON-LD", "codeRepository": "https://github.com/codemeta/codemeta", "issueTracker": "https://github.com/codemeta/codemeta/issues", "license": "https://spdx.org/licenses/Apache-2.0", "version": "2.0", "author": [ { "type": "Person", "givenName": "Carl", "familyName": "Boettiger", "email": "cboettig@gmail.com", "id": "http://orcid.org/0000-0002-1642-628X" }, { "type": "Person", "givenName": "Matthew B.", "familyName": "Jones", "email": "jones@nceas.ucsb.edu", "id": "http://orcid.org/0000-0003-0077-4738" } ], "maintainer": { "type": "Person", "givenName": "Carl", "familyName": "Boettiger", "email": "cboettig@gmail.com", "id": "http://orcid.org/0000-0002-1642-628X" }, "contIntegration": "https://travis-ci.org/codemeta/codemeta", "developmentStatus": "active", "downloadUrl": "https://github.com/codemeta/codemeta/archive/2.0.zip", "funder": { "id": "https://doi.org/10.13039/100000001", "type": "Organization", "name": "National Science Foundation" }, "funding": "1549758; 
Codemeta: A Rosetta Stone for Metadata " "in Scientific Software", "keywords": [ "metadata", "software" ], "version": "2.0", "dateCreated": "2017-06-05", "datePublished": "2017-06-05", "programmingLanguage": "JSON-LD" } result = self.codemeta_mapping.translate(raw_content) self.assertEqual(result, expected_result) def test_compute_metadata_codemeta_alternate_context(self): raw_content = ( b"""{ "@context": "https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld", "@type": "SoftwareSourceCode", "identifier": "CodeMeta" }""") # noqa expected_result = { "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "type": "SoftwareSourceCode", "identifier": "CodeMeta", } result = self.codemeta_mapping.translate(raw_content) self.assertEqual(result, expected_result) def test_compute_metadata_maven(self): raw_content = b""" Maven Default Project 4.0.0 com.mycompany.app my-app 1.2.3 central Maven Repository Switchboard default http://repo1.maven.org/maven2 false Apache License, Version 2.0 https://www.apache.org/licenses/LICENSE-2.0.txt repo A business-friendly OSS license """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'Maven Default Project', 'identifier': 'com.mycompany.app', 'version': '1.2.3', 'license': 'https://www.apache.org/licenses/LICENSE-2.0.txt', 'codeRepository': 'http://repo1.maven.org/maven2/com/mycompany/app/my-app', }) def test_compute_metadata_maven_empty(self): raw_content = b""" """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', }) def test_compute_metadata_maven_almost_empty(self): raw_content = b""" """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', }) def test_compute_metadata_maven_invalid_xml(self): expected_warning = ( 'WARNING:swh.indexer.metadata_dictionary.maven.MavenMapping:' 'Error parsing XML from foo') raw_content = b""" """ with self.assertLogs('swh.indexer.metadata_dictionary', level='WARNING') as cm: result = MAPPINGS["MavenMapping"]('foo').translate(raw_content) self.assertEqual(cm.output, [expected_warning]) self.assertEqual(result, None) raw_content = b""" """ with self.assertLogs('swh.indexer.metadata_dictionary', level='WARNING') as cm: result = MAPPINGS["MavenMapping"]('foo').translate(raw_content) self.assertEqual(cm.output, [expected_warning]) self.assertEqual(result, None) def test_compute_metadata_maven_unknown_encoding(self): expected_warning = ( 'WARNING:swh.indexer.metadata_dictionary.maven.MavenMapping:' 'Error detecting XML encoding from foo') raw_content = b""" """ with self.assertLogs('swh.indexer.metadata_dictionary', level='WARNING') as cm: result = MAPPINGS["MavenMapping"]('foo').translate(raw_content) self.assertEqual(cm.output, [expected_warning]) self.assertEqual(result, None) raw_content = b""" """ with self.assertLogs('swh.indexer.metadata_dictionary', level='WARNING') as cm: result = MAPPINGS["MavenMapping"]('foo').translate(raw_content) self.assertEqual(cm.output, [expected_warning]) self.assertEqual(result, None) def test_compute_metadata_maven_invalid_encoding(self): expected_warning = ( 'WARNING:swh.indexer.metadata_dictionary.maven.MavenMapping:' 'Error unidecoding XML from foo') raw_content = b""" """ with 
self.assertLogs('swh.indexer.metadata_dictionary', level='WARNING') as cm: result = MAPPINGS["MavenMapping"]('foo').translate(raw_content) self.assertEqual(cm.output, [expected_warning]) self.assertEqual(result, None) def test_compute_metadata_maven_minimal(self): raw_content = b""" Maven Default Project 4.0.0 com.mycompany.app my-app 1.2.3 """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'Maven Default Project', 'identifier': 'com.mycompany.app', 'version': '1.2.3', 'codeRepository': 'https://repo.maven.apache.org/maven2/com/mycompany/app/my-app', }) def test_compute_metadata_maven_empty_nodes(self): raw_content = b""" Maven Default Project 4.0.0 com.mycompany.app my-app 1.2.3 """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'Maven Default Project', 'identifier': 'com.mycompany.app', 'version': '1.2.3', 'codeRepository': 'https://repo.maven.apache.org/maven2/com/mycompany/app/my-app', }) raw_content = b""" Maven Default Project 4.0.0 com.mycompany.app my-app """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'Maven Default Project', 'identifier': 'com.mycompany.app', 'codeRepository': 'https://repo.maven.apache.org/maven2/com/mycompany/app/my-app', }) raw_content = b""" 4.0.0 com.mycompany.app my-app 1.2.3 """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'identifier': 'com.mycompany.app', 'version': '1.2.3', 'codeRepository': 'https://repo.maven.apache.org/maven2/com/mycompany/app/my-app', }) raw_content = b""" Maven Default Project 4.0.0 com.mycompany.app my-app 1.2.3 """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'Maven Default Project', 'identifier': 'com.mycompany.app', 'version': '1.2.3', 'codeRepository': 'https://repo.maven.apache.org/maven2/com/mycompany/app/my-app', }) raw_content = b""" 1.2.3 """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'version': '1.2.3', }) def test_compute_metadata_maven_invalid_licenses(self): raw_content = b""" Maven Default Project 4.0.0 com.mycompany.app my-app 1.2.3 foo """ result = self.maven_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'Maven Default Project', 'identifier': 'com.mycompany.app', 'version': '1.2.3', 'codeRepository': 'https://repo.maven.apache.org/maven2/com/mycompany/app/my-app', }) def test_compute_metadata_maven_multiple(self): '''Tests when there are multiple code repos and licenses.''' raw_content = b""" Maven Default Project 4.0.0 com.mycompany.app my-app 1.2.3 central Maven Repository Switchboard default http://repo1.maven.org/maven2 false example Example Maven Repo default http://example.org/maven2 Apache License, Version 2.0 https://www.apache.org/licenses/LICENSE-2.0.txt repo A business-friendly OSS license MIT license https://opensource.org/licenses/MIT """ result = 
    def test_compute_metadata_maven_multiple(self):
        '''Tests when there are multiple code repos and licenses.'''
        raw_content = b"""
        <project>
          <name>Maven Default Project</name>
          <modelVersion>4.0.0</modelVersion>
          <groupId>com.mycompany.app</groupId>
          <artifactId>my-app</artifactId>
          <version>1.2.3</version>
          <repositories>
            <repository>
              <id>central</id>
              <name>Maven Repository Switchboard</name>
              <layout>default</layout>
              <url>http://repo1.maven.org/maven2</url>
              <snapshots>
                <enabled>false</enabled>
              </snapshots>
            </repository>
            <repository>
              <id>example</id>
              <name>Example Maven Repo</name>
              <layout>default</layout>
              <url>http://example.org/maven2</url>
            </repository>
          </repositories>
          <licenses>
            <license>
              <name>Apache License, Version 2.0</name>
              <url>https://www.apache.org/licenses/LICENSE-2.0.txt</url>
              <distribution>repo</distribution>
              <comments>A business-friendly OSS license</comments>
            </license>
            <license>
              <name>MIT license</name>
              <url>https://opensource.org/licenses/MIT</url>
            </license>
          </licenses>
        </project>"""
        result = self.maven_mapping.translate(raw_content)
        self.assertEqual(result, {
            '@context': 'https://doi.org/10.5063/schema/codemeta-2.0',
            'type': 'SoftwareSourceCode',
            'name': 'Maven Default Project',
            'identifier': 'com.mycompany.app',
            'version': '1.2.3',
            'license': [
                'https://www.apache.org/licenses/LICENSE-2.0.txt',
                'https://opensource.org/licenses/MIT',
            ],
            'codeRepository': [
                'http://repo1.maven.org/maven2/com/mycompany/app/my-app',
                'http://example.org/maven2/com/mycompany/app/my-app',
            ]
        })

    def test_compute_metadata_pkginfo(self):
        raw_content = (b"""\
Metadata-Version: 2.1
Name: swh.core
Version: 0.0.49
Summary: Software Heritage core utilities
Home-page: https://forge.softwareheritage.org/diffusion/DCORE/
Author: Software Heritage developers
Author-email: swh-devel@inria.fr
License: UNKNOWN
Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest
Project-URL: Funding, https://www.softwareheritage.org/donate
Project-URL: Source, https://forge.softwareheritage.org/source/swh-core
Description: swh-core
        ========
       \x20
        core library for swh's modules:
        - config parser
        - hash computations
        - serialization
        - logging mechanism
       \x20
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
Classifier: Operating System :: OS Independent
Classifier: Development Status :: 5 - Production/Stable
Description-Content-Type: text/markdown
Provides-Extra: testing
""")  # noqa
        result = self.pkginfo_mapping.translate(raw_content)
        self.assertCountEqual(result['description'], [
            'Software Heritage core utilities',  # note the comma here
            'swh-core\n'
            '========\n'
            '\n'
            "core library for swh's modules:\n"
            '- config parser\n'
            '- hash computations\n'
            '- serialization\n'
            '- logging mechanism\n'
            ''], result)
        del result['description']
        self.assertEqual(result, {
            '@context': 'https://doi.org/10.5063/schema/codemeta-2.0',
            'type': 'SoftwareSourceCode',
            'url': 'https://forge.softwareheritage.org/diffusion/DCORE/',
            'name': 'swh.core',
            'author': [{
                'type': 'Person',
                'name': 'Software Heritage developers',
                'email': 'swh-devel@inria.fr',
            }],
            'version': '0.0.49',
        })

    def test_compute_metadata_pkginfo_utf8(self):
        raw_content = (b'''\
Metadata-Version: 1.1
Name: snowpyt
Description-Content-Type: UNKNOWN
Description: foo
        Hydrology N\xc2\xb083
''')  # noqa
        result = self.pkginfo_mapping.translate(raw_content)
        self.assertEqual(result, {
            '@context': 'https://doi.org/10.5063/schema/codemeta-2.0',
            'type': 'SoftwareSourceCode',
            'name': 'snowpyt',
            'description': 'foo\nHydrology N°83',
        })

    def test_compute_metadata_pkginfo_keywords(self):
        raw_content = (b"""\
Metadata-Version: 2.1
Name: foo
Keywords: foo bar baz
""")  # noqa
        result = self.pkginfo_mapping.translate(raw_content)
        self.assertEqual(result, {
            '@context': 'https://doi.org/10.5063/schema/codemeta-2.0',
            'type': 'SoftwareSourceCode',
            'name': 'foo',
            'keywords': ['foo', 'bar', 'baz'],
        })

    def test_compute_metadata_pkginfo_license(self):
        raw_content = (b"""\
Metadata-Version: 2.1
Name: foo
License: MIT
""")  # noqa
        result = self.pkginfo_mapping.translate(raw_content)
        self.assertEqual(result, {
            '@context': 'https://doi.org/10.5063/schema/codemeta-2.0',
            'type': 'SoftwareSourceCode',
            'name': 'foo',
            'license': 'MIT',
        })
s.authors = ["Ruby Coder"] s.email = 'rubycoder@example.com' s.files = ["lib/example.rb"] s.homepage = 'https://rubygems.org/gems/example' s.metadata = { "source_code_uri" => "https://github.com/example/example" } end""" result = self.gemspec_mapping.translate(raw_content) self.assertCountEqual(result.pop('description'), [ "This is an example!", "Much longer explanation of the example!" ]) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', - 'author': ['Ruby Coder'], + 'author': [ + { + 'type': 'Person', + 'name': 'Ruby Coder' + } + ], 'name': 'example', 'license': 'https://spdx.org/licenses/MIT', 'codeRepository': 'https://rubygems.org/gems/example', 'email': 'rubycoder@example.com', 'version': '0.1.0', }) def test_gemspec_two_author_fields(self): raw_content = b""" Gem::Specification.new do |s| s.authors = ["Ruby Coder1"] s.author = "Ruby Coder2" end""" result = self.gemspec_mapping.translate(raw_content) self.assertCountEqual(result.pop('author'), [ - 'Ruby Coder1', 'Ruby Coder2']) + { + 'type': 'Person', + 'name': 'Ruby Coder1' + }, + { + 'type': 'Person', + 'name': 'Ruby Coder2' + }, + ]) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', }) def test_gemspec_invalid_author(self): raw_content = b""" Gem::Specification.new do |s| s.author = ["Ruby Coder"] end""" result = self.gemspec_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', }) raw_content = b""" Gem::Specification.new do |s| s.author = "Ruby Coder1", end""" result = self.gemspec_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', }) raw_content = b""" Gem::Specification.new do |s| s.authors = ["Ruby Coder1", ["Ruby Coder2"]] end""" result = self.gemspec_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', - 'author': ['Ruby Coder1'], + 'author': [ + { + 'type': 'Person', + 'name': 'Ruby Coder1' + } + ], }) def test_gemspec_alternative_header(self): raw_content = b""" require './lib/version' Gem::Specification.new { |s| s.name = 'rb-system-with-aliases' s.summary = 'execute system commands with aliases' } """ result = self.gemspec_mapping.translate(raw_content) self.assertEqual(result, { '@context': 'https://doi.org/10.5063/schema/codemeta-2.0', 'type': 'SoftwareSourceCode', 'name': 'rb-system-with-aliases', 'description': 'execute system commands with aliases', }) @settings(suppress_health_check=[HealthCheck.too_slow]) @given(json_document_strategy(keys=list(NpmMapping.mapping))) def test_npm_adversarial(self, doc): raw = json.dumps(doc).encode() self.npm_mapping.translate(raw) @settings(suppress_health_check=[HealthCheck.too_slow]) @given(json_document_strategy(keys=CODEMETA_TERMS)) def test_codemeta_adversarial(self, doc): raw = json.dumps(doc).encode() self.codemeta_mapping.translate(raw) @settings(suppress_health_check=[HealthCheck.too_slow]) @given(xml_document_strategy( keys=list(MavenMapping.mapping), root='project', xmlns='http://maven.apache.org/POM/4.0.0')) def test_maven_adversarial(self, doc): self.maven_mapping.translate(doc) @settings(suppress_health_check=[HealthCheck.too_slow]) @given(strategies.dictionaries( # keys strategies.one_of( strategies.text(), *map(strategies.just, GemspecMapping.mapping) ), # 
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(json_document_strategy(keys=list(NpmMapping.mapping)))
    def test_npm_adversarial(self, doc):
        raw = json.dumps(doc).encode()
        self.npm_mapping.translate(raw)

    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(json_document_strategy(keys=CODEMETA_TERMS))
    def test_codemeta_adversarial(self, doc):
        raw = json.dumps(doc).encode()
        self.codemeta_mapping.translate(raw)

    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(xml_document_strategy(
        keys=list(MavenMapping.mapping),
        root='project',
        xmlns='http://maven.apache.org/POM/4.0.0'))
    def test_maven_adversarial(self, doc):
        self.maven_mapping.translate(doc)

    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(strategies.dictionaries(
        # keys
        strategies.one_of(
            strategies.text(),
            *map(strategies.just, GemspecMapping.mapping)
        ),
        # values
        strategies.recursive(
            strategies.characters(),
            lambda children: strategies.lists(children, 1)
        )
    ))
    def test_gemspec_adversarial(self, doc):
        parts = [b'Gem::Specification.new do |s|\n']
        for (k, v) in doc.items():
            parts.append('  s.{} = {}\n'.format(k, repr(v)).encode())
        parts.append(b'end\n')
        self.gemspec_mapping.translate(b''.join(parts))

    def test_revision_metadata_indexer(self):
        metadata_indexer = RevisionMetadataIndexer(
            config=REVISION_METADATA_CONFIG)
        fill_obj_storage(metadata_indexer.objstorage)
        fill_storage(metadata_indexer.storage)

        tool = metadata_indexer.idx_storage.indexer_configuration_get(
            {'tool_'+k: v for (k, v) in TRANSLATOR_TOOL.items()})
        assert tool is not None

        metadata_indexer.idx_storage.content_metadata_add([{
            'indexer_configuration_id': tool['id'],
            'id': b'cde',
            'metadata': YARN_PARSER_METADATA,
        }])

        sha1_gits = [
            hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
        ]
        metadata_indexer.run(sha1_gits, 'update-dups')

        results = list(
            metadata_indexer.idx_storage.
            revision_intrinsic_metadata_get(sha1_gits))

        expected_results = [{
            'id': hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
            'tool': TRANSLATOR_TOOL,
            'metadata': YARN_PARSER_METADATA,
            'mappings': ['npm'],
        }]

        for result in results:
            del result['tool']['id']

        # then
        self.assertEqual(expected_results, results)

    def test_revision_metadata_indexer_single_root_dir(self):
        metadata_indexer = RevisionMetadataIndexer(
            config=REVISION_METADATA_CONFIG)
        fill_obj_storage(metadata_indexer.objstorage)
        fill_storage(metadata_indexer.storage)

        # Add a parent directory, that is the only directory at the root
        # of the revision
        rev_id = hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f')
        rev = metadata_indexer.storage._revisions[rev_id]
        subdir_id = rev.directory
        rev = attr.evolve(rev, directory=b'123456')
        metadata_indexer.storage.directory_add([{
            'id': b'123456',
            'entries': [{
                'name': b'foobar-1.0.0',
                'type': 'dir',
                'target': subdir_id,
                'perms': 16384,
            }],
        }])

        tool = metadata_indexer.idx_storage.indexer_configuration_get(
            {'tool_'+k: v for (k, v) in TRANSLATOR_TOOL.items()})
        assert tool is not None

        metadata_indexer.idx_storage.content_metadata_add([{
            'indexer_configuration_id': tool['id'],
            'id': b'cde',
            'metadata': YARN_PARSER_METADATA,
        }])

        sha1_gits = [
            hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
        ]
        metadata_indexer.run(sha1_gits, 'update-dups')
        results = list(
            metadata_indexer.idx_storage.
            revision_intrinsic_metadata_get(sha1_gits))

        expected_results = [{
            'id': hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
            'tool': TRANSLATOR_TOOL,
            'metadata': YARN_PARSER_METADATA,
            'mappings': ['npm'],
        }]

        for result in results:
            del result['tool']['id']

        # then
        self.assertEqual(expected_results, results)
diff --git a/swh/indexer/tests/utils.py b/swh/indexer/tests/utils.py
index 7d8d0c6..d645799 100644
--- a/swh/indexer/tests/utils.py
+++ b/swh/indexer/tests/utils.py
@@ -1,760 +1,762 @@
# Copyright (C) 2017-2019  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import abc
import datetime
import functools
import random
+from typing import Dict, Any
import unittest

from hypothesis import strategies

from swh.model import hashutil
from swh.model.hashutil import hash_to_bytes, hash_to_hex

from swh.indexer.storage import INDEXER_CFG_KEY

-BASE_TEST_CONFIG = {
+BASE_TEST_CONFIG: Dict[str, Dict[str, Any]] = {
    'storage': {
        'cls': 'memory',
-        'args': {
-        },
    },
    'objstorage': {
        'cls': 'memory',
        'args': {
        },
    },
    INDEXER_CFG_KEY: {
        'cls': 'memory',
        'args': {
        },
    },
}

ORIGIN_VISITS = [
    {
        'type': 'git',
        'url': 'https://github.com/SoftwareHeritage/swh-storage'},
    {
        'type': 'ftp',
        'url': 'rsync://ftp.gnu.org/gnu/3dldf'},
    {
        'type': 'deposit',
        'url': 'https://forge.softwareheritage.org/source/jesuisgpl/'},
    {
        'type': 'pypi',
        'url': 'https://pypi.org/project/limnoria/'},
    {
        'type': 'svn',
        'url': 'http://0-512-md.googlecode.com/svn/'},
    {
        'type': 'git',
        'url': 'https://github.com/librariesio/yarn-parser'},
    {
        'type': 'git',
        'url': 'https://github.com/librariesio/yarn-parser.git'},
]

SNAPSHOTS = [
    {
        'origin': 'https://github.com/SoftwareHeritage/swh-storage',
        'branches': {
            b'refs/heads/add-revision-origin-cache': {
                'target': b'L[\xce\x1c\x88\x8eF\t\xf1"\x19\x1e\xfb\xc0'
                          b's\xe7/\xe9l\x1e',
                'target_type': 'revision'},
-            b'HEAD': {
+            b'refs/head/master': {
                'target': b'8K\x12\x00d\x03\xcc\xe4]bS\xe3\x8f{\xd7}'
                          b'\xac\xefrm',
                'target_type': 'revision'},
+            b'HEAD': {
+                'target': b'refs/head/master',
+                'target_type': 'alias'},
            b'refs/tags/v0.0.103': {
                'target': b'\xb6"Im{\xfdLb\xb0\x94N\xea\x96m\x13x\x88+'
                          b'\x0f\xdd',
                'target_type': 'release'},
        }},
    {
        'origin': 'rsync://ftp.gnu.org/gnu/3dldf',
        'branches': {
            b'3DLDF-1.1.4.tar.gz': {
                'target': b'dJ\xfb\x1c\x91\xf4\x82B%]6\xa2\x90|\xd3\xfc'
                          b'"G\x99\x11',
                'target_type': 'revision'},
            b'3DLDF-2.0.2.tar.gz': {
                'target': b'\xb6\x0e\xe7\x9e9\xac\xaa\x19\x9e='
                          b'\xd1\xc5\x00\\\xc6\xfc\xe0\xa6\xb4V',
                'target_type': 'revision'},
            b'3DLDF-2.0.3-examples.tar.gz': {
                'target': b'!H\x19\xc0\xee\x82-\x12F1\xbd\x97'
                          b'\xfe\xadZ\x80\x80\xc1\x83\xff',
                'target_type': 'revision'},
            b'3DLDF-2.0.3.tar.gz': {
                'target': b'\x8e\xa9\x8e/\xea}\x9feF\xf4\x9f\xfd\xee'
                          b'\xcc\x1a\xb4`\x8c\x8by',
                'target_type': 'revision'},
            b'3DLDF-2.0.tar.gz': {
                'target': b'F6*\xff(?\x19a\xef\xb6\xc2\x1fv$S\xe3G'
                          b'\xd3\xd1m',
                'target_type': 'revision'}
        }},
    {
        'origin': 'https://forge.softwareheritage.org/source/jesuisgpl/',
        'branches': {
            b'master': {
                'target': b'\xe7n\xa4\x9c\x9f\xfb\xb7\xf76\x11\x08{'
                          b'\xa6\xe9\x99\xb1\x9e]q\xeb',
                'target_type': 'revision'}
        },
        'id': b"h\xc0\xd2a\x04\xd4~'\x8d\xd6\xbe\x07\xeda\xfa\xfbV"
              b"\x1d\r "},
    {
        'origin': 'https://pypi.org/project/limnoria/',
        'branches': {
            b'HEAD': {
                'target': b'releases/2018.09.09',
                'target_type': 'alias'},
            b'releases/2018.09.01': {
                'target': b'<\xee1(\xe8\x8d_\xc1\xc9\xa6rT\xf1\x1d'
                          b'\xbb\xdfF\xfdw\xcf',
                'target_type': 'revision'},
            b'releases/2018.09.09': {
                'target': b'\x83\xb9\xb6\xc7\x05\xb1%\xd0\xfem\xd8k'
                          b'A\x10\x9d\xc5\xfa2\xf8t',
                'target_type': 'revision'}},
        'id': b'{\xda\x8e\x84\x7fX\xff\x92\x80^\x93V\x18\xa3\xfay'
              b'\x12\x9e\xd6\xb3'},
    {
        'origin': 'http://0-512-md.googlecode.com/svn/',
        'branches': {
            b'master': {
                'target': b'\xe4?r\xe1,\x88\xab\xec\xe7\x9a\x87\xb8'
                          b'\xc9\xad#.\x1bw=\x18',
                'target_type': 'revision'}},
        'id': b'\xa1\xa2\x8c\n\xb3\x87\xa8\xf9\xe0a\x8c\xb7'
              b'\x05\xea\xb8\x1f\xc4H\xf4s'},
    {
        'origin': 'https://github.com/librariesio/yarn-parser',
        'branches': {
            b'HEAD': {
                'target': hash_to_bytes(
                    '8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
                'target_type': 'revision'}}},
    {
        'origin': 'https://github.com/librariesio/yarn-parser.git',
        'branches': {
            b'HEAD': {
                'target': hash_to_bytes(
                    '8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
                'target_type': 'revision'}}},
]

REVISIONS = [{
    'id': hash_to_bytes('8dbb6aeb036e7fd80664eb8bfd1507881af1ba9f'),
    'message': 'Improve search functionality',
    'author': {
        'name': b'Andrew Nesbitt',
        'fullname': b'Andrew Nesbitt <andrewnez@gmail.com>',
        'email': b'andrewnez@gmail.com'
    },
    'committer': {
        'name': b'Andrew Nesbitt',
        'fullname': b'Andrew Nesbitt <andrewnez@gmail.com>',
        'email': b'andrewnez@gmail.com'
    },
    'committer_date': {
        'negative_utc': None,
        'offset': 120,
        'timestamp': {
            'microseconds': 0,
            'seconds': 1380883849
        }
    },
    'type': 'git',
    'synthetic': False,
    'date': {
        'negative_utc': False,
        'timestamp': {
            'seconds': 1487596456,
            'microseconds': 0
        },
        'offset': 0
    },
    'directory': b'10'
}]

DIRECTORY_ID = b'10'

DIRECTORY_ENTRIES = [{
    'name': b'index.js',
    'type': 'file',
    'target': b'abc',
    'perms': 33188,
}, {
    'name': b'package.json',
    'type': 'file',
    'target': b'cde',
    'perms': 33188,
}, {
    'name': b'.github',
    'type': 'dir',
    'target': b'11',
    'perms': 16384,
}
]

SHA1_TO_LICENSES = {
    '01c9379dfc33803963d07c1ccc748d3fe4c96bb5': ['GPL'],
    '02fb2c89e14f7fab46701478c83779c7beb7b069': ['Apache2.0'],
    '103bc087db1d26afc3a0283f38663d081e9b01e6': ['MIT'],
    '688a5ef812c53907562fe379d4b3851e69c7cb15': ['AGPL'],
    'da39a3ee5e6b4b0d3255bfef95601890afd80709': [],
}

SHA1_TO_CTAGS = {
    '01c9379dfc33803963d07c1ccc748d3fe4c96bb5': [{
        'name': 'foo',
        'kind': 'str',
        'line': 10,
        'lang': 'bar',
    }],
    'd4c647f0fc257591cc9ba1722484229780d1c607': [{
        'name': 'let',
        'kind': 'int',
        'line': 100,
        'lang': 'haskell',
    }],
    '688a5ef812c53907562fe379d4b3851e69c7cb15': [{
        'name': 'symbol',
        'kind': 'float',
        'line': 99,
        'lang': 'python',
    }],
}
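# Editor's sketch (illustrative addition, not part of the original module):
# the hex keys used throughout these fixtures convert to and from the byte
# identifiers expected by the storage API via swh.model.hashutil, e.g.:
#
#   from swh.model.hashutil import hash_to_bytes, hash_to_hex
#   hash_to_hex(b'bcd')      # '626364'
#   hash_to_bytes('626364')  # b'bcd'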
OBJ_STORAGE_DATA = {
    '01c9379dfc33803963d07c1ccc748d3fe4c96bb5': b'this is some text',
    '688a5ef812c53907562fe379d4b3851e69c7cb15': b'another text',
    '8986af901dd2043044ce8f0d8fc039153641cf17': b'yet another text',
    '02fb2c89e14f7fab46701478c83779c7beb7b069': b"""
    import unittest
    import logging
    from swh.indexer.mimetype import MimetypeIndexer
    from swh.indexer.tests.test_utils import MockObjStorage

    class MockStorage():
        def content_mimetype_add(self, mimetypes):
            self.state = mimetypes
            self.conflict_update = conflict_update

        def indexer_configuration_add(self, tools):
            return [{
                'id': 10,
            }]
    """,
    '103bc087db1d26afc3a0283f38663d081e9b01e6': b"""
    #ifndef __AVL__
    #define __AVL__

    typedef struct _avl_tree avl_tree;

    typedef struct _data_t {
      int content;
    } data_t;
    """,
    '93666f74f1cf635c8c8ac118879da6ec5623c410': b"""
    (should 'pygments (recognize 'lisp 'easily))

    """,
    '26a9f72a7c87cc9205725cfd879f514ff4f3d8d5': b"""
    {
      "name": "test_metadata",
      "version": "0.0.1",
      "description": "Simple package.json test for indexer",
      "repository": {
        "type": "git",
        "url": "https://github.com/moranegg/metadata_test"
      }
    }
    """,
    'd4c647f0fc257591cc9ba1722484229780d1c607': b"""
    {
      "version": "5.0.3",
      "name": "npm",
      "description": "a package manager for JavaScript",
      "keywords": [
        "install",
        "modules",
        "package manager",
        "package.json"
      ],
      "preferGlobal": true,
      "config": {
        "publishtest": false
      },
      "homepage": "https://docs.npmjs.com/",
      "author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me)",
      "repository": {
        "type": "git",
        "url": "https://github.com/npm/npm"
      },
      "bugs": {
        "url": "https://github.com/npm/npm/issues"
      },
      "dependencies": {
        "JSONStream": "~1.3.1",
        "abbrev": "~1.1.0",
        "ansi-regex": "~2.1.1",
        "ansicolors": "~0.3.2",
        "ansistyles": "~0.1.3"
      },
      "devDependencies": {
        "tacks": "~1.2.6",
        "tap": "~10.3.2"
      },
      "license": "Artistic-2.0"
    }
    """,
    'a7ab314d8a11d2c93e3dcf528ca294e7b431c449': b"""
    """,
    'da39a3ee5e6b4b0d3255bfef95601890afd80709': b'',
    # 626364
    hash_to_hex(b'bcd'): b'unimportant content for bcd',
    # 636465
    hash_to_hex(b'cde'): b"""
    {
      "name": "yarn-parser",
      "version": "1.0.0",
      "description": "Tiny web service for parsing yarn.lock files",
      "main": "index.js",
      "scripts": {
        "start": "node index.js",
        "test": "mocha"
      },
      "engines": {
        "node": "9.8.0"
      },
      "repository": {
        "type": "git",
        "url": "git+https://github.com/librariesio/yarn-parser.git"
      },
      "keywords": [
        "yarn",
        "parse",
        "lock",
        "dependencies"
      ],
      "author": "Andrew Nesbitt",
      "license": "AGPL-3.0",
      "bugs": {
        "url": "https://github.com/librariesio/yarn-parser/issues"
      },
      "homepage": "https://github.com/librariesio/yarn-parser#readme",
      "dependencies": {
        "@yarnpkg/lockfile": "^1.0.0",
        "body-parser": "^1.15.2",
        "express": "^4.14.0"
      },
      "devDependencies": {
        "chai": "^4.1.2",
        "mocha": "^5.2.0",
        "request": "^2.87.0",
        "test": "^0.6.0"
      }
    }
    """
}

YARN_PARSER_METADATA = {
    '@context': 'https://doi.org/10.5063/schema/codemeta-2.0',
    'url': 'https://github.com/librariesio/yarn-parser#readme',
    'codeRepository':
        'git+git+https://github.com/librariesio/yarn-parser.git',
    'author': [{
        'type': 'Person',
        'name': 'Andrew Nesbitt'
    }],
    'license': 'https://spdx.org/licenses/AGPL-3.0',
    'version': '1.0.0',
    'description':
-        'Tiny web service for parsing yarn.lock files',
+        "Tiny web service for parsing yarn.lock files",
    'issueTracker': 'https://github.com/librariesio/yarn-parser/issues',
    'name': 'yarn-parser',
    'keywords': ['yarn', 'parse', 'lock', 'dependencies'],
    'type': 'SoftwareSourceCode',
}

json_dict_keys = strategies.one_of(
    strategies.characters(),
    strategies.just('type'),
    strategies.just('url'),
    strategies.just('name'),
    strategies.just('email'),
    strategies.just('@id'),
    strategies.just('@context'),
    strategies.just('repository'),
    strategies.just('license'),
    strategies.just('repositories'),
    strategies.just('licenses'),
)
"""Hypothesis strategy that generates strings, with an emphasis on those
that are often used as dictionary keys in metadata files."""

generic_json_document = strategies.recursive(
    strategies.none() | strategies.booleans() | strategies.floats() |
    strategies.characters(),
    lambda children: (
        strategies.lists(children, 1) |
        strategies.dictionaries(json_dict_keys, children, min_size=1)
    )
)
"""Hypothesis strategy that generates possible values for values of JSON
metadata files."""

def json_document_strategy(keys=None):
    """Generates a hypothesis strategy that generates metadata files
    for a JSON-based format that uses the given keys."""
    if keys is None:
        keys = strategies.characters()
    else:
        keys = strategies.one_of(map(strategies.just, keys))

    return strategies.dictionaries(keys, generic_json_document, min_size=1)
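# Editor's sketch: a typical consumer of json_document_strategy, mirroring
# the @given(...) adversarial tests in test_metadata.py ('some_mapping' is a
# hypothetical stand-in for one of the mapping instances):
#
#   import json
#   from hypothesis import given
#
#   @given(json_document_strategy(keys=['name', 'version']))
#   def test_mapping_survives_arbitrary_json(doc):
#       some_mapping.translate(json.dumps(doc).encode())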
def _tree_to_xml(root, xmlns, data):
    def encode(s):
        "Skips unpaired surrogates generated by json_document_strategy"
        return s.encode('utf8', 'replace')

    def to_xml(data, indent=b' '):
        if data is None:
            return b''
        elif isinstance(data, (bool, str, int, float)):
            return indent + encode(str(data))
        elif isinstance(data, list):
            return b'\n'.join(to_xml(v, indent=indent) for v in data)
        elif isinstance(data, dict):
            lines = []
            for (key, value) in data.items():
                lines.append(indent + encode('<{}>'.format(key)))
                lines.append(to_xml(value, indent=indent+b' '))
                lines.append(indent + encode('</{}>'.format(key)))
            return b'\n'.join(lines)
        else:
            raise TypeError(data)

    return b'\n'.join([
        '<{} xmlns="{}">'.format(root, xmlns).encode(),
        to_xml(data),
        '</{}>'.format(root).encode(),
    ])

class TreeToXmlTest(unittest.TestCase):
    def test_leaves(self):
        self.assertEqual(
            _tree_to_xml('root', 'http://example.com', None),
            b'<root xmlns="http://example.com">\n\n</root>'
        )
        self.assertEqual(
            _tree_to_xml('root', 'http://example.com', True),
            b'<root xmlns="http://example.com">\n True\n</root>'
        )
        self.assertEqual(
            _tree_to_xml('root', 'http://example.com', 'abc'),
            b'<root xmlns="http://example.com">\n abc\n</root>'
        )
        self.assertEqual(
            _tree_to_xml('root', 'http://example.com', 42),
            b'<root xmlns="http://example.com">\n 42\n</root>'
        )
        self.assertEqual(
            _tree_to_xml('root', 'http://example.com', 3.14),
            b'<root xmlns="http://example.com">\n 3.14\n</root>'
        )

    def test_dict(self):
        self.assertIn(
            _tree_to_xml('root', 'http://example.com', {
                'foo': 'bar',
                'baz': 'qux'
            }),
            [
                b'<root xmlns="http://example.com">\n'
                b' <foo>\n  bar\n </foo>\n'
                b' <baz>\n  qux\n </baz>\n'
                b'</root>',
                b'<root xmlns="http://example.com">\n'
                b' <baz>\n  qux\n </baz>\n'
                b' <foo>\n  bar\n </foo>\n'
                b'</root>'
            ]
        )

    def test_list(self):
        self.assertEqual(
            _tree_to_xml('root', 'http://example.com', [
                {'foo': 'bar'},
                {'foo': 'baz'},
            ]),
            b'<root xmlns="http://example.com">\n'
            b' <foo>\n  bar\n </foo>\n'
            b' <foo>\n  baz\n </foo>\n'
            b'</root>'
        )

def xml_document_strategy(keys, root, xmlns):
    """Generates a hypothesis strategy that generates metadata files
    for an XML format that uses the given keys."""

    return strategies.builds(
        functools.partial(_tree_to_xml, root, xmlns),
        json_document_strategy(keys))

def filter_dict(d, keys):
    'return a copy of the dict with keys deleted'
    if not isinstance(keys, (list, tuple)):
        keys = (keys, )
    return dict((k, v) for (k, v) in d.items() if k not in keys)

def fill_obj_storage(obj_storage):
    """Add some content in an object storage."""
    for (obj_id, content) in OBJ_STORAGE_DATA.items():
        obj_storage.add(content, obj_id=hash_to_bytes(obj_id))

def fill_storage(storage):
    visit_types = {}
    for visit in ORIGIN_VISITS:
        storage.origin_add_one({'url': visit['url']})
        visit_types[visit['url']] = visit['type']
    for snap in SNAPSHOTS:
        origin_url = snap['origin']
        visit = storage.origin_visit_add(
            origin=origin_url,
            date=datetime.datetime.now(),
            type=visit_types[origin_url])
        snap_id = snap.get('id') or \
            bytes([random.randint(0, 255) for _ in range(32)])
        storage.snapshot_add([{
            'id': snap_id,
            'branches': snap['branches']
        }])
        storage.origin_visit_update(
            origin_url, visit['visit'], status='full',
            snapshot=snap_id)
    storage.revision_add(REVISIONS)

    contents = []
    for (obj_id, content) in OBJ_STORAGE_DATA.items():
        content_hashes = hashutil.MultiHash.from_data(content).digest()
        contents.append({
            'data': content,
            'length': len(content),
            'status': 'visible',
            'sha1': hash_to_bytes(obj_id),
            'sha1_git': hash_to_bytes(obj_id),
            'sha256': content_hashes['sha256'],
            'blake2s256': content_hashes['blake2s256']
        })
    storage.content_add(contents)
    storage.directory_add([{
        'id': DIRECTORY_ID,
        'entries': DIRECTORY_ENTRIES,
    }])
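# Editor's sketch: the two fill_* helpers above are designed to be called as
# a pair when setting up an indexer test, as the revision metadata tests do
# ('SomeIndexer' and its config are hypothetical stand-ins):
#
#   indexer = SomeIndexer(config=SOME_TEST_CONFIG)
#   fill_obj_storage(indexer.objstorage)
#   fill_storage(indexer.storage)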
class CommonContentIndexerTest(metaclass=abc.ABCMeta):
    legacy_get_format = False
    """True if and only if the tested indexer uses the legacy format.
    see: https://forge.softwareheritage.org/T1433
    """

    def get_indexer_results(self, ids):
        """Override this for indexers that don't have a mock storage."""
        return self.indexer.idx_storage.state

    def assert_legacy_results_ok(self, sha1s, expected_results=None):
        # XXX old format, remove this when all endpoints are
        # updated to the new one
        # see: https://forge.softwareheritage.org/T1433
        sha1s = [sha1 if isinstance(sha1, bytes) else hash_to_bytes(sha1)
                 for sha1 in sha1s]
        actual_results = list(self.get_indexer_results(sha1s))

        if expected_results is None:
            expected_results = self.expected_results

        self.assertEqual(len(expected_results), len(actual_results),
                         (expected_results, actual_results))
        for indexed_data in actual_results:
            _id = indexed_data['id']
            expected_data = expected_results[hashutil.hash_to_hex(_id)].copy()
            expected_data['id'] = _id
            self.assertEqual(indexed_data, expected_data)

    def assert_results_ok(self, sha1s, expected_results=None):
        if self.legacy_get_format:
            self.assert_legacy_results_ok(sha1s, expected_results)
            return

        sha1s = [sha1 if isinstance(sha1, bytes) else hash_to_bytes(sha1)
                 for sha1 in sha1s]
        actual_results = list(self.get_indexer_results(sha1s))

        if expected_results is None:
            expected_results = self.expected_results

        self.assertEqual(len(expected_results), len(actual_results),
                         (expected_results, actual_results))
        for indexed_data in actual_results:
            (_id, indexed_data) = list(indexed_data.items())[0]
            expected_data = expected_results[hashutil.hash_to_hex(_id)].copy()
            expected_data = [expected_data]
            self.assertEqual(indexed_data, expected_data)

    def test_index(self):
        """Known sha1s have their data indexed

        """
        sha1s = [self.id0, self.id1, self.id2]

        # when
        self.indexer.run(sha1s, policy_update='update-dups')

        self.assert_results_ok(sha1s)

        # 2nd pass
        self.indexer.run(sha1s, policy_update='ignore-dups')

        self.assert_results_ok(sha1s)

    def test_index_one_unknown_sha1(self):
        """Unknown sha1s are not indexed"""
        sha1s = [self.id1,
                 '799a5ef812c53907562fe379d4b3851e69c7cb15',  # unknown
                 '800a5ef812c53907562fe379d4b3851e69c7cb15']  # unknown

        # when
        self.indexer.run(sha1s, policy_update='update-dups')

        # then
        expected_results = {
            k: v for k, v in self.expected_results.items() if k in sha1s
        }

        self.assert_results_ok(sha1s, expected_results)
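# Editor's sketch: concrete test cases are expected to mix the class above
# into a unittest.TestCase and supply the `indexer`, `expected_results` and
# `id0`/`id1`/`id2` attributes it references; a hypothetical example:
#
#   class TestSomeContentIndexer(CommonContentIndexerTest, unittest.TestCase):
#       def setUp(self):
#           self.indexer = SomeContentIndexer(config=BASE_TEST_CONFIG)
#           self.id0 = '01c9379dfc33803963d07c1ccc748d3fe4c96bb5'
#           ...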
""" def setUp(self): self.contents = sorted(OBJ_STORAGE_DATA) def assert_results_ok(self, start, end, actual_results, expected_results=None): if expected_results is None: expected_results = self.expected_results actual_results = list(actual_results) for indexed_data in actual_results: _id = indexed_data['id'] assert isinstance(_id, bytes) indexed_data = indexed_data.copy() indexed_data['id'] = hash_to_hex(indexed_data['id']) self.assertEqual(indexed_data, expected_results[hash_to_hex(_id)]) self.assertTrue(start <= _id <= end) _tool_id = indexed_data['indexer_configuration_id'] self.assertEqual(_tool_id, self.indexer.tool['id']) def test__index_contents(self): """Indexing contents without existing data results in indexed data """ _start, _end = [self.contents[0], self.contents[2]] # output hex ids start, end = map(hashutil.hash_to_bytes, (_start, _end)) # given actual_results = list(self.indexer._index_contents( start, end, indexed={})) self.assert_results_ok(start, end, actual_results) def test__index_contents_with_indexed_data(self): """Indexing contents with existing data results in less indexed data """ _start, _end = [self.contents[0], self.contents[2]] # output hex ids start, end = map(hashutil.hash_to_bytes, (_start, _end)) data_indexed = [self.id0, self.id2] # given actual_results = self.indexer._index_contents( start, end, indexed=set(map(hash_to_bytes, data_indexed))) # craft the expected results expected_results = self.expected_results.copy() for already_indexed_key in data_indexed: expected_results.pop(already_indexed_key) self.assert_results_ok( start, end, actual_results, expected_results) def test_generate_content_get(self): """Optimal indexing should result in indexed data """ _start, _end = [self.contents[0], self.contents[2]] # output hex ids start, end = map(hashutil.hash_to_bytes, (_start, _end)) # given actual_results = self.indexer.run(start, end) # then self.assertTrue(actual_results) def test_generate_content_get_input_as_bytes(self): """Optimal indexing should result in indexed data Input are in bytes here. """ _start, _end = [self.contents[0], self.contents[2]] # output hex ids start, end = map(hashutil.hash_to_bytes, (_start, _end)) # given actual_results = self.indexer.run( # checks the bytes input this time start, end, skip_existing=False) # no already indexed data so same result as prior test # then self.assertTrue(actual_results) def test_generate_content_get_no_result(self): """No result indexed returns False""" _start, _end = ['0000000000000000000000000000000000000000', '0000000000000000000000000000000000000001'] start, end = map(hashutil.hash_to_bytes, (_start, _end)) # given actual_results = self.indexer.run( start, end, incremental=False) # then self.assertFalse(actual_results) diff --git a/version.txt b/version.txt index 35afbd1..bac0b8f 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.158-0-g5d32ac7 \ No newline at end of file +v0.0.159-0-g15811b9 \ No newline at end of file