diff --git a/PKG-INFO b/PKG-INFO index 22f5e93..43df598 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,71 +1,71 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 2.0.2 +Version: 2.1.0 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Project-URL: Documentation, https://docs.softwareheritage.org/devel/swh-indexer/ Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.7 Description-Content-Type: text/markdown Provides-Extra: testing License-File: LICENSE License-File: AUTHORS swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - store those information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive batch of ids - retrieve the associated data depending on object type - compute for that object some index - store the result to swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue swh_indexer_content_language): detect the programming language - ctags (queue swh_indexer_content_ctags): compute tags 
information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translate file into translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata in content_metadata table in storage or run content indexer to translate files. diff --git a/docs/images/tasks-metadata-indexers.uml b/docs/images/tasks-metadata-indexers.uml index 954e079..d3e4bde 100644 --- a/docs/images/tasks-metadata-indexers.uml +++ b/docs/images/tasks-metadata-indexers.uml @@ -1,84 +1,77 @@ @startuml participant LOADERS as "Loaders" participant JOURNAL as "Journal" - participant SCHEDULER as "Scheduler" + participant IDX_ORIG_META as "Origin Metadata Indexer" participant IDX_ORIG_HEAD as "Origin-Head Indexer" - participant IDX_REV_META as "Revision Metadata Indexer" + participant IDX_DIR_META as "Directory Metadata Indexer" participant IDX_CONT_META as "Content Metadata Indexer" - participant IDX_ORIG_META as "Origin Metadata Indexer" participant IDX_STORAGE as "Indexer Storage" participant STORAGE as "Graph Storage" participant OBJ_STORAGE as "Object Storage" activate OBJ_STORAGE activate IDX_STORAGE activate STORAGE activate JOURNAL - activate SCHEDULER + activate IDX_ORIG_META activate LOADERS - LOADERS->>JOURNAL: Origin 42 was added/revisited + LOADERS->>JOURNAL: Origin http://example.org/repo.git\nwas added/revisited deactivate LOADERS - JOURNAL->>SCHEDULER: run indexers on origin 42 + JOURNAL->>IDX_ORIG_META: run indexers on origin\nhttp://example.org/repo.git - SCHEDULER->>IDX_ORIG_HEAD: Find HEAD revision of 42 + IDX_ORIG_META->>IDX_ORIG_HEAD: Find HEAD revision of\nhttp://example.org/repo.git activate IDX_ORIG_HEAD - IDX_ORIG_HEAD->>STORAGE: snapshot_get_latest(origin=42) + IDX_ORIG_HEAD->>STORAGE: snapshot_get_latest(origin="http://example.org/repo.git") STORAGE->>IDX_ORIG_HEAD: branches - IDX_ORIG_HEAD->>SCHEDULER: run Revision Metadata Indexer\non revision 42abcdef\n(head of 
origin 42) + IDX_ORIG_HEAD->>IDX_ORIG_META: run Revision Metadata Indexer\non revision 42abcdef (head of origin\nhttp://example.org/repo.git) deactivate IDX_ORIG_HEAD - SCHEDULER->>IDX_REV_META: Index revision 42abcdef\n(head of origin 42) - activate IDX_REV_META + IDX_ORIG_META->>STORAGE: revision_get(sha1=42abcdef) + STORAGE->>IDX_ORIG_META: {id: 42abcdef, message: "Commit message", directory: 456789ab, ...} - IDX_REV_META->>STORAGE: revision_get(sha1=42abcdef) - STORAGE->>IDX_REV_META: {id: 42abcdef, message: "Commit message", directory: 456789ab, ...} + IDX_ORIG_META->>IDX_DIR_META: Index directory 456789ab\n(head of origin http://example.org/repo.git) + activate IDX_DIR_META - IDX_REV_META->>STORAGE: directory_ls(sha1=456789ab) - STORAGE->>IDX_REV_META: [{id: 1234cafe, name: "package.json", type: file, ...}, {id: cafe4321, name: "README", type: file, ...}, ...] + IDX_DIR_META->>STORAGE: directory_ls(sha1=456789ab) + STORAGE->>IDX_DIR_META: [{id: 1234cafe, name: "package.json", type: file, ...}, {id: cafe4321, name: "README", type: file, ...}, ...] - IDX_REV_META->>IDX_REV_META: package.json is a metadata file + IDX_DIR_META->>IDX_DIR_META: package.json is a metadata file - IDX_REV_META->>IDX_STORAGE: content_metadata_get(sha1=1234cafe) - IDX_STORAGE->>IDX_REV_META: none / {author: "Jane Doe", ...} + IDX_DIR_META->>IDX_STORAGE: content_metadata_get(sha1=1234cafe) + IDX_STORAGE->>IDX_DIR_META: none / {author: "Jane Doe", ...} alt If the storage answered "none" - IDX_REV_META->>IDX_CONT_META: Index file 1234cafe as an NPM metadata file + IDX_DIR_META->>IDX_CONT_META: Index file 1234cafe as an NPM metadata file activate IDX_CONT_META IDX_CONT_META->>OBJ_STORAGE: content_get 1234cafe OBJ_STORAGE->>IDX_CONT_META: raw content is: '{"name": "FooPackage", "author": "Jane Doe"...' 
IDX_CONT_META->>IDX_CONT_META: "Jane Doe" is the author IDX_CONT_META->>IDX_STORAGE: content_metadata_add(sha1=1234cafe, {author: "Jane Doe", ...}) IDX_STORAGE->>IDX_CONT_META: ok - IDX_CONT_META->>IDX_REV_META: extracted: {author: "Jane Doe", ...} + IDX_CONT_META->>IDX_DIR_META: extracted: {author: "Jane Doe", ...} deactivate IDX_CONT_META - end - IDX_REV_META->>IDX_STORAGE: revision_metadata_add(sha1=42abcdef, {author: "Jane Doe", ...}) - IDX_STORAGE->>IDX_REV_META: ok - - IDX_REV_META->>SCHEDULER: run Origin Metadata Indexer\non origin 42; the head is 42abcdef - deactivate IDX_REV_META - - SCHEDULER->>IDX_ORIG_META: Index origin 42; the head is 42abcdef - activate IDX_ORIG_META + IDX_DIR_META->>IDX_STORAGE: directory_metadata_add(sha1=456789ab, {author: "Jane Doe", ...}) + IDX_STORAGE->>IDX_DIR_META: ok + end - IDX_ORIG_META->>IDX_STORAGE: revision_metadata_get(sha1=42abcdef) - IDX_STORAGE->>IDX_ORIG_META: {author: "Jane Doe", ...} + IDX_DIR_META->>IDX_ORIG_META: extracted: {author: "Jane Doe", ...} + deactivate IDX_DIR_META - IDX_ORIG_META->>IDX_STORAGE: origin_metadata_add(id=42, {author: "Jane Doe", ...}) + IDX_ORIG_META->>IDX_STORAGE: origin_metadata_add(id="http://example.org/repo.git", {author: "Jane Doe", ...}, from_directory=456789ab) IDX_STORAGE->>IDX_ORIG_META: ok deactivate IDX_ORIG_META @enduml diff --git a/docs/metadata-workflow.rst b/docs/metadata-workflow.rst index f913f49..972cf74 100644 --- a/docs/metadata-workflow.rst +++ b/docs/metadata-workflow.rst @@ -1,208 +1,274 @@ Metadata workflow ================= Intrinsic metadata ------------------ Indexing :term:`intrinsic metadata` requires extracting information from the lowest levels of the :ref:`Merkle DAG ` (directories, files, and content blobs) and associate them to the highest ones (origins). In order to deduplicate the work between origins, we split this work between multiple indexers, which coordinate with each other and save their results at each step in the indexer storage. 
Indexer architecture --------------------- +^^^^^^^^^^^^^^^^^^^^ .. thumbnail:: images/tasks-metadata-indexers.svg Origin-Head Indexer -___________________ +^^^^^^^^^^^^^^^^^^^ First, the Origin-Head indexer gets called externally, with an origin as argument (or multiple origins, that are handled sequentially). For now, its tasks are scheduled manually via recurring Scheduler tasks; but in the near future, the :term:`journal` will be used to do that. It first looks up the last :term:`snapshot` and determines what the main branch of origin is (the "Head branch") and what revision it points to (the "Head"). Intrinsic metadata for that origin will be extracted from that revision. -It schedules a Revision Metadata Indexer task for that revision, with a -hint that the revision is the Head of that particular origin. +It schedules a Directory Metadata Indexer task for the root directory of +that revision. -Revision and Content Metadata Indexers -______________________________________ +Directory and Content Metadata Indexers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -These two indexers do the hard part of the work. The Revision Metadata +These two indexers do the hard part of the work. The Directory Metadata Indexer fetches the root directory associated with a revision, then extracts the metadata from that directory. To do so, it lists files in that directory, and looks for known names, such as :file:`codemeta.json`, :file:`package.json`, or :file:`pom.xml`. If there are any, it runs the Content Metadata Indexer on them, which in turn fetches their contents and runs them through extraction dictionaries/mappings. See below for details. Their results are saved in a database (the indexer storage), associated with -the content and revision hashes. - -If it received a hint that this revision is the head of an origin, the -Revision Metadata Indexer then schedules the Origin Metadata Indexer -to run on that origin. +the content and directory hashes. 
Origin Metadata Indexer -_______________________ +^^^^^^^^^^^^^^^^^^^^^^^ The job of this indexer is very simple: it takes an origin identifier and -a revision hash, and copies the metadata of the former to a new table, to -associate it with the latter. +uses the Origin-Head and Directory indexers to get metadata from the head +directory of an origin, and copies the metadata of the former to a new table, +to associate it with the latter. The reason for this is to be able to perform searches on metadata, and efficiently find out which origins matched the pattern. -Running that search on the ``revision_metadata`` table would require either -a reverse lookup from revisions to origins, which is costly. +Running that search on the ``directory_metadata`` table would require either +a reverse lookup from directories to origins, which is costly. -Translation from language-specific metadata to CodeMeta -------------------------------------------------------- +Translation from ecosystem-specific metadata to CodeMeta +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Intrinsic metadata are extracted from files provided with a project's source +Intrinsic metadata is extracted from files provided with a project's source code, and translated using `CodeMeta`_'s `crosswalk table`_. All input formats supported so far are straightforward dictionaries (eg. JSON) or can be accessed as such (eg. XML); and the first part of the translation is to map their keys to a term in the CodeMeta vocabulary. This is done by parsing the crosswalk table's `CSV file`_ and using it as a map between these two vocabularies; and this does not require any format-specific code in the indexers. The second part is to normalize values. As language-specific metadata files each have their way(s) of formatting these values, we need to turn them into the data type required by CodeMeta. This normalization makes up for most of the code of :py:mod:`swh.indexer.metadata_dictionary`. .. 
_CodeMeta: https://codemeta.github.io/ .. _crosswalk table: https://codemeta.github.io/crosswalk/ .. _CSV file: https://github.com/codemeta/codemeta/blob/master/crosswalk.csv +Extrinsic metadata +------------------ + +The :term:`extrinsic metadata` indexer works very differently from +the :term:`intrinsic metadata` indexers we saw above. +While the latter extract metadata from software artefacts (files and directories) +which are already a core part of the archive, the former extracts such data from +API calls pulled from forges and package managers, or pushed via the +:ref:`SWORD deposit `. + +In order to preserve original information verbatim, the Software Heritage itself +stores the result of these calls, independently of indexers, in their own archive +as described in the :ref:`extrinsic-metadata-specification`. +In this section, we assume this information is already present in the archive, +but in the "raw extrinsic metadata" form, which needs to be translated to a common +vocabulary to be useful, as with intrinsic metadata. + +The common vocabulary we chose is JSON-LD, with both CodeMeta and +`ForgeFed's vocabulary`_ (including `ActivityStream's vocabulary`_) + +.. _ForgeFed's vocabulary: https://forgefed.org/vocabulary.html +.. _ActivityStream's vocabulary: https://www.w3.org/TR/activitystreams-vocabulary/ + +Instead of the four-step architecture above, the extrinsic-metadata indexer +is standalone: it reads "raw extrinsic metadata" from the :ref:`swh-journal`, +and produces new indexed entries in the database as they come. + +The caveat is that, while intrinsic metadata are always unambiguously authoritative +(they are contained by their own origin repository, therefore they were added by +the origin's "owners"), extrinsic metadata can be authored by third-parties. 
+Support for third-party authorities is currently not implemented for this reason; +so extrinsic metadata is only indexed when provided by the same +forge/package-repository as the origin the metadata is about. +Metadata on non-origin objects (typically, directories), is also ignored for +this reason, for now. + +Assuming the metadata was provided by such an authority, it is then passed +to metadata mappings; identified by a mimetype (or custom format name) +they declared rather than filenames. + + +Implementation status +--------------------- + Supported intrinsic metadata ----------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following sources of intrinsic metadata are supported: * CodeMeta's `codemeta.json`_, * Maven's `pom.xml`_, * NPM's `package.json`_, * Python's `PKG-INFO`_, * Ruby's `.gemspec`_ .. _codemeta.json: https://codemeta.github.io/terms/ .. _pom.xml: https://maven.apache.org/pom.html .. _package.json: https://docs.npmjs.com/files/package.json .. _PKG-INFO: https://www.python.org/dev/peps/pep-0314/ .. _.gemspec: https://guides.rubygems.org/specification-reference/ +Supported extrinsic metadata +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following sources of extrinsic metadata are supported: + +* GitHub's `"repo" API `__ + + Supported CodeMeta terms ------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^ The following terms may be found in the output of the metadata translation (other than the `codemeta` mapping, which is the identity function, and therefore supports all terms): .. program-output:: python3 -m swh.indexer.cli mapping list-terms --exclude-mapping codemeta :nostderr: -Adding support for additional ecosystem-specific metadata ---------------------------------------------------------- + + +Tutorials +--------- + +The rest of this page is made of two tutorials: one to index +:term:`intrinsic metadata` (ie. from a file in a VCS or in a tarball), +and one to index :term:`extrinsic metadata` (ie. 
obtained via external means, +such as GitHub's or GitLab's APIs). + +Adding support for additional ecosystem-specific intrinsic metadata +------------------------------------------------------------------- This section will guide you through adding code to the metadata indexer to detect and translate new metadata formats. First, you should start by picking one of the `CodeMeta crosswalks`_. Then create a new file in :file:`swh-indexer/swh/indexer/metadata_dictionary/`, that will contain your code, and create a new class that inherits from helper classes, with some documentation about your indexer: .. code-block:: python - from .base import DictMapping, SingleFileMapping + from .base import DictMapping, SingleFileIntrinsicMapping from swh.indexer.codemeta import CROSSWALK_TABLE - class MyMapping(DictMapping, SingleFileMapping): + class MyMapping(DictMapping, SingleFileIntrinsicMapping): """Dedicated class for ...""" name = 'my-mapping' filename = b'the-filename' mapping = CROSSWALK_TABLE['Name of the CodeMeta crosswalk'] .. _CodeMeta crosswalks: https://github.com/codemeta/codemeta/tree/master/crosswalks +And reference it from :const:`swh.indexer.metadata_dictionary.INTRINSIC_MAPPINGS`. + Then, add a ``string_fields`` attribute, that is the list of all keys whose values are simple text values. For instance, to `translate Python PKG-INFO`_, it's: .. code-block:: python string_fields = ['name', 'version', 'description', 'summary', 'author', 'author-email'] These values will be automatically added to the above list of supported terms. .. _translate Python PKG-INFO: https://forge.softwareheritage.org/source/swh-indexer/browse/master/swh/indexer/metadata_dictionary/python.py Last step to get your code working: add a ``translate`` method that will take a single byte string as argument, turn it into a Python dictionary, whose keys are the ones of the input document, and pass it to ``_translate_dict``. 
For instance, if the input document is in JSON, it can be as simple as: .. code-block:: python def translate(self, raw_content): raw_content = raw_content.decode() # bytes to str content_dict = json.loads(raw_content) # str to dict return self._translate_dict(content_dict) # convert to CodeMeta ``_translate_dict`` will do the heavy work of reading the crosswalk table for each of ``string_fields``, read the corresponding value in the ``content_dict``, and build a CodeMeta dictionary with the corresponding names from the crosswalk table. One last thing to run your code: add it to the list in :file:`swh-indexer/swh/indexer/metadata_dictionary/__init__.py`, so the rest of the code is aware of it. Now, you can run it: .. code-block:: shell python3 -m swh.indexer.metadata_dictionary MyMapping path/to/input/file and it will (hopefully) returns a CodeMeta object. If it works, well done! You can now improve your translation code further, by adding methods that will do more advanced conversion. For example, if there is a field named ``license`` containing an SPDX identifier, you must convert it to an URI, like this: .. code-block:: python def normalize_license(self, s): if isinstance(s, str): return {"@id": "https://spdx.org/licenses/" + s} This method will automatically get called by ``_translate_dict`` when it finds a ``license`` field in ``content_dict``. 
+ +Adding support for additional ecosystem-specific extrinsic metadata +------------------------------------------------------------------- + +[this section is a work in progress] diff --git a/swh.indexer.egg-info/PKG-INFO b/swh.indexer.egg-info/PKG-INFO index 22f5e93..43df598 100644 --- a/swh.indexer.egg-info/PKG-INFO +++ b/swh.indexer.egg-info/PKG-INFO @@ -1,71 +1,71 @@ Metadata-Version: 2.1 Name: swh.indexer -Version: 2.0.2 +Version: 2.1.0 Summary: Software Heritage Content Indexer Home-page: https://forge.softwareheritage.org/diffusion/78/ Author: Software Heritage developers Author-email: swh-devel@inria.fr Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-indexer Project-URL: Documentation, https://docs.softwareheritage.org/devel/swh-indexer/ Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.7 Description-Content-Type: text/markdown Provides-Extra: testing License-File: LICENSE License-File: AUTHORS swh-indexer ============ Tools to compute multiple indexes on SWH's raw contents: - content: - mimetype - ctags - language - fossology-license - metadata - revision: - metadata An indexer is in charge of: - looking up objects - extracting information from those objects - store those information in the swh-indexer db There are multiple indexers working on different object types: - content indexer: works with content sha1 hashes - revision indexer: works with revision sha1 hashes - origin indexer: works with origin identifiers Indexation procedure: - receive batch of ids - retrieve the associated data depending on object type - compute for that object some index - store the result 
to swh's storage Current content indexers: - mimetype (queue swh_indexer_content_mimetype): detect the encoding and mimetype - language (queue swh_indexer_content_language): detect the programming language - ctags (queue swh_indexer_content_ctags): compute tags information - fossology-license (queue swh_indexer_fossology_license): compute the license - metadata: translate file into translated_metadata dict Current revision indexers: - metadata: detects files containing metadata and retrieves translated_metadata in content_metadata table in storage or run content indexer to translate files. diff --git a/swh.indexer.egg-info/SOURCES.txt b/swh.indexer.egg-info/SOURCES.txt index fd68651..c094096 100644 --- a/swh.indexer.egg-info/SOURCES.txt +++ b/swh.indexer.egg-info/SOURCES.txt @@ -1,145 +1,163 @@ .git-blame-ignore-revs .gitignore .pre-commit-config.yaml AUTHORS CODE_OF_CONDUCT.md CONTRIBUTORS LICENSE MANIFEST.in Makefile Makefile.local README.md codemeta.json conftest.py mypy.ini pyproject.toml pytest.ini requirements-swh.txt requirements-test.txt requirements.txt setup.cfg setup.py tox.ini docs/.gitignore docs/Makefile docs/Makefile.local docs/README.md docs/cli.rst docs/conf.py docs/dev-info.rst docs/index.rst docs/metadata-workflow.rst docs/_static/.placeholder docs/_templates/.placeholder docs/images/.gitignore docs/images/Makefile docs/images/tasks-metadata-indexers.uml sql/bin/db-upgrade sql/bin/dot_add_content sql/doc/json sql/doc/json/.gitignore sql/doc/json/Makefile sql/doc/json/indexer_configuration.tool_configuration.schema.json sql/doc/json/revision_metadata.translated_metadata.json sql/json/.gitignore sql/json/Makefile sql/json/indexer_configuration.tool_configuration.schema.json sql/json/revision_metadata.translated_metadata.json swh/__init__.py swh.indexer.egg-info/PKG-INFO swh.indexer.egg-info/SOURCES.txt swh.indexer.egg-info/dependency_links.txt swh.indexer.egg-info/entry_points.txt swh.indexer.egg-info/requires.txt swh.indexer.egg-info/top_level.txt 
swh/indexer/__init__.py swh/indexer/cli.py swh/indexer/codemeta.py swh/indexer/ctags.py swh/indexer/fossology_license.py swh/indexer/indexer.py swh/indexer/journal_client.py swh/indexer/metadata.py swh/indexer/metadata_detector.py swh/indexer/mimetype.py swh/indexer/origin_head.py swh/indexer/py.typed swh/indexer/rehash.py swh/indexer/tasks.py +swh/indexer/data/composer.csv +swh/indexer/data/pubspec.csv swh/indexer/data/codemeta/CITATION swh/indexer/data/codemeta/LICENSE swh/indexer/data/codemeta/codemeta.jsonld swh/indexer/data/codemeta/crosswalk.csv swh/indexer/metadata_dictionary/__init__.py swh/indexer/metadata_dictionary/base.py swh/indexer/metadata_dictionary/cff.py swh/indexer/metadata_dictionary/codemeta.py +swh/indexer/metadata_dictionary/composer.py +swh/indexer/metadata_dictionary/dart.py +swh/indexer/metadata_dictionary/github.py swh/indexer/metadata_dictionary/maven.py swh/indexer/metadata_dictionary/npm.py swh/indexer/metadata_dictionary/python.py swh/indexer/metadata_dictionary/ruby.py swh/indexer/sql/10-superuser-init.sql swh/indexer/sql/20-enums.sql swh/indexer/sql/30-schema.sql swh/indexer/sql/50-data.sql swh/indexer/sql/50-func.sql swh/indexer/sql/60-indexes.sql swh/indexer/sql/upgrades/115.sql swh/indexer/sql/upgrades/116.sql swh/indexer/sql/upgrades/117.sql swh/indexer/sql/upgrades/118.sql swh/indexer/sql/upgrades/119.sql swh/indexer/sql/upgrades/120.sql swh/indexer/sql/upgrades/121.sql swh/indexer/sql/upgrades/122.sql swh/indexer/sql/upgrades/123.sql swh/indexer/sql/upgrades/124.sql swh/indexer/sql/upgrades/125.sql swh/indexer/sql/upgrades/126.sql swh/indexer/sql/upgrades/127.sql swh/indexer/sql/upgrades/128.sql swh/indexer/sql/upgrades/129.sql swh/indexer/sql/upgrades/130.sql swh/indexer/sql/upgrades/131.sql swh/indexer/sql/upgrades/132.sql swh/indexer/sql/upgrades/133.sql swh/indexer/sql/upgrades/134.sql +swh/indexer/sql/upgrades/135.sql swh/indexer/storage/__init__.py swh/indexer/storage/converters.py swh/indexer/storage/db.py 
swh/indexer/storage/exc.py swh/indexer/storage/in_memory.py swh/indexer/storage/interface.py swh/indexer/storage/metrics.py swh/indexer/storage/model.py swh/indexer/storage/writer.py swh/indexer/storage/api/__init__.py swh/indexer/storage/api/client.py swh/indexer/storage/api/serializers.py swh/indexer/storage/api/server.py swh/indexer/tests/__init__.py swh/indexer/tests/conftest.py swh/indexer/tests/tasks.py swh/indexer/tests/test_cli.py swh/indexer/tests/test_codemeta.py swh/indexer/tests/test_ctags.py swh/indexer/tests/test_fossology_license.py swh/indexer/tests/test_indexer.py swh/indexer/tests/test_journal_client.py swh/indexer/tests/test_metadata.py swh/indexer/tests/test_mimetype.py swh/indexer/tests/test_origin_head.py swh/indexer/tests/test_origin_metadata.py -swh/indexer/tests/test_tasks.py swh/indexer/tests/utils.py +swh/indexer/tests/metadata_dictionary/__init__.py +swh/indexer/tests/metadata_dictionary/test_cff.py +swh/indexer/tests/metadata_dictionary/test_codemeta.py +swh/indexer/tests/metadata_dictionary/test_composer.py +swh/indexer/tests/metadata_dictionary/test_dart.py +swh/indexer/tests/metadata_dictionary/test_github.py +swh/indexer/tests/metadata_dictionary/test_maven.py +swh/indexer/tests/metadata_dictionary/test_npm.py +swh/indexer/tests/metadata_dictionary/test_python.py +swh/indexer/tests/metadata_dictionary/test_ruby.py swh/indexer/tests/storage/__init__.py swh/indexer/tests/storage/conftest.py swh/indexer/tests/storage/generate_data_test.py swh/indexer/tests/storage/test_api_client.py swh/indexer/tests/storage/test_converters.py swh/indexer/tests/storage/test_in_memory.py swh/indexer/tests/storage/test_init.py swh/indexer/tests/storage/test_metrics.py swh/indexer/tests/storage/test_model.py swh/indexer/tests/storage/test_server.py -swh/indexer/tests/storage/test_storage.py \ No newline at end of file +swh/indexer/tests/storage/test_storage.py +swh/indexer/tests/zz_celery/README +swh/indexer/tests/zz_celery/__init__.py 
+swh/indexer/tests/zz_celery/test_tasks.py \ No newline at end of file diff --git a/swh/indexer/codemeta.py b/swh/indexer/codemeta.py index b157232..8f492a5 100644 --- a/swh/indexer/codemeta.py +++ b/swh/indexer/codemeta.py @@ -1,204 +1,220 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import collections import csv import itertools import json import os.path import re +from typing import Any, List from pyld import jsonld import swh.indexer _DATA_DIR = os.path.join(os.path.dirname(swh.indexer.__file__), "data") CROSSWALK_TABLE_PATH = os.path.join(_DATA_DIR, "codemeta", "crosswalk.csv") CODEMETA_CONTEXT_PATH = os.path.join(_DATA_DIR, "codemeta", "codemeta.jsonld") with open(CODEMETA_CONTEXT_PATH) as fd: CODEMETA_CONTEXT = json.load(fd) +_EMPTY_PROCESSED_CONTEXT: Any = {"mappings": {}} +_PROCESSED_CODEMETA_CONTEXT = jsonld.JsonLdProcessor().process_context( + _EMPTY_PROCESSED_CONTEXT, CODEMETA_CONTEXT, None +) + CODEMETA_CONTEXT_URL = "https://doi.org/10.5063/schema/codemeta-2.0" CODEMETA_ALTERNATE_CONTEXT_URLS = { ("https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld") } CODEMETA_URI = "https://codemeta.github.io/terms/" SCHEMA_URI = "http://schema.org/" +FORGEFED_URI = "https://forgefed.org/ns#" +ACTIVITYSTREAMS_URI = "https://www.w3.org/ns/activitystreams#" PROPERTY_BLACKLIST = { # CodeMeta properties that we cannot properly represent. 
SCHEMA_URI + "softwareRequirements", CODEMETA_URI + "softwareSuggestions", # Duplicate of 'author' SCHEMA_URI + "creator", } _codemeta_field_separator = re.compile(r"\s*[,/]\s*") def make_absolute_uri(local_name): - definition = CODEMETA_CONTEXT["@context"][local_name] - if isinstance(definition, str): - return definition - elif isinstance(definition, dict): - prefixed_name = definition["@id"] - (prefix, local_name) = prefixed_name.split(":") - if prefix == "schema": - canonical_name = SCHEMA_URI + local_name - elif prefix == "codemeta": - canonical_name = CODEMETA_URI + local_name - else: - assert False, prefix - return canonical_name - else: - assert False, definition + """Parses codemeta.jsonld, and returns the @id of terms it defines. + + >>> make_absolute_uri("name") + 'http://schema.org/name' + >>> make_absolute_uri("downloadUrl") + 'http://schema.org/downloadUrl' + >>> make_absolute_uri("referencePublication") + 'https://codemeta.github.io/terms/referencePublication' + """ + uri = jsonld.JsonLdProcessor.get_context_value( + _PROCESSED_CODEMETA_CONTEXT, local_name, "@id" + ) + assert uri.startswith(("@", CODEMETA_URI, SCHEMA_URI)), (local_name, uri) + return uri def _read_crosstable(fd): reader = csv.reader(fd) try: header = next(reader) except StopIteration: raise ValueError("empty file") data_sources = set(header) - {"Parent Type", "Property", "Type", "Description"} - assert "codemeta-V1" in data_sources codemeta_translation = {data_source: {} for data_source in data_sources} terms = set() for line in reader: # For each canonical name local_name = dict(zip(header, line))["Property"] if not local_name: continue canonical_name = make_absolute_uri(local_name) if canonical_name in PROPERTY_BLACKLIST: continue terms.add(canonical_name) for (col, value) in zip(header, line): # For each cell in the row if col in data_sources: # If that's not the parentType/property/type/description for local_name in _codemeta_field_separator.split(value): # For each of the data 
source's properties that maps # to this canonical name if local_name.strip(): codemeta_translation[col][local_name.strip()] = canonical_name return (terms, codemeta_translation) with open(CROSSWALK_TABLE_PATH) as fd: (CODEMETA_TERMS, CROSSWALK_TABLE) = _read_crosstable(fd) def _document_loader(url, options=None): """Document loader for pyld. Reads the local codemeta.jsonld file instead of fetching it from the Internet every single time.""" if url == CODEMETA_CONTEXT_URL or url in CODEMETA_ALTERNATE_CONTEXT_URLS: return { "contextUrl": None, "documentUrl": url, "document": CODEMETA_CONTEXT, } elif url == CODEMETA_URI: raise Exception( "{} is CodeMeta's URI, use {} as context url".format( CODEMETA_URI, CODEMETA_CONTEXT_URL ) ) else: raise Exception(url) -def compact(doc): - """Same as `pyld.jsonld.compact`, but in the context of CodeMeta.""" - return jsonld.compact( - doc, CODEMETA_CONTEXT_URL, options={"documentLoader": _document_loader} - ) +def compact(doc, forgefed: bool): + """Same as `pyld.jsonld.compact`, but in the context of CodeMeta. + + Args: + forgefed: Whether to add ForgeFed and ActivityStreams as compact URIs. + This is typically used for extrinsic metadata documents, which frequently + use properties from these namespaces. + """ + contexts: List[Any] = [CODEMETA_CONTEXT_URL] + if forgefed: + contexts.append({"as": ACTIVITYSTREAMS_URI, "forge": FORGEFED_URI}) + return jsonld.compact(doc, contexts, options={"documentLoader": _document_loader}) def expand(doc): """Same as `pyld.jsonld.expand`, but in the context of CodeMeta.""" return jsonld.expand(doc, options={"documentLoader": _document_loader}) def merge_values(v1, v2): """If v1 and v2 are of the form `{"@list": l1}` and `{"@list": l2}`, returns `{"@list": l1 + l2}`. Otherwise, make them lists (if they are not already) and concatenate them. 
>>> merge_values('a', 'b') ['a', 'b'] >>> merge_values(['a', 'b'], 'c') ['a', 'b', 'c'] >>> merge_values({'@list': ['a', 'b']}, {'@list': ['c']}) {'@list': ['a', 'b', 'c']} """ if v1 is None: return v2 elif v2 is None: return v1 elif isinstance(v1, dict) and set(v1) == {"@list"}: assert isinstance(v1["@list"], list) if isinstance(v2, dict) and set(v2) == {"@list"}: assert isinstance(v2["@list"], list) return {"@list": v1["@list"] + v2["@list"]} else: raise ValueError("Cannot merge %r and %r" % (v1, v2)) else: if isinstance(v2, dict) and "@list" in v2: raise ValueError("Cannot merge %r and %r" % (v1, v2)) if not isinstance(v1, list): v1 = [v1] if not isinstance(v2, list): v2 = [v2] return v1 + v2 def merge_documents(documents): """Takes a list of metadata dicts, each generated from a different metadata file, and merges them. Removes duplicates, if any.""" documents = list(itertools.chain.from_iterable(map(expand, documents))) merged_document = collections.defaultdict(list) for document in documents: for (key, values) in document.items(): if key == "@id": # @id does not get expanded to a list value = values # Only one @id is allowed, move it to sameAs if "@id" not in merged_document: merged_document["@id"] = value elif value != merged_document["@id"]: if value not in merged_document[SCHEMA_URI + "sameAs"]: merged_document[SCHEMA_URI + "sameAs"].append(value) else: for value in values: if isinstance(value, dict) and set(value) == {"@list"}: # Value is of the form {'@list': [item1, item2]} # instead of the usual [item1, item2]. # We need to merge the inner lists (and mostly # preserve order). merged_value = merged_document.setdefault(key, {"@list": []}) for subvalue in value["@list"]: # merged_value must be of the form # {'@list': [item1, item2]}; as it is the same # type as value, which is an @list. 
if subvalue not in merged_value["@list"]: merged_value["@list"].append(subvalue) elif value not in merged_document[key]: merged_document[key].append(value) - return compact(merged_document) + # XXX: we should set forgefed=True when merging extrinsic-metadata documents. + # however, this function is only used to merge multiple files of the same + # directory (which is only for intrinsic-metadata), so it is not an issue for now + return compact(merged_document, forgefed=False) diff --git a/swh/indexer/data/composer.csv b/swh/indexer/data/composer.csv new file mode 100644 index 0000000..599a931 --- /dev/null +++ b/swh/indexer/data/composer.csv @@ -0,0 +1,68 @@ +Property,Composer +codeRepository,support.source +programmingLanguage, +runtimePlatform, +targetProduct, +applicationCategory, +applicationSubCategory, +downloadUrl, +fileSize, +installUrl, +memoryRequirements, +operatingSystem, +permissions, +processorRequirements, +releaseNotes, +softwareHelp, +softwareRequirements,require +softwareVersion,version +storageRequirements, +supportingData, +author,authors +citation, +contributor, +copyrightHolder, +copyrightYear, +dateCreated, +dateModified, +datePublished, +editor, +encoding, +fileFormat, +funder, +keywords,keywords +license,license +producer, +provider, +publisher, +sponsor, +version,version +isAccessibleForFree, +isPartOf, +hasPart, +position, +description,description +identifier,name +name,name +sameAs, +url,homepage +relatedLink, +givenName, +familyName, +email,author.email +affiliation, +identifier, +name,author.name +address, +type, +id, +softwareSuggestions,suggest +maintainer, +contIntegration, +buildInstructions, +developmentStatus, +embargoDate, +funding, +issueTracker,support.issues +referencePublication, +readme, \ No newline at end of file diff --git a/swh/indexer/data/pubspec.csv b/swh/indexer/data/pubspec.csv new file mode 100644 index 0000000..3032feb --- /dev/null +++ b/swh/indexer/data/pubspec.csv @@ -0,0 +1,68 @@ +Property,Pubspec 
+codeRepository,repository +programmingLanguage, +runtimePlatform,platforms +targetProduct, +applicationCategory, +applicationSubCategory, +downloadUrl, +fileSize, +installUrl, +memoryRequirements, +operatingSystem, +permissions, +processorRequirements, +releaseNotes, +softwareHelp, +softwareRequirements, +softwareVersion,version +storageRequirements, +supportingData, +author,author/authors +citation, +contributor, +copyrightHolder, +copyrightYear, +dateCreated, +dateModified, +datePublished, +editor, +encoding, +fileFormat, +funder, +keywords,keywords +license,license +producer, +provider, +publisher, +sponsor, +version,version +isAccessibleForFree, +isPartOf, +hasPart, +position, +description,description +identifier, +name,name +sameAs, +url,homepage +relatedLink, +givenName, +familyName, +email,author.email/authors.email +affiliation, +identifier, +name, +address, +type, +id, +softwareSuggestions, +maintainer, +contIntegration, +buildInstructions, +developmentStatus, +embargoDate, +funding, +issueTracker,issue_tracker +referencePublication, +readme, diff --git a/swh/indexer/indexer.py b/swh/indexer/indexer.py index 7651421..f02102c 100644 --- a/swh/indexer/indexer.py +++ b/swh/indexer/indexer.py @@ -1,657 +1,658 @@ # Copyright (C) 2016-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import abc from contextlib import contextmanager import logging import os import shutil import tempfile from typing import ( Any, Dict, Generic, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union, ) import warnings import sentry_sdk from typing_extensions import TypedDict from swh.core import utils from swh.core.config import load_from_envvar, merge_configs from swh.indexer.storage import INDEXER_CFG_KEY, Sha1, get_indexer_storage from swh.indexer.storage.interface import 
IndexerStorageInterface from swh.model import hashutil from swh.model.model import Directory, Origin, Sha1Git from swh.objstorage.exc import ObjNotFoundError from swh.objstorage.factory import get_objstorage from swh.scheduler import CONFIG as SWH_CONFIG from swh.storage import get_storage from swh.storage.interface import StorageInterface class ObjectsDict(TypedDict, total=False): directory: List[Dict] origin: List[Dict] origin_visit_status: List[Dict] + raw_extrinsic_metadata: List[Dict] @contextmanager def write_to_temp(filename: str, data: bytes, working_directory: str) -> Iterator[str]: """Write the sha1's content in a temporary file. Args: filename: one of sha1's many filenames data: the sha1's content to write in temporary file working_directory: the directory into which the file is written Returns: The path to the temporary file created. That file is filled in with the raw content's data. """ os.makedirs(working_directory, exist_ok=True) temp_dir = tempfile.mkdtemp(dir=working_directory) content_path = os.path.join(temp_dir, filename) with open(content_path, "wb") as f: f.write(data) yield content_path shutil.rmtree(temp_dir) DEFAULT_CONFIG = { INDEXER_CFG_KEY: {"cls": "memory"}, "storage": {"cls": "memory"}, "objstorage": {"cls": "memory"}, } TId = TypeVar("TId") """type of the ids of index()ed objects.""" TData = TypeVar("TData") """type of the objects passed to index().""" TResult = TypeVar("TResult") """return type of index()""" class BaseIndexer(Generic[TId, TData, TResult], metaclass=abc.ABCMeta): """Base class for indexers to inherit from. The main entry point is the :func:`run` function which is in charge of triggering the computations on the batch dict/ids received. Indexers can: - filter out ids whose data has already been indexed. - retrieve ids data from storage or objstorage - index this data depending on the object and store the result in storage. 
To implement a new object type indexer, inherit from the BaseIndexer and implement indexing: :meth:`~BaseIndexer.run`: object_ids are different depending on object. For example: sha1 for content, sha1_git for revision, directory, release, and id for origin To implement a new concrete indexer, inherit from the object level classes: :class:`ContentIndexer`, :class:`DirectoryIndexer`, :class:`OriginIndexer`. Then you need to implement the following functions: :meth:`~BaseIndexer.filter`: filter out data already indexed (in storage). :meth:`~BaseIndexer.index_object`: compute index on id with data (retrieved from the storage or the objstorage by the id key) and return the resulting index computation. :meth:`~BaseIndexer.persist_index_computations`: persist the results of multiple index computations in the storage. The new indexer implementation can also override the following functions: :meth:`~BaseIndexer.prepare`: Configuration preparation for the indexer. When overriding, this must call the `super().prepare()` instruction. :meth:`~BaseIndexer.check`: Configuration check for the indexer. When overriding, this must call the `super().check()` instruction. :meth:`~BaseIndexer.register_tools`: This should return a dict of the tool(s) to use when indexing or filtering. """ results: List[TResult] USE_TOOLS = True catch_exceptions = True """Prevents exceptions in `index()` from raising too high. 
Set to False in tests to properly catch all exceptions.""" scheduler: Any storage: StorageInterface objstorage: Any idx_storage: IndexerStorageInterface def __init__(self, config=None, **kw) -> None: """Prepare and check that the indexer is ready to run.""" super().__init__() if config is not None: self.config = config elif SWH_CONFIG: self.config = SWH_CONFIG.copy() else: self.config = load_from_envvar() self.config = merge_configs(DEFAULT_CONFIG, self.config) self.prepare() self.check() self.log.debug("%s: config=%s", self, self.config) def prepare(self) -> None: """Prepare the indexer's needed runtime configuration. Without this step, the indexer cannot possibly run. """ config_storage = self.config.get("storage") if config_storage: self.storage = get_storage(**config_storage) self.objstorage = get_objstorage(**self.config["objstorage"]) idx_storage = self.config[INDEXER_CFG_KEY] self.idx_storage = get_indexer_storage(**idx_storage) _log = logging.getLogger("requests.packages.urllib3.connectionpool") _log.setLevel(logging.WARN) self.log = logging.getLogger("swh.indexer") if self.USE_TOOLS: self.tools = list(self.register_tools(self.config.get("tools", []))) self.results = [] @property def tool(self) -> Dict: return self.tools[0] def check(self) -> None: """Check the indexer's configuration is ok before proceeding. If ok, does nothing. If not raise error. """ if self.USE_TOOLS and not self.tools: raise ValueError("Tools %s is unknown, cannot continue" % self.tools) def _prepare_tool(self, tool: Dict[str, Any]) -> Dict[str, Any]: """Prepare the tool dict to be compliant with the storage api.""" return {"tool_%s" % key: value for key, value in tool.items()} def register_tools( self, tools: Union[Dict[str, Any], List[Dict[str, Any]]] ) -> List[Dict[str, Any]]: """Permit to register tools to the storage. Add a sensible default which can be overridden if not sufficient. 
(For now, all indexers use only one tool) Expects the self.config['tools'] property to be set with one or more tools. Args: tools: Either a dict or a list of dict. Returns: list: List of dicts with additional id key. Raises: ValueError: if not a list nor a dict. """ if isinstance(tools, list): tools = list(map(self._prepare_tool, tools)) elif isinstance(tools, dict): tools = [self._prepare_tool(tools)] else: raise ValueError("Configuration tool(s) must be a dict or list!") if tools: return self.idx_storage.indexer_configuration_add(tools) else: return [] def index(self, id: TId, data: Optional[TData], **kwargs) -> List[TResult]: """Index computation for the id and associated raw data. Args: id: identifier or Dict object data: id's data from storage or objstorage depending on object type Returns: dict: a dict that makes sense for the :meth:`.persist_index_computations` method. """ raise NotImplementedError() def filter(self, ids: List[TId]) -> Iterator[TId]: """Filter missing ids for that particular indexer. Args: ids: list of ids Yields: iterator of missing ids """ yield from ids @abc.abstractmethod def persist_index_computations(self, results: List[TResult]) -> Dict[str, int]: """Persist the computation resulting from the index. Args: results: List of results. One result is the result of the index function. Returns: a summary dict of what has been inserted in the storage """ return {} class ContentIndexer(BaseIndexer[Sha1, bytes, TResult], Generic[TResult]): """A content indexer working on a list of ids directly. To work on indexer partition, use the :class:`ContentPartitionIndexer` instead. Note: :class:`ContentIndexer` is not an instantiable object. To use it, one should inherit from this class and override the methods mentioned in the :class:`BaseIndexer` class. 
""" def run(self, ids: List[Sha1], **kwargs) -> Dict: """Given a list of ids: - retrieve the content from the storage - execute the indexing computations - store the results Args: ids (Iterable[Union[bytes, str]]): sha1's identifier list **kwargs: passed to the `index` method Returns: A summary Dict of the task's status """ if "policy_update" in kwargs: warnings.warn( "'policy_update' argument is deprecated and ignored.", DeprecationWarning, ) del kwargs["policy_update"] sha1s = [ hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_ for id_ in ids ] results = [] summary: Dict = {"status": "uneventful"} try: for sha1 in sha1s: try: raw_content = self.objstorage.get(sha1) except ObjNotFoundError: self.log.warning( "Content %s not found in objstorage" % hashutil.hash_to_hex(sha1) ) continue res = self.index(sha1, raw_content, **kwargs) if res: # If no results, skip it results.extend(res) summary["status"] = "eventful" summary = self.persist_index_computations(results) self.results = results except Exception: if not self.catch_exceptions: raise self.log.exception("Problem when reading contents metadata.") sentry_sdk.capture_exception() summary["status"] = "failed" return summary class ContentPartitionIndexer(BaseIndexer[Sha1, bytes, TResult], Generic[TResult]): """A content partition indexer. This expects as input a partition_id and a nb_partitions. This will then index the contents within that partition. To work on a list of ids, use the :class:`ContentIndexer` instead. Note: :class:`ContentPartitionIndexer` is not an instantiable object. To use it, one should inherit from this class and override the methods mentioned in the :class:`BaseIndexer` class. """ @abc.abstractmethod def indexed_contents_in_partition( self, partition_id: int, nb_partitions: int ) -> Iterable[Sha1]: """Retrieve indexed contents within range [start, end]. 
Args: partition_id: Index of the partition to fetch nb_partitions: Total number of partitions to split into page_token: opaque token used for pagination """ pass def _list_contents_to_index( self, partition_id: int, nb_partitions: int, indexed: Set[Sha1] ) -> Iterable[Sha1]: """Compute from storage the new contents to index in the partition_id . The already indexed contents are skipped. Args: partition_id: Index of the partition to fetch data from nb_partitions: Total number of partition indexed: Set of content already indexed. Yields: Sha1 id (bytes) of contents to index """ if not isinstance(partition_id, int) or not isinstance(nb_partitions, int): raise TypeError( f"identifiers must be int, not {partition_id!r} and {nb_partitions!r}." ) next_page_token = None while True: result = self.storage.content_get_partition( partition_id, nb_partitions, page_token=next_page_token ) contents = result.results for c in contents: _id = hashutil.hash_to_bytes(c.sha1) if _id in indexed: continue yield _id next_page_token = result.next_page_token if next_page_token is None: break def _index_contents( self, partition_id: int, nb_partitions: int, indexed: Set[Sha1], **kwargs: Any ) -> Iterator[TResult]: """Index the contents within the partition_id. Args: start: Starting bound from range identifier end: End range identifier indexed: Set of content already indexed. 
Yields: indexing result as dict to persist in the indexer backend """ for sha1 in self._list_contents_to_index(partition_id, nb_partitions, indexed): try: raw_content = self.objstorage.get(sha1) except ObjNotFoundError: self.log.warning(f"Content {sha1.hex()} not found in objstorage") continue yield from self.index(sha1, raw_content, **kwargs) def _index_with_skipping_already_done( self, partition_id: int, nb_partitions: int ) -> Iterator[TResult]: """Index not already indexed contents within the partition partition_id Args: partition_id: Index of the partition to fetch nb_partitions: Total number of partitions to split into Yields: indexing result as dict to persist in the indexer backend """ already_indexed_contents = set( self.indexed_contents_in_partition(partition_id, nb_partitions) ) return self._index_contents( partition_id, nb_partitions, already_indexed_contents ) def run( self, partition_id: int, nb_partitions: int, skip_existing: bool = True, **kwargs, ) -> Dict: """Given a partition of content ids, index the contents within. Either the indexer is incremental (filter out existing computed data) or it computes everything from scratch. 
Args: partition_id: Index of the partition to fetch nb_partitions: Total number of partitions to split into skip_existing: Skip existing indexed data (default) or not **kwargs: passed to the `index` method Returns: dict with the indexing task status """ summary: Dict[str, Any] = {"status": "uneventful"} count = 0 try: if skip_existing: gen = self._index_with_skipping_already_done( partition_id, nb_partitions ) else: gen = self._index_contents(partition_id, nb_partitions, indexed=set([])) count_object_added_key: Optional[str] = None for contents in utils.grouper(gen, n=self.config["write_batch_size"]): res = self.persist_index_computations(list(contents)) if not count_object_added_key: count_object_added_key = list(res.keys())[0] count += res[count_object_added_key] if count > 0: summary["status"] = "eventful" except Exception: if not self.catch_exceptions: raise self.log.exception("Problem when computing metadata.") sentry_sdk.capture_exception() summary["status"] = "failed" if count > 0 and count_object_added_key: summary[count_object_added_key] = count return summary class OriginIndexer(BaseIndexer[str, None, TResult], Generic[TResult]): """An object type indexer, inherits from the :class:`BaseIndexer` and implements Origin indexing using the run method Note: the :class:`OriginIndexer` is not an instantiable object. To use it in another context one should inherit from this class and override the methods mentioned in the :class:`BaseIndexer` class. """ def run(self, origin_urls: List[str], **kwargs) -> Dict: """Given a list of origin urls: - retrieve origins from storage - execute the indexing computations - store the results Args: origin_urls: list of origin urls. 
**kwargs: passed to the `index` method """ if "policy_update" in kwargs: warnings.warn( "'policy_update' argument is deprecated and ignored.", DeprecationWarning, ) del kwargs["policy_update"] origins = [{"url": url} for url in origin_urls] return self.process_journal_objects({"origin": origins}) def process_journal_objects(self, objects: ObjectsDict) -> Dict: - """Worker function for ``JournalClient``. Expects ``objects`` to have a single - key, either ``origin`` or ``"origin_visit_status"``.""" + """Worker function for ``JournalClient``.""" origins = [ Origin(url=status["origin"]) for status in objects.get("origin_visit_status", []) if status["status"] == "full" ] + [Origin(url=origin["url"]) for origin in objects.get("origin", [])] summary: Dict[str, Any] = {"status": "uneventful"} try: results = self.index_list( origins, check_origin_known=False, # no need to check they exist, as we just received either an origin or # visit status; which cannot be created by swh-storage unless the origin # already exists ) except Exception: if not self.catch_exceptions: raise summary["status"] = "failed" return summary summary_persist = self.persist_index_computations(results) self.results = results if summary_persist: for value in summary_persist.values(): if value > 0: summary["status"] = "eventful" summary.update(summary_persist) return summary def index_list(self, origins: List[Origin], **kwargs) -> List[TResult]: results = [] for origin in origins: try: results.extend(self.index(origin.url, **kwargs)) except Exception: self.log.exception("Problem when processing origin %s", origin.url) sentry_sdk.capture_exception() raise return results class DirectoryIndexer(BaseIndexer[Sha1Git, Directory, TResult], Generic[TResult]): """An object type indexer, inherits from the :class:`BaseIndexer` and implements Directory indexing using the run method Note: the :class:`DirectoryIndexer` is not an instantiable object. 
To use it in another context one should inherit from this class and override the methods mentioned in the :class:`BaseIndexer` class. """ def run(self, ids: List[Sha1Git], **kwargs) -> Dict: """Given a list of sha1_gits: - retrieve directories from storage - execute the indexing computations - store the results Args: ids: sha1_git's identifier list """ if "policy_update" in kwargs: warnings.warn( "'policy_update' argument is deprecated and ignored.", DeprecationWarning, ) del kwargs["policy_update"] directory_ids = [ hashutil.hash_to_bytes(id_) if isinstance(id_, str) else id_ for id_ in ids ] return self._process_directories([(dir_id, None) for dir_id in directory_ids]) def process_journal_objects(self, objects: ObjectsDict) -> Dict: - """Worker function for ``JournalClient``. Expects ``objects`` to have a single - key, ``"directory"``.""" - assert set(objects) == {"directory"} + """Worker function for ``JournalClient``.""" return self._process_directories( - [(dir_["id"], Directory.from_dict(dir_)) for dir_ in objects["directory"]] + [ + (dir_["id"], Directory.from_dict(dir_)) + for dir_ in objects.get("directory", []) + ] ) def _process_directories( self, directories: Union[List[Tuple[Sha1Git, Directory]], List[Tuple[Sha1Git, None]]], ) -> Dict: summary: Dict[str, Any] = {"status": "uneventful"} results = [] # TODO: fetch raw_manifest when useful? 
for (dir_id, dir_) in directories: try: results.extend(self.index(dir_id, dir_)) except Exception: if not self.catch_exceptions: raise self.log.exception("Problem when processing directory") sentry_sdk.capture_exception() summary["status"] = "failed" summary_persist = self.persist_index_computations(results) if summary_persist: for value in summary_persist.values(): if value > 0: summary["status"] = "eventful" summary.update(summary_persist) self.results = results return summary diff --git a/swh/indexer/metadata.py b/swh/indexer/metadata.py index ac0920b..76be504 100644 --- a/swh/indexer/metadata.py +++ b/swh/indexer/metadata.py @@ -1,450 +1,538 @@ # Copyright (C) 2017-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from copy import deepcopy from typing import ( Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, TypeVar, + cast, ) +from urllib.parse import urlparse import sentry_sdk from swh.core.config import merge_configs from swh.core.utils import grouper from swh.indexer.codemeta import merge_documents -from swh.indexer.indexer import ContentIndexer, DirectoryIndexer, OriginIndexer +from swh.indexer.indexer import ( + BaseIndexer, + ContentIndexer, + DirectoryIndexer, + ObjectsDict, + OriginIndexer, +) from swh.indexer.metadata_detector import detect_metadata -from swh.indexer.metadata_dictionary import MAPPINGS +from swh.indexer.metadata_dictionary import EXTRINSIC_MAPPINGS, INTRINSIC_MAPPINGS +from swh.indexer.metadata_dictionary.base import DirectoryLsEntry from swh.indexer.origin_head import get_head_swhid from swh.indexer.storage import INDEXER_CFG_KEY, Sha1 from swh.indexer.storage.model import ( ContentMetadataRow, DirectoryIntrinsicMetadataRow, + OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from swh.model import hashutil -from swh.model.model import 
Directory +from swh.model.model import Directory, MetadataAuthorityType from swh.model.model import ObjectType as ModelObjectType -from swh.model.model import Origin, Sha1Git -from swh.model.swhids import CoreSWHID, ObjectType +from swh.model.model import Origin, RawExtrinsicMetadata, Sha1Git +from swh.model.swhids import CoreSWHID, ExtendedObjectType, ObjectType REVISION_GET_BATCH_SIZE = 10 RELEASE_GET_BATCH_SIZE = 10 ORIGIN_GET_BATCH_SIZE = 10 T1 = TypeVar("T1") T2 = TypeVar("T2") def call_with_batches( f: Callable[[List[T1]], Iterable[T2]], args: List[T1], batch_size: int, ) -> Iterator[T2]: """Calls a function with batches of args, and concatenates the results.""" groups = grouper(args, batch_size) for group in groups: yield from f(list(group)) +class ExtrinsicMetadataIndexer( + BaseIndexer[Sha1Git, RawExtrinsicMetadata, OriginExtrinsicMetadataRow] +): + def process_journal_objects(self, objects: ObjectsDict) -> Dict: + summary: Dict[str, Any] = {"status": "uneventful"} + try: + results = [] + for item in objects.get("raw_extrinsic_metadata", []): + results.extend( + self.index(item["id"], data=RawExtrinsicMetadata.from_dict(item)) + ) + except Exception: + if not self.catch_exceptions: + raise + summary["status"] = "failed" + return summary + + summary_persist = self.persist_index_computations(results) + self.results = results + if summary_persist: + for value in summary_persist.values(): + if value > 0: + summary["status"] = "eventful" + summary.update(summary_persist) + return summary + + def index( + self, + id: Sha1Git, + data: Optional[RawExtrinsicMetadata], + **kwargs, + ) -> List[OriginExtrinsicMetadataRow]: + if data is None: + raise NotImplementedError( + "ExtrinsicMetadataIndexer.index() without RawExtrinsicMetadata data" + ) + if data.target.object_type != ExtendedObjectType.ORIGIN: + # other types are not supported yet + return [] + + if data.authority.type != MetadataAuthorityType.FORGE: + # metadata provided by a third-party; don't trust it + # 
(technically this could be handled below, but we check it here + # to return early; sparing a translation and origin lookup) + # TODO: add ways to define trusted authorities + return [] + + metadata_items = [] + mappings = [] + for (mapping_name, mapping) in EXTRINSIC_MAPPINGS.items(): + if data.format in mapping.extrinsic_metadata_formats(): + metadata_item = mapping().translate(data.metadata) + if metadata_item is not None: + metadata_items.append(metadata_item) + mappings.append(mapping_name) + + if not metadata_items: + # Don't have any mapping to parse it, ignore + return [] + + # TODO: batch requests to origin_get_by_sha1() + origins = self.storage.origin_get_by_sha1([data.target.object_id]) + try: + (origin,) = origins + if origin is None: + raise ValueError() + except ValueError: + raise ValueError(f"Unknown origin {data.target}") from None + + if urlparse(data.authority.url).netloc != urlparse(origin["url"]).netloc: + # metadata provided by a third-party; don't trust it + # TODO: add ways to define trusted authorities + return [] + + metadata = merge_documents(metadata_items) + + return [ + OriginExtrinsicMetadataRow( + id=origin["url"], + indexer_configuration_id=self.tool["id"], + from_remd_id=data.id, + mappings=mappings, + metadata=metadata, + ) + ] + + def persist_index_computations( + self, results: List[OriginExtrinsicMetadataRow] + ) -> Dict[str, int]: + """Persist the results in storage.""" + return self.idx_storage.origin_extrinsic_metadata_add(results) + + class ContentMetadataIndexer(ContentIndexer[ContentMetadataRow]): """Content-level indexer This indexer is in charge of: - filtering out content already indexed in content_metadata - reading content from objstorage with the content's id sha1 - computing metadata by given context - using the metadata_dictionary as the 'swh-metadata-translator' tool - store result in content_metadata table """ def filter(self, ids): """Filter out known sha1s and return only missing ones.""" yield from 
self.idx_storage.content_metadata_missing( ( { "id": sha1, "indexer_configuration_id": self.tool["id"], } for sha1 in ids ) ) def index( self, id: Sha1, data: Optional[bytes] = None, log_suffix="unknown directory", **kwargs, ) -> List[ContentMetadataRow]: """Index sha1s' content and store result. Args: id: content's identifier data: raw content in bytes Returns: dict: dictionary representing a content_metadata. If the translation wasn't successful the metadata keys will be returned as None """ assert isinstance(id, bytes) assert data is not None metadata = None try: mapping_name = self.tool["tool_configuration"]["context"] log_suffix += ", content_id=%s" % hashutil.hash_to_hex(id) - metadata = MAPPINGS[mapping_name](log_suffix).translate(data) + metadata = INTRINSIC_MAPPINGS[mapping_name](log_suffix).translate(data) except Exception: self.log.exception( "Problem during metadata translation " "for content %s" % hashutil.hash_to_hex(id) ) sentry_sdk.capture_exception() if metadata is None: return [] return [ ContentMetadataRow( id=id, indexer_configuration_id=self.tool["id"], metadata=metadata, ) ] def persist_index_computations( self, results: List[ContentMetadataRow] ) -> Dict[str, int]: - """Persist the results in storage. 
- - Args: - results: list of content_metadata, dict with the - following keys: - - id (bytes): content's identifier (sha1) - - metadata (jsonb): detected metadata - - """ + """Persist the results in storage.""" return self.idx_storage.content_metadata_add(results) DEFAULT_CONFIG: Dict[str, Any] = { "tools": { "name": "swh-metadata-detector", "version": "0.0.2", "configuration": {}, }, } class DirectoryMetadataIndexer(DirectoryIndexer[DirectoryIntrinsicMetadataRow]): """Directory-level indexer This indexer is in charge of: - filtering directories already indexed in directory_intrinsic_metadata table with defined computation tool - retrieve all entry_files in directory - use metadata_detector for file_names containing metadata - compute metadata translation if necessary and possible (depends on tool) - send sha1s to content indexing if possible - store the results for directory """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.config = merge_configs(DEFAULT_CONFIG, self.config) def filter(self, sha1_gits): """Filter out known sha1s and return only missing ones.""" yield from self.idx_storage.directory_intrinsic_metadata_missing( ( { "id": sha1_git, "indexer_configuration_id": self.tool["id"], } for sha1_git in sha1_gits ) ) def index( self, id: Sha1Git, data: Optional[Directory] = None, **kwargs ) -> List[DirectoryIntrinsicMetadataRow]: """Index directory by processing it and organizing result. 
- use metadata_detector to iterate on filenames - - - if one filename detected -> sends file to content indexer - - if multiple file detected -> translation needed at directory level + use metadata_detector to iterate on filenames, passes them to the content + indexers, then merges (if more than one) Args: id: sha1_git of the directory - data: directory model object from storage + data: should always be None Returns: dict: dictionary representing a directory_intrinsic_metadata, with keys: - id: directory's identifier (sha1_git) - indexer_configuration_id (bytes): tool used - metadata: dict of retrieved metadata """ - if data is None: - dir_ = list(self.storage.directory_ls(id, recursive=False)) - else: - assert isinstance(data, Directory) - dir_ = data.to_dict() + dir_: List[DirectoryLsEntry] + assert data is None, "Unexpected directory object" + dir_ = cast( + List[DirectoryLsEntry], + list(self.storage.directory_ls(id, recursive=False)), + ) try: if [entry["type"] for entry in dir_] == ["dir"]: # If the root is just a single directory, recurse into it # eg. PyPI packages, GNU tarballs subdir = dir_[0]["target"] - dir_ = list(self.storage.directory_ls(subdir, recursive=False)) + dir_ = cast( + List[DirectoryLsEntry], + list(self.storage.directory_ls(subdir, recursive=False)), + ) files = [entry for entry in dir_ if entry["type"] == "file"] - detected_files = detect_metadata(files) (mappings, metadata) = self.translate_directory_intrinsic_metadata( - detected_files, + files, log_suffix="directory=%s" % hashutil.hash_to_hex(id), ) except Exception as e: self.log.exception("Problem when indexing dir: %r", e) sentry_sdk.capture_exception() + return [] return [ DirectoryIntrinsicMetadataRow( id=id, indexer_configuration_id=self.tool["id"], mappings=mappings, metadata=metadata, ) ] def persist_index_computations( self, results: List[DirectoryIntrinsicMetadataRow] ) -> Dict[str, int]: - """Persist the results in storage. 
- - Args: - results: list of content_mimetype, dict with the - following keys: - - id (bytes): content's identifier (sha1) - - mimetype (bytes): mimetype in bytes - - encoding (bytes): encoding in bytes - - """ + """Persist the results in storage.""" # TODO: add functions in storage to keep data in # directory_intrinsic_metadata return self.idx_storage.directory_intrinsic_metadata_add(results) def translate_directory_intrinsic_metadata( - self, detected_files: Dict[str, List[Any]], log_suffix: str + self, files: List[DirectoryLsEntry], log_suffix: str ) -> Tuple[List[Any], Any]: """ - Determine plan of action to translate metadata when containing - one or multiple detected files: + Determine plan of action to translate metadata in the given root directory Args: - detected_files: dictionary mapping context names (e.g., - "npm", "authors") to list of sha1 + files: list of file entries, as returned by + :meth:`swh.storage.interface.StorageInterface.directory_ls` Returns: (List[str], dict): list of mappings used and dict with translated metadata according to the CodeMeta vocabulary """ - used_mappings = [MAPPINGS[context].name for context in detected_files] metadata = [] tool = { "name": "swh-metadata-translator", "version": "0.0.2", "configuration": {}, } # TODO: iterate on each context, on each file # -> get raw_contents # -> translate each content config = {k: self.config[k] for k in [INDEXER_CFG_KEY, "objstorage", "storage"]} config["tools"] = [tool] - for context in detected_files.keys(): + all_detected_files = detect_metadata(files) + used_mappings = [ + INTRINSIC_MAPPINGS[context].name for context in all_detected_files + ] + for (mapping_name, detected_files) in all_detected_files.items(): cfg = deepcopy(config) - cfg["tools"][0]["configuration"]["context"] = context + cfg["tools"][0]["configuration"]["context"] = mapping_name c_metadata_indexer = ContentMetadataIndexer(config=cfg) # sha1s that are in content_metadata table sha1s_in_storage = [] - 
metadata_generator = self.idx_storage.content_metadata_get( - detected_files[context] - ) + metadata_generator = self.idx_storage.content_metadata_get(detected_files) for c in metadata_generator: # extracting metadata sha1 = c.id sha1s_in_storage.append(sha1) local_metadata = c.metadata # local metadata is aggregated if local_metadata: metadata.append(local_metadata) sha1s_filtered = [ - item for item in detected_files[context] if item not in sha1s_in_storage + item for item in detected_files if item not in sha1s_in_storage ] if sha1s_filtered: # content indexing try: c_metadata_indexer.run( sha1s_filtered, log_suffix=log_suffix, ) # on the fly possibility: for result in c_metadata_indexer.results: local_metadata = result.metadata metadata.append(local_metadata) except Exception: self.log.exception("Exception while indexing metadata on contents") sentry_sdk.capture_exception() metadata = merge_documents(metadata) return (used_mappings, metadata) class OriginMetadataIndexer( OriginIndexer[Tuple[OriginIntrinsicMetadataRow, DirectoryIntrinsicMetadataRow]] ): USE_TOOLS = False def __init__(self, config=None, **kwargs) -> None: super().__init__(config=config, **kwargs) self.directory_metadata_indexer = DirectoryMetadataIndexer(config=config) def index_list( self, origins: List[Origin], check_origin_known: bool = True, **kwargs ) -> List[Tuple[OriginIntrinsicMetadataRow, DirectoryIntrinsicMetadataRow]]: head_rev_ids = [] head_rel_ids = [] origin_heads: Dict[Origin, CoreSWHID] = {} # Filter out origins not in the storage if check_origin_known: known_origins = list( call_with_batches( self.storage.origin_get, [origin.url for origin in origins], ORIGIN_GET_BATCH_SIZE, ) ) else: known_origins = list(origins) for origin in known_origins: if origin is None: continue head_swhid = get_head_swhid(self.storage, origin.url) if head_swhid: origin_heads[origin] = head_swhid if head_swhid.object_type == ObjectType.REVISION: head_rev_ids.append(head_swhid.object_id) elif 
head_swhid.object_type == ObjectType.RELEASE: head_rel_ids.append(head_swhid.object_id) else: assert False, head_swhid head_revs = dict( zip( head_rev_ids, call_with_batches( self.storage.revision_get, head_rev_ids, REVISION_GET_BATCH_SIZE ), ) ) head_rels = dict( zip( head_rel_ids, call_with_batches( self.storage.release_get, head_rel_ids, RELEASE_GET_BATCH_SIZE ), ) ) results = [] for (origin, head_swhid) in origin_heads.items(): if head_swhid.object_type == ObjectType.REVISION: rev = head_revs[head_swhid.object_id] if not rev: self.log.warning( "Missing head object %s of origin %r", head_swhid, origin.url ) continue directory_id = rev.directory elif head_swhid.object_type == ObjectType.RELEASE: rel = head_rels[head_swhid.object_id] if not rel: self.log.warning( "Missing head object %s of origin %r", head_swhid, origin.url ) continue if rel.target_type != ModelObjectType.DIRECTORY: # TODO self.log.warning( "Head release %s of %r has unexpected target type %s", head_swhid, origin.url, rel.target_type, ) continue assert rel.target, rel directory_id = rel.target else: assert False, head_swhid for dir_metadata in self.directory_metadata_indexer.index(directory_id): # There is at most one dir_metadata orig_metadata = OriginIntrinsicMetadataRow( from_directory=dir_metadata.id, id=origin.url, metadata=dir_metadata.metadata, mappings=dir_metadata.mappings, indexer_configuration_id=dir_metadata.indexer_configuration_id, ) results.append((orig_metadata, dir_metadata)) return results def persist_index_computations( self, results: List[Tuple[OriginIntrinsicMetadataRow, DirectoryIntrinsicMetadataRow]], ) -> Dict[str, int]: # Deduplicate directories dir_metadata: List[DirectoryIntrinsicMetadataRow] = [] orig_metadata: List[OriginIntrinsicMetadataRow] = [] summary: Dict = {} for (orig_item, dir_item) in results: assert dir_item.metadata == orig_item.metadata if dir_item.metadata and not (dir_item.metadata.keys() <= {"@context"}): # Only store non-empty metadata sets if dir_item 
not in dir_metadata: dir_metadata.append(dir_item) if orig_item not in orig_metadata: orig_metadata.append(orig_item) if dir_metadata: summary_dir = self.idx_storage.directory_intrinsic_metadata_add( dir_metadata ) summary.update(summary_dir) if orig_metadata: summary_ori = self.idx_storage.origin_intrinsic_metadata_add(orig_metadata) summary.update(summary_ori) return summary diff --git a/swh/indexer/metadata_detector.py b/swh/indexer/metadata_detector.py index b8e99b5..9482d0d 100644 --- a/swh/indexer/metadata_detector.py +++ b/swh/indexer/metadata_detector.py @@ -1,24 +1,28 @@ -# Copyright (C) 2017 The Software Heritage developers +# Copyright (C) 2017-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from swh.indexer.metadata_dictionary import MAPPINGS +from typing import Dict, List +from swh.indexer.metadata_dictionary import INTRINSIC_MAPPINGS +from swh.indexer.metadata_dictionary.base import DirectoryLsEntry +from swh.indexer.storage.interface import Sha1 -def detect_metadata(files): + +def detect_metadata(files: List[DirectoryLsEntry]) -> Dict[str, List[Sha1]]: """ Detects files potentially containing metadata Args: file_entries (list): list of files Returns: dict: {mapping_filenames[name]:f['sha1']} (may be empty) """ results = {} - for (mapping_name, mapping) in MAPPINGS.items(): + for (mapping_name, mapping) in INTRINSIC_MAPPINGS.items(): matches = mapping.detect_metadata_files(files) if matches: results[mapping_name] = matches return results diff --git a/swh/indexer/metadata_dictionary/__init__.py b/swh/indexer/metadata_dictionary/__init__.py index d33bc98..2d67c15 100644 --- a/swh/indexer/metadata_dictionary/__init__.py +++ b/swh/indexer/metadata_dictionary/__init__.py @@ -1,40 +1,56 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the 
top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + import collections +from typing import Dict, Type import click -from . import cff, codemeta, maven, npm, python, ruby +from . import cff, codemeta, composer, dart, github, maven, npm, python, ruby +from .base import BaseExtrinsicMapping, BaseIntrinsicMapping, BaseMapping -MAPPINGS = { +INTRINSIC_MAPPINGS: Dict[str, Type[BaseIntrinsicMapping]] = { + "CffMapping": cff.CffMapping, "CodemetaMapping": codemeta.CodemetaMapping, + "GemspecMapping": ruby.GemspecMapping, "MavenMapping": maven.MavenMapping, "NpmMapping": npm.NpmMapping, + "PubMapping": dart.PubspecMapping, "PythonPkginfoMapping": python.PythonPkginfoMapping, - "GemspecMapping": ruby.GemspecMapping, - "CffMapping": cff.CffMapping, + "ComposerMapping": composer.ComposerMapping, } +EXTRINSIC_MAPPINGS: Dict[str, Type[BaseExtrinsicMapping]] = { + "GitHubMapping": github.GitHubMapping, +} + + +MAPPINGS: Dict[str, Type[BaseMapping]] = {**INTRINSIC_MAPPINGS, **EXTRINSIC_MAPPINGS} + def list_terms(): """Returns a dictionary with all supported CodeMeta terms as keys, and the mappings that support each of them as values.""" d = collections.defaultdict(set) for mapping in MAPPINGS.values(): for term in mapping.supported_terms(): d[term].add(mapping) return d @click.command() @click.argument("mapping_name") @click.argument("file_name") def main(mapping_name: str, file_name: str): from pprint import pprint with open(file_name, "rb") as fd: file_content = fd.read() res = MAPPINGS[mapping_name]().translate(file_content) pprint(res) if __name__ == "__main__": main() diff --git a/swh/indexer/metadata_dictionary/base.py b/swh/indexer/metadata_dictionary/base.py index 774875c..601dc6b 100644 --- a/swh/indexer/metadata_dictionary/base.py +++ b/swh/indexer/metadata_dictionary/base.py @@ -1,180 +1,270 @@ -# Copyright (C) 2017-2019 The Software Heritage developers +# 
Copyright (C) 2017-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json import logging -from typing import Any, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar + +from typing_extensions import TypedDict +import yaml from swh.indexer.codemeta import SCHEMA_URI, compact, merge_values +from swh.indexer.storage.interface import Sha1 -class BaseMapping: - """Base class for mappings to inherit from +class DirectoryLsEntry(TypedDict): + target: Sha1 + sha1: Sha1 + name: bytes + type: str - To implement a new mapping: - - inherit this class - - override translate function - """ +TTranslateCallable = TypeVar( + "TTranslateCallable", bound=Callable[[Any, Dict[str, Any], Any], None] +) + + +def produce_terms( + namespace: str, terms: List[str] +) -> Callable[[TTranslateCallable], TTranslateCallable]: + """Returns a decorator that marks the decorated function as adding + the given terms to the ``translated_metadata`` dict""" + + def decorator(f: TTranslateCallable) -> TTranslateCallable: + if not hasattr(f, "produced_terms"): + f.produced_terms = [] # type: ignore + f.produced_terms.extend(namespace + term for term in terms) # type: ignore + return f + + return decorator + + +class BaseMapping: + """Base class for :class:`BaseExtrinsicMapping` and :class:`BaseIntrinsicMapping`, + not to be inherited directly.""" def __init__(self, log_suffix=""): self.log_suffix = log_suffix self.log = logging.getLogger( "%s.%s" % (self.__class__.__module__, self.__class__.__name__) ) @property def name(self): """A name of this mapping, used as an identifier in the indexer storage.""" raise NotImplementedError(f"{self.__class__.__name__}.name") + def translate(self, file_content: bytes) -> Optional[Dict]: + """Translates metadata, from the content of a file or of a 
RawExtrinsicMetadata + object.""" + raise NotImplementedError(f"{self.__class__.__name__}.translate") + + def normalize_translation(self, metadata: Dict[str, Any]) -> Dict[str, Any]: + raise NotImplementedError(f"{self.__class__.__name__}.normalize_translation") + + +class BaseExtrinsicMapping(BaseMapping): + """Base class for extrinsic-metadata mappings to inherit from + + To implement a new mapping: + + - inherit this class + - override translate function + """ + @classmethod - def detect_metadata_files(cls, files: List[Dict[str, str]]) -> List[str]: + def extrinsic_metadata_formats(cls) -> Tuple[str, ...]: + """ + Returns the list of extrinsic metadata formats which can be translated + by this mapping """ - Detects files potentially containing metadata + raise NotImplementedError(f"{cls.__name__}.extrinsic_metadata_formats") - Args: - file_entries (list): list of files + def normalize_translation(self, metadata: Dict[str, Any]) -> Dict[str, Any]: + return compact(metadata, forgefed=True) - Returns: - list: list of sha1 (possibly empty) + +class BaseIntrinsicMapping(BaseMapping): + """Base class for intrinsic-metadata mappings to inherit from + + To implement a new mapping: + + - inherit this class + - override translate function + """ + + @classmethod + def detect_metadata_files(cls, file_entries: List[DirectoryLsEntry]) -> List[Sha1]: + """ + Returns the sha1 hashes of files which can be translated by this mapping """ raise NotImplementedError(f"{cls.__name__}.detect_metadata_files") - def translate(self, file_content: bytes) -> Optional[Dict]: - raise NotImplementedError(f"{self.__class__.__name__}.translate") - def normalize_translation(self, metadata: Dict[str, Any]) -> Dict[str, Any]: - return compact(metadata) + return compact(metadata, forgefed=False) -class SingleFileMapping(BaseMapping): - """Base class for all mappings that use a single file as input.""" +class SingleFileIntrinsicMapping(BaseIntrinsicMapping): + """Base class for all intrinsic metadata 
mappings that use a single file as input.""" @property def filename(self): """The .json file to extract metadata from.""" raise NotImplementedError(f"{self.__class__.__name__}.filename") @classmethod - def detect_metadata_files(cls, file_entries: List[Dict[str, str]]) -> List[str]: + def detect_metadata_files(cls, file_entries: List[DirectoryLsEntry]) -> List[Sha1]: for entry in file_entries: if entry["name"].lower() == cls.filename: return [entry["sha1"]] return [] class DictMapping(BaseMapping): """Base class for mappings that take as input a file that is mostly a key-value store (eg. a shallow JSON dict).""" string_fields = [] # type: List[str] """List of fields that are simple strings, and don't need any normalization.""" @property def mapping(self): """A translation dict to map dict keys into a canonical name.""" raise NotImplementedError(f"{self.__class__.__name__}.mapping") @staticmethod def _normalize_method_name(name: str) -> str: return name.replace("-", "_") @classmethod def supported_terms(cls): - return { + # one-to-one mapping from the original key to a CodeMeta term + simple_terms = { term for (key, term) in cls.mapping.items() if key in cls.string_fields - or hasattr(cls, "translate_" + cls._normalize_method_name(key)) or hasattr(cls, "normalize_" + cls._normalize_method_name(key)) } + # more complex mapping from the original key to JSON-LD + complex_terms = { + term + for meth_name in dir(cls) + if meth_name.startswith("translate_") + for term in getattr(getattr(cls, meth_name), "produced_terms", []) + } + + return simple_terms | complex_terms + def _translate_dict( self, content_dict: Dict, *, normalize: bool = True ) -> Dict[str, str]: """ Translates content by parsing content from a dict object and translating with the appropriate mapping Args: content_dict (dict): content dict to translate Returns: dict: translated metadata in json-friendly form needed for the indexer """ translated_metadata = {"@type": SCHEMA_URI + "SoftwareSourceCode"} for k, 
v in content_dict.items(): # First, check if there is a specific translation # method for this key translation_method = getattr( self, "translate_" + self._normalize_method_name(k), None ) if translation_method: translation_method(translated_metadata, v) elif k in self.mapping: # if there is no method, but the key is known from the # crosswalk table codemeta_key = self.mapping[k] # if there is a normalization method, use it on the value normalization_method = getattr( self, "normalize_" + self._normalize_method_name(k), None ) if normalization_method: v = normalization_method(v) elif k in self.string_fields and isinstance(v, str): pass elif k in self.string_fields and isinstance(v, list): v = [x for x in v if isinstance(x, str)] else: continue # set the translation metadata with the normalized value if codemeta_key in translated_metadata: translated_metadata[codemeta_key] = merge_values( translated_metadata[codemeta_key], v ) else: translated_metadata[codemeta_key] = v + if normalize: return self.normalize_translation(translated_metadata) else: return translated_metadata -class JsonMapping(DictMapping, SingleFileMapping): - """Base class for all mappings that use a JSON file as input.""" +class JsonMapping(DictMapping): + """Base class for all mappings that use JSON data as input.""" def translate(self, raw_content: bytes) -> Optional[Dict]: """ Translates content by parsing content from a bytestring containing json data and translating with the appropriate mapping Args: raw_content (bytes): raw content to translate Returns: dict: translated metadata in json-friendly form needed for the indexer """ try: raw_content_string: str = raw_content.decode() except UnicodeDecodeError: self.log.warning("Error unidecoding from %s", self.log_suffix) return None try: content_dict = json.loads(raw_content_string) except json.JSONDecodeError: self.log.warning("Error unjsoning from %s", self.log_suffix) return None if isinstance(content_dict, dict): return 
self._translate_dict(content_dict) return None + + +class SafeLoader(yaml.SafeLoader): + yaml_implicit_resolvers = { + k: [r for r in v if r[0] != "tag:yaml.org,2002:timestamp"] + for k, v in yaml.SafeLoader.yaml_implicit_resolvers.items() + } + + +class YamlMapping(DictMapping, SingleFileIntrinsicMapping): + """Base class for all mappings that use Yaml data as input.""" + + def translate(self, raw_content: bytes) -> Optional[Dict[str, str]]: + raw_content_string: str = raw_content.decode() + try: + content_dict = yaml.load(raw_content_string, Loader=SafeLoader) + except yaml.scanner.ScannerError: + return None + + if isinstance(content_dict, dict): + return self._translate_dict(content_dict) + + return None diff --git a/swh/indexer/metadata_dictionary/cff.py b/swh/indexer/metadata_dictionary/cff.py index c5aa5a0..286ec77 100644 --- a/swh/indexer/metadata_dictionary/cff.py +++ b/swh/indexer/metadata_dictionary/cff.py @@ -1,76 +1,53 @@ from typing import Dict, List, Optional, Union -import yaml +from swh.indexer.codemeta import CROSSWALK_TABLE, SCHEMA_URI -from swh.indexer.codemeta import CODEMETA_CONTEXT_URL, CROSSWALK_TABLE, SCHEMA_URI +from .base import YamlMapping -from .base import DictMapping, SingleFileMapping - -class SafeLoader(yaml.SafeLoader): - yaml_implicit_resolvers = { - k: [r for r in v if r[0] != "tag:yaml.org,2002:timestamp"] - for k, v in yaml.SafeLoader.yaml_implicit_resolvers.items() - } - - -class CffMapping(DictMapping, SingleFileMapping): +class CffMapping(YamlMapping): """Dedicated class for Citation (CITATION.cff) mapping and translation""" name = "cff" filename = b"CITATION.cff" mapping = CROSSWALK_TABLE["Citation File Format Core (CFF-Core) 1.0.2"] string_fields = ["keywords", "license", "abstract", "version", "doi"] - def translate(self, raw_content: bytes) -> Optional[Dict[str, str]]: - raw_content_string: str = raw_content.decode() - try: - content_dict = yaml.load(raw_content_string, Loader=SafeLoader) - except 
yaml.scanner.ScannerError: - return None - - if isinstance(content_dict, dict): - metadata = self._translate_dict(content_dict) - metadata["@context"] = CODEMETA_CONTEXT_URL - return metadata - - return None - def normalize_authors(self, d: List[dict]) -> Dict[str, list]: result = [] for author in d: author_data: Dict[str, Optional[Union[str, Dict]]] = { "@type": SCHEMA_URI + "Person" } - if "orcid" in author: + if "orcid" in author and isinstance(author["orcid"], str): author_data["@id"] = author["orcid"] - if "affiliation" in author: + if "affiliation" in author and isinstance(author["affiliation"], str): author_data[SCHEMA_URI + "affiliation"] = { "@type": SCHEMA_URI + "Organization", SCHEMA_URI + "name": author["affiliation"], } - if "family-names" in author: + if "family-names" in author and isinstance(author["family-names"], str): author_data[SCHEMA_URI + "familyName"] = author["family-names"] - if "given-names" in author: + if "given-names" in author and isinstance(author["given-names"], str): author_data[SCHEMA_URI + "givenName"] = author["given-names"] result.append(author_data) result_final = {"@list": result} return result_final def normalize_doi(self, s: str) -> Dict[str, str]: if isinstance(s, str): return {"@id": "https://doi.org/" + s} def normalize_license(self, s: str) -> Dict[str, str]: if isinstance(s, str): return {"@id": "https://spdx.org/licenses/" + s} def normalize_repository_code(self, s: str) -> Dict[str, str]: if isinstance(s, str): return {"@id": s} def normalize_date_released(self, s: str) -> Dict[str, str]: if isinstance(s, str): return {"@value": s, "@type": SCHEMA_URI + "Date"} diff --git a/swh/indexer/metadata_dictionary/codemeta.py b/swh/indexer/metadata_dictionary/codemeta.py index 0bbb3fa..f0f0d09 100644 --- a/swh/indexer/metadata_dictionary/codemeta.py +++ b/swh/indexer/metadata_dictionary/codemeta.py @@ -1,31 +1,31 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of 
this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import json from typing import Any, Dict, List, Optional from swh.indexer.codemeta import CODEMETA_TERMS, expand -from .base import SingleFileMapping +from .base import SingleFileIntrinsicMapping -class CodemetaMapping(SingleFileMapping): +class CodemetaMapping(SingleFileIntrinsicMapping): """ dedicated class for CodeMeta (codemeta.json) mapping and translation """ name = "codemeta" filename = b"codemeta.json" string_fields = None @classmethod def supported_terms(cls) -> List[str]: return [term for term in CODEMETA_TERMS if not term.startswith("@")] def translate(self, content: bytes) -> Optional[Dict[str, Any]]: try: return self.normalize_translation(expand(json.loads(content.decode()))) except Exception: return None diff --git a/swh/indexer/metadata_dictionary/composer.py b/swh/indexer/metadata_dictionary/composer.py new file mode 100644 index 0000000..c02f5d8 --- /dev/null +++ b/swh/indexer/metadata_dictionary/composer.py @@ -0,0 +1,56 @@ +# Copyright (C) 2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import os.path + +from swh.indexer.codemeta import _DATA_DIR, SCHEMA_URI, _read_crosstable + +from .base import JsonMapping, SingleFileIntrinsicMapping + +COMPOSER_TABLE_PATH = os.path.join(_DATA_DIR, "composer.csv") + +with open(COMPOSER_TABLE_PATH) as fd: + (CODEMETA_TERMS, COMPOSER_TABLE) = _read_crosstable(fd) + + +class ComposerMapping(JsonMapping, SingleFileIntrinsicMapping): + """Dedicated class for Packagist(composer.json) mapping and translation""" + + name = "composer" + mapping = COMPOSER_TABLE["Composer"] + filename = b"composer.json" + string_fields = [ + "name", + "description", + "version", + "keywords", + "homepage", + "license", 
+ "author", + "authors", + ] + + def normalize_homepage(self, s): + if isinstance(s, str): + return {"@id": s} + + def normalize_license(self, s): + if isinstance(s, str): + return {"@id": "https://spdx.org/licenses/" + s} + + def normalize_authors(self, author_list): + authors = [] + for author in author_list: + author_obj = {"@type": SCHEMA_URI + "Person"} + + if isinstance(author, dict): + if isinstance(author.get("name", None), str): + author_obj[SCHEMA_URI + "name"] = author.get("name", None) + if isinstance(author.get("email", None), str): + author_obj[SCHEMA_URI + "email"] = author.get("email", None) + + authors.append(author_obj) + + return {"@list": authors} diff --git a/swh/indexer/metadata_dictionary/dart.py b/swh/indexer/metadata_dictionary/dart.py new file mode 100644 index 0000000..26cd7d5 --- /dev/null +++ b/swh/indexer/metadata_dictionary/dart.py @@ -0,0 +1,74 @@ +# Copyright (C) 2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import os.path +import re + +from swh.indexer.codemeta import _DATA_DIR, SCHEMA_URI, _read_crosstable + +from .base import YamlMapping + +PUB_TABLE_PATH = os.path.join(_DATA_DIR, "pubspec.csv") + +with open(PUB_TABLE_PATH) as fd: + (CODEMETA_TERMS, PUB_TABLE) = _read_crosstable(fd) + + +def name_to_person(name): + return { + "@type": SCHEMA_URI + "Person", + SCHEMA_URI + "name": name, + } + + +class PubspecMapping(YamlMapping): + + name = "pubspec" + filename = b"pubspec.yaml" + mapping = PUB_TABLE["Pubspec"] + string_fields = [ + "repository", + "keywords", + "description", + "name", + "homepage", + "issue_tracker", + "platforms", + "license" + # license will only be used with the SPDX Identifier + ] + + def normalize_license(self, s): + if isinstance(s, str): + return {"@id": "https://spdx.org/licenses/" + s} + + def normalize_homepage(self, 
s): + if isinstance(s, str): + return {"@id": s} + + def normalize_author(self, s): + name_email_regex = "(?P.*?)( <(?P.*)>)" + author = {"@type": SCHEMA_URI + "Person"} + if isinstance(s, str): + match = re.search(name_email_regex, s) + if match: + name = match.group("name") + email = match.group("email") + author[SCHEMA_URI + "email"] = email + else: + name = s + + author[SCHEMA_URI + "name"] = name + + return {"@list": [author]} + + def normalize_authors(self, authors_list): + authors = {"@list": []} + + if isinstance(authors_list, list): + for s in authors_list: + author = self.normalize_author(s)["@list"] + authors["@list"] += author + return authors diff --git a/swh/indexer/metadata_dictionary/github.py b/swh/indexer/metadata_dictionary/github.py new file mode 100644 index 0000000..020c8d0 --- /dev/null +++ b/swh/indexer/metadata_dictionary/github.py @@ -0,0 +1,130 @@ +# Copyright (C) 2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import json +from typing import Any, Dict, Tuple + +from swh.indexer.codemeta import ACTIVITYSTREAMS_URI, CROSSWALK_TABLE, FORGEFED_URI + +from .base import BaseExtrinsicMapping, JsonMapping, produce_terms + + +def _prettyprint(d): + print(json.dumps(d, indent=4)) + + +class GitHubMapping(BaseExtrinsicMapping, JsonMapping): + name = "github" + mapping = CROSSWALK_TABLE["GitHub"] + string_fields = [ + "archive_url", + "created_at", + "updated_at", + "description", + "full_name", + "html_url", + "issues_url", + ] + + @classmethod + def extrinsic_metadata_formats(cls) -> Tuple[str, ...]: + return ("application/vnd.github.v3+json",) + + def _translate_dict(self, content_dict: Dict[str, Any], **kwargs) -> Dict[str, Any]: + d = super()._translate_dict(content_dict, **kwargs) + d["type"] = FORGEFED_URI + "Repository" + return d + + 
@produce_terms(FORGEFED_URI, ["forks"]) + @produce_terms(ACTIVITYSTREAMS_URI, ["totalItems"]) + def translate_forks_count( + self, translated_metadata: Dict[str, Any], v: Any + ) -> None: + """ + + >>> translated_metadata = {} + >>> GitHubMapping().translate_forks_count(translated_metadata, 42) + >>> _prettyprint(translated_metadata) + { + "https://forgefed.org/ns#forks": [ + { + "@type": "https://www.w3.org/ns/activitystreams#OrderedCollection", + "https://www.w3.org/ns/activitystreams#totalItems": 42 + } + ] + } + """ + if isinstance(v, int): + translated_metadata.setdefault(FORGEFED_URI + "forks", []).append( + { + "@type": ACTIVITYSTREAMS_URI + "OrderedCollection", + ACTIVITYSTREAMS_URI + "totalItems": v, + } + ) + + @produce_terms(ACTIVITYSTREAMS_URI, ["likes"]) + @produce_terms(ACTIVITYSTREAMS_URI, ["totalItems"]) + def translate_stargazers_count( + self, translated_metadata: Dict[str, Any], v: Any + ) -> None: + """ + + >>> translated_metadata = {} + >>> GitHubMapping().translate_stargazers_count(translated_metadata, 42) + >>> _prettyprint(translated_metadata) + { + "https://www.w3.org/ns/activitystreams#likes": [ + { + "@type": "https://www.w3.org/ns/activitystreams#Collection", + "https://www.w3.org/ns/activitystreams#totalItems": 42 + } + ] + } + """ + if isinstance(v, int): + translated_metadata.setdefault(ACTIVITYSTREAMS_URI + "likes", []).append( + { + "@type": ACTIVITYSTREAMS_URI + "Collection", + ACTIVITYSTREAMS_URI + "totalItems": v, + } + ) + + @produce_terms(ACTIVITYSTREAMS_URI, ["followers"]) + @produce_terms(ACTIVITYSTREAMS_URI, ["totalItems"]) + def translate_watchers_count( + self, translated_metadata: Dict[str, Any], v: Any + ) -> None: + """ + + >>> translated_metadata = {} + >>> GitHubMapping().translate_watchers_count(translated_metadata, 42) + >>> _prettyprint(translated_metadata) + { + "https://www.w3.org/ns/activitystreams#followers": [ + { + "@type": "https://www.w3.org/ns/activitystreams#Collection", + 
"https://www.w3.org/ns/activitystreams#totalItems": 42 + } + ] + } + """ + if isinstance(v, int): + translated_metadata.setdefault( + ACTIVITYSTREAMS_URI + "followers", [] + ).append( + { + "@type": ACTIVITYSTREAMS_URI + "Collection", + ACTIVITYSTREAMS_URI + "totalItems": v, + } + ) + + def normalize_license(self, d): + """ + + >>> GitHubMapping().normalize_license({'spdx_id': 'MIT'}) + {'@id': 'https://spdx.org/licenses/MIT'} + """ + if isinstance(d, dict) and isinstance(d.get("spdx_id"), str): + return {"@id": "https://spdx.org/licenses/" + d["spdx_id"]} diff --git a/swh/indexer/metadata_dictionary/maven.py b/swh/indexer/metadata_dictionary/maven.py index ad4c5ed..419eb74 100644 --- a/swh/indexer/metadata_dictionary/maven.py +++ b/swh/indexer/metadata_dictionary/maven.py @@ -1,162 +1,162 @@ # Copyright (C) 2018-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os from typing import Any, Dict, Optional import xml.parsers.expat import xmltodict from swh.indexer.codemeta import CROSSWALK_TABLE, SCHEMA_URI -from .base import DictMapping, SingleFileMapping +from .base import DictMapping, SingleFileIntrinsicMapping -class MavenMapping(DictMapping, SingleFileMapping): +class MavenMapping(DictMapping, SingleFileIntrinsicMapping): """ dedicated class for Maven (pom.xml) mapping and translation """ name = "maven" filename = b"pom.xml" mapping = CROSSWALK_TABLE["Java (Maven)"] string_fields = ["name", "version", "description", "email"] def translate(self, content: bytes) -> Optional[Dict[str, Any]]: try: d = xmltodict.parse(content).get("project") or {} except xml.parsers.expat.ExpatError: self.log.warning("Error parsing XML from %s", self.log_suffix) return None except UnicodeDecodeError: self.log.warning("Error unidecoding XML from %s", self.log_suffix) return None except (LookupError, 
ValueError): # unknown encoding or multi-byte encoding self.log.warning("Error detecting XML encoding from %s", self.log_suffix) return None if not isinstance(d, dict): self.log.warning("Skipping ill-formed XML content: %s", content) return None metadata = self._translate_dict(d, normalize=False) metadata[SCHEMA_URI + "codeRepository"] = self.parse_repositories(d) metadata[SCHEMA_URI + "license"] = self.parse_licenses(d) return self.normalize_translation(metadata) _default_repository = {"url": "https://repo.maven.apache.org/maven2/"} def parse_repositories(self, d): """https://maven.apache.org/pom.html#Repositories >>> import xmltodict >>> from pprint import pprint >>> d = xmltodict.parse(''' ... ... ... codehausSnapshots ... Codehaus Snapshots ... http://snapshots.maven.codehaus.org/maven2 ... default ... ... ... ''') >>> MavenMapping().parse_repositories(d) """ repositories = d.get("repositories") if not repositories: results = [self.parse_repository(d, self._default_repository)] elif isinstance(repositories, dict): repositories = repositories.get("repository") or [] if not isinstance(repositories, list): repositories = [repositories] results = [self.parse_repository(d, repo) for repo in repositories] else: results = [] return [res for res in results if res] or None def parse_repository(self, d, repo): if not isinstance(repo, dict): return if repo.get("layout", "default") != "default": return # TODO ? 
url = repo.get("url") group_id = d.get("groupId") artifact_id = d.get("artifactId") if ( isinstance(url, str) and isinstance(group_id, str) and isinstance(artifact_id, str) ): repo = os.path.join(url, *group_id.split("."), artifact_id) return {"@id": repo} def normalize_groupId(self, id_): """https://maven.apache.org/pom.html#Maven_Coordinates >>> MavenMapping().normalize_groupId('org.example') {'@id': 'org.example'} """ if isinstance(id_, str): return {"@id": id_} def parse_licenses(self, d): """https://maven.apache.org/pom.html#Licenses >>> import xmltodict >>> import json >>> d = xmltodict.parse(''' ... ... ... Apache License, Version 2.0 ... https://www.apache.org/licenses/LICENSE-2.0.txt ... ... ... ''') >>> print(json.dumps(d, indent=4)) { "licenses": { "license": { "name": "Apache License, Version 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0.txt" } } } >>> MavenMapping().parse_licenses(d) [{'@id': 'https://www.apache.org/licenses/LICENSE-2.0.txt'}] or, if there are more than one license: >>> import xmltodict >>> from pprint import pprint >>> d = xmltodict.parse(''' ... ... ... Apache License, Version 2.0 ... https://www.apache.org/licenses/LICENSE-2.0.txt ... ... ... MIT License ... https://opensource.org/licenses/MIT ... ... ... 
''') >>> pprint(MavenMapping().parse_licenses(d)) [{'@id': 'https://www.apache.org/licenses/LICENSE-2.0.txt'}, {'@id': 'https://opensource.org/licenses/MIT'}] """ licenses = d.get("licenses") if not isinstance(licenses, dict): return licenses = licenses.get("license") if isinstance(licenses, dict): licenses = [licenses] elif not isinstance(licenses, list): return return [ {"@id": license["url"]} for license in licenses if isinstance(license, dict) and isinstance(license.get("url"), str) ] or None diff --git a/swh/indexer/metadata_dictionary/npm.py b/swh/indexer/metadata_dictionary/npm.py index 467866d..d05d355 100644 --- a/swh/indexer/metadata_dictionary/npm.py +++ b/swh/indexer/metadata_dictionary/npm.py @@ -1,228 +1,228 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import re from swh.indexer.codemeta import CROSSWALK_TABLE, SCHEMA_URI -from .base import JsonMapping +from .base import JsonMapping, SingleFileIntrinsicMapping -class NpmMapping(JsonMapping): +class NpmMapping(JsonMapping, SingleFileIntrinsicMapping): """ dedicated class for NPM (package.json) mapping and translation """ name = "npm" mapping = CROSSWALK_TABLE["NodeJS"] filename = b"package.json" string_fields = ["name", "version", "homepage", "description", "email"] _schema_shortcuts = { "github": "git+https://github.com/%s.git", "gist": "git+https://gist.github.com/%s.git", "gitlab": "git+https://gitlab.com/%s.git", # Bitbucket supports both hg and git, and the shortcut does not # tell which one to use. # 'bitbucket': 'https://bitbucket.org/', } def normalize_repository(self, d): """https://docs.npmjs.com/files/package.json#repository >>> NpmMapping().normalize_repository({ ... 'type': 'git', ... 'url': 'https://example.org/foo.git' ... 
}) {'@id': 'git+https://example.org/foo.git'} >>> NpmMapping().normalize_repository( ... 'gitlab:foo/bar') {'@id': 'git+https://gitlab.com/foo/bar.git'} >>> NpmMapping().normalize_repository( ... 'foo/bar') {'@id': 'git+https://github.com/foo/bar.git'} """ if ( isinstance(d, dict) and isinstance(d.get("type"), str) and isinstance(d.get("url"), str) ): url = "{type}+{url}".format(**d) elif isinstance(d, str): if "://" in d: url = d elif ":" in d: (schema, rest) = d.split(":", 1) if schema in self._schema_shortcuts: url = self._schema_shortcuts[schema] % rest else: return None else: url = self._schema_shortcuts["github"] % d else: return None return {"@id": url} def normalize_bugs(self, d): """https://docs.npmjs.com/files/package.json#bugs >>> NpmMapping().normalize_bugs({ ... 'url': 'https://example.org/bugs/', ... 'email': 'bugs@example.org' ... }) {'@id': 'https://example.org/bugs/'} >>> NpmMapping().normalize_bugs( ... 'https://example.org/bugs/') {'@id': 'https://example.org/bugs/'} """ if isinstance(d, dict) and isinstance(d.get("url"), str): return {"@id": d["url"]} elif isinstance(d, str): return {"@id": d} else: return None _parse_author = re.compile( r"^ *" r"(?P.*?)" r"( +<(?P.*)>)?" r"( +\((?P.*)\))?" r" *$" ) def normalize_author(self, d): """https://docs.npmjs.com/files/package.json#people-fields-author-contributors' >>> from pprint import pprint >>> pprint(NpmMapping().normalize_author({ ... 'name': 'John Doe', ... 'email': 'john.doe@example.org', ... 'url': 'https://example.org/~john.doe', ... })) {'@list': [{'@type': 'http://schema.org/Person', 'http://schema.org/email': 'john.doe@example.org', 'http://schema.org/name': 'John Doe', 'http://schema.org/url': {'@id': 'https://example.org/~john.doe'}}]} >>> pprint(NpmMapping().normalize_author( ... 'John Doe (https://example.org/~john.doe)' ... 
)) {'@list': [{'@type': 'http://schema.org/Person', 'http://schema.org/email': 'john.doe@example.org', 'http://schema.org/name': 'John Doe', 'http://schema.org/url': {'@id': 'https://example.org/~john.doe'}}]} """ # noqa author = {"@type": SCHEMA_URI + "Person"} if isinstance(d, dict): name = d.get("name", None) email = d.get("email", None) url = d.get("url", None) elif isinstance(d, str): match = self._parse_author.match(d) if not match: return None name = match.group("name") email = match.group("email") url = match.group("url") else: return None if name and isinstance(name, str): author[SCHEMA_URI + "name"] = name if email and isinstance(email, str): author[SCHEMA_URI + "email"] = email if url and isinstance(url, str): author[SCHEMA_URI + "url"] = {"@id": url} return {"@list": [author]} def normalize_description(self, description): r"""Try to re-decode ``description`` as UTF-16, as this is a somewhat common mistake that causes issues in the database because of null bytes in JSON. >>> NpmMapping().normalize_description("foo bar") 'foo bar' >>> NpmMapping().normalize_description( ... "\ufffd\ufffd#\x00 \x00f\x00o\x00o\x00 \x00b\x00a\x00r\x00\r\x00 \x00" ... ) 'foo bar' >>> NpmMapping().normalize_description( ... "\ufffd\ufffd\x00#\x00 \x00f\x00o\x00o\x00 \x00b\x00a\x00r\x00\r\x00 " ... ) 'foo bar' >>> NpmMapping().normalize_description( ... # invalid UTF-16 and meaningless UTF-8: ... "\ufffd\ufffd\x00#\x00\x00\x00 \x00\x00\x00\x00f\x00\x00\x00\x00" ... ) is None True >>> NpmMapping().normalize_description( ... # ditto (ut looks like little-endian at first) ... "\ufffd\ufffd#\x00\x00\x00 \x00\x00\x00\x00f\x00\x00\x00\x00\x00" ... 
) is None True >>> NpmMapping().normalize_description(None) is None True """ if not isinstance(description, str): return None # XXX: if this function ever need to support more cases, consider # switching to https://pypi.org/project/ftfy/ instead of adding more hacks if description.startswith("\ufffd\ufffd") and "\x00" in description: # 2 unicode replacement characters followed by '# ' encoded as UTF-16 # is a common mistake, which indicates a README.md was saved as UTF-16, # and some NPM tool opened it as UTF-8 and used the first line as # description. description_bytes = description.encode() # Strip the the two unicode replacement characters assert description_bytes.startswith(b"\xef\xbf\xbd\xef\xbf\xbd") description_bytes = description_bytes[6:] # If the following attempts fail to recover the description, discard it # entirely because the current indexer storage backend (postgresql) cannot # store zero bytes in JSON columns. description = None if not description_bytes.startswith(b"\x00"): # try UTF-16 little-endian (the most common) first try: description = description_bytes.decode("utf-16le") except UnicodeDecodeError: pass if description is None: # if it fails, try UTF-16 big-endian try: description = description_bytes.decode("utf-16be") except UnicodeDecodeError: pass if description: if description.startswith("# "): description = description[2:] return description.rstrip() return description def normalize_license(self, s): """https://docs.npmjs.com/files/package.json#license >>> NpmMapping().normalize_license('MIT') {'@id': 'https://spdx.org/licenses/MIT'} """ if isinstance(s, str): return {"@id": "https://spdx.org/licenses/" + s} def normalize_homepage(self, s): """https://docs.npmjs.com/files/package.json#homepage >>> NpmMapping().normalize_homepage('https://example.org/~john.doe') {'@id': 'https://example.org/~john.doe'} """ if isinstance(s, str): return {"@id": s} def normalize_keywords(self, lst): """https://docs.npmjs.com/files/package.json#homepage >>> 
NpmMapping().normalize_keywords(['foo', 'bar']) ['foo', 'bar'] """ if isinstance(lst, list): return [x for x in lst if isinstance(x, str)] diff --git a/swh/indexer/metadata_dictionary/python.py b/swh/indexer/metadata_dictionary/python.py index b308294..686deed 100644 --- a/swh/indexer/metadata_dictionary/python.py +++ b/swh/indexer/metadata_dictionary/python.py @@ -1,76 +1,76 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import email.parser import email.policy import itertools from swh.indexer.codemeta import CROSSWALK_TABLE, SCHEMA_URI -from .base import DictMapping, SingleFileMapping +from .base import DictMapping, SingleFileIntrinsicMapping _normalize_pkginfo_key = str.lower class LinebreakPreservingEmailPolicy(email.policy.EmailPolicy): def header_fetch_parse(self, name, value): if hasattr(value, "name"): return value value = value.replace("\n ", "\n") return self.header_factory(name, value) -class PythonPkginfoMapping(DictMapping, SingleFileMapping): +class PythonPkginfoMapping(DictMapping, SingleFileIntrinsicMapping): """Dedicated class for Python's PKG-INFO mapping and translation. 
https://www.python.org/dev/peps/pep-0314/""" name = "pkg-info" filename = b"PKG-INFO" mapping = { _normalize_pkginfo_key(k): v for (k, v) in CROSSWALK_TABLE["Python PKG-INFO"].items() } string_fields = [ "name", "version", "description", "summary", "author", "author-email", ] _parser = email.parser.BytesHeaderParser(policy=LinebreakPreservingEmailPolicy()) def translate(self, content): msg = self._parser.parsebytes(content) d = {} for (key, value) in msg.items(): key = _normalize_pkginfo_key(key) if value != "UNKNOWN": d.setdefault(key, []).append(value) metadata = self._translate_dict(d, normalize=False) if SCHEMA_URI + "author" in metadata or SCHEMA_URI + "email" in metadata: metadata[SCHEMA_URI + "author"] = { "@list": [ { "@type": SCHEMA_URI + "Person", SCHEMA_URI + "name": metadata.pop(SCHEMA_URI + "author", [None])[0], SCHEMA_URI + "email": metadata.pop(SCHEMA_URI + "email", [None])[0], } ] } return self.normalize_translation(metadata) def normalize_home_page(self, urls): return [{"@id": url} for url in urls] def normalize_keywords(self, keywords): return list(itertools.chain.from_iterable(s.split(" ") for s in keywords)) def normalize_license(self, licenses): return [{"@id": license} for license in licenses] diff --git a/swh/indexer/metadata_dictionary/ruby.py b/swh/indexer/metadata_dictionary/ruby.py index ad86c06..bdb06aa 100644 --- a/swh/indexer/metadata_dictionary/ruby.py +++ b/swh/indexer/metadata_dictionary/ruby.py @@ -1,132 +1,135 @@ -# Copyright (C) 2018-2019 The Software Heritage developers +# Copyright (C) 2018-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import ast import itertools import re +from typing import List from swh.indexer.codemeta import CROSSWALK_TABLE, SCHEMA_URI +from swh.indexer.metadata_dictionary.base import DirectoryLsEntry +from 
swh.indexer.storage.interface import Sha1 -from .base import DictMapping +from .base import BaseIntrinsicMapping, DictMapping def name_to_person(name): return { "@type": SCHEMA_URI + "Person", SCHEMA_URI + "name": name, } -class GemspecMapping(DictMapping): +class GemspecMapping(BaseIntrinsicMapping, DictMapping): name = "gemspec" mapping = CROSSWALK_TABLE["Ruby Gem"] string_fields = ["name", "version", "description", "summary", "email"] _re_spec_new = re.compile(r".*Gem::Specification.new +(do|\{) +\|.*\|.*") _re_spec_entry = re.compile(r"\s*\w+\.(?P\w+)\s*=\s*(?P.*)") @classmethod - def detect_metadata_files(cls, file_entries): + def detect_metadata_files(cls, file_entries: List[DirectoryLsEntry]) -> List[Sha1]: for entry in file_entries: if entry["name"].endswith(b".gemspec"): return [entry["sha1"]] return [] def translate(self, raw_content): try: raw_content = raw_content.decode() except UnicodeDecodeError: self.log.warning("Error unidecoding from %s", self.log_suffix) return # Skip lines before 'Gem::Specification.new' lines = itertools.dropwhile( lambda x: not self._re_spec_new.match(x), raw_content.split("\n") ) try: next(lines) # Consume 'Gem::Specification.new' except StopIteration: self.log.warning("Could not find Gem::Specification in %s", self.log_suffix) return content_dict = {} for line in lines: match = self._re_spec_entry.match(line) if match: value = self.eval_ruby_expression(match.group("expr")) if value: content_dict[match.group("key")] = value return self._translate_dict(content_dict) def eval_ruby_expression(self, expr): """Very simple evaluator of Ruby expressions. 
>>> GemspecMapping().eval_ruby_expression('"Foo bar"') 'Foo bar' >>> GemspecMapping().eval_ruby_expression("'Foo bar'") 'Foo bar' >>> GemspecMapping().eval_ruby_expression("['Foo', 'bar']") ['Foo', 'bar'] >>> GemspecMapping().eval_ruby_expression("'Foo bar'.freeze") 'Foo bar' >>> GemspecMapping().eval_ruby_expression( \ "['Foo'.freeze, 'bar'.freeze]") ['Foo', 'bar'] """ def evaluator(node): if isinstance(node, ast.Str): return node.s elif isinstance(node, ast.List): res = [] for element in node.elts: val = evaluator(element) if not val: return res.append(val) return res expr = expr.replace(".freeze", "") try: # We're parsing Ruby expressions here, but Python's # ast.parse works for very simple Ruby expressions # (mainly strings delimited with " or ', and lists # of such strings). tree = ast.parse(expr, mode="eval") except (SyntaxError, ValueError): return if isinstance(tree, ast.Expression): return evaluator(tree.body) def normalize_homepage(self, s): if isinstance(s, str): return {"@id": s} def normalize_license(self, s): if isinstance(s, str): return [{"@id": "https://spdx.org/licenses/" + s}] def normalize_licenses(self, licenses): if isinstance(licenses, list): return [ {"@id": "https://spdx.org/licenses/" + license} for license in licenses if isinstance(license, str) ] def normalize_author(self, author): if isinstance(author, str): return {"@list": [name_to_person(author)]} def normalize_authors(self, authors): if isinstance(authors, list): return { "@list": [ name_to_person(author) for author in authors if isinstance(author, str) ] } diff --git a/swh/indexer/sql/30-schema.sql b/swh/indexer/sql/30-schema.sql index e3d83cf..08587c3 100644 --- a/swh/indexer/sql/30-schema.sql +++ b/swh/indexer/sql/30-schema.sql @@ -1,132 +1,148 @@ --- --- Software Heritage Indexers Data Model --- -- Computing metadata on sha1's contents -- a SHA1 checksum (not necessarily originating from Git) create domain sha1 as bytea check (length(value) = 20); -- a Git object ID, i.e., a 
SHA1 checksum create domain sha1_git as bytea check (length(value) = 20); create table indexer_configuration ( id serial not null, tool_name text not null, tool_version text not null, tool_configuration jsonb ); comment on table indexer_configuration is 'Indexer''s configuration version'; comment on column indexer_configuration.id is 'Tool identifier'; comment on column indexer_configuration.tool_version is 'Tool name'; comment on column indexer_configuration.tool_version is 'Tool version'; comment on column indexer_configuration.tool_configuration is 'Tool configuration: command line, flags, etc...'; -- Properties (mimetype, encoding, etc...) create table content_mimetype ( id sha1 not null, mimetype text not null, encoding text not null, indexer_configuration_id bigint not null ); comment on table content_mimetype is 'Metadata associated to a raw content'; comment on column content_mimetype.mimetype is 'Raw content Mimetype'; comment on column content_mimetype.encoding is 'Raw content encoding'; comment on column content_mimetype.indexer_configuration_id is 'Tool used to compute the information'; -- Language metadata create table content_language ( id sha1 not null, lang languages not null, indexer_configuration_id bigint not null ); comment on table content_language is 'Language information on a raw content'; comment on column content_language.lang is 'Language information'; comment on column content_language.indexer_configuration_id is 'Tool used to compute the information'; -- ctags information per content create table content_ctags ( id sha1 not null, name text not null, kind text not null, line bigint not null, lang ctags_languages not null, indexer_configuration_id bigint not null ); comment on table content_ctags is 'Ctags information on a raw content'; comment on column content_ctags.id is 'Content identifier'; comment on column content_ctags.name is 'Symbol name'; comment on column content_ctags.kind is 'Symbol kind (function, class, variable, 
const...)'; comment on column content_ctags.line is 'Symbol line'; comment on column content_ctags.lang is 'Language information for that content'; comment on column content_ctags.indexer_configuration_id is 'Tool used to compute the information'; create table fossology_license( id smallserial, name text not null ); comment on table fossology_license is 'Possible license recognized by license indexer'; comment on column fossology_license.id is 'License identifier'; comment on column fossology_license.name is 'License name'; create table content_fossology_license ( id sha1 not null, license_id smallserial not null, indexer_configuration_id bigint not null ); comment on table content_fossology_license is 'license associated to a raw content'; comment on column content_fossology_license.id is 'Raw content identifier'; comment on column content_fossology_license.license_id is 'One of the content''s license identifier'; comment on column content_fossology_license.indexer_configuration_id is 'Tool used to compute the information'; -- The table content_metadata provides a translation to files -- identified as potentially containning metadata with a translation tool (indexer_configuration_id) create table content_metadata( id sha1 not null, metadata jsonb not null, indexer_configuration_id bigint not null ); comment on table content_metadata is 'metadata semantically translated from a content file'; comment on column content_metadata.id is 'sha1 of content file'; comment on column content_metadata.metadata is 'result of translation with defined format'; comment on column content_metadata.indexer_configuration_id is 'tool used for translation'; -- The table directory_intrinsic_metadata provides a minimal set of intrinsic -- metadata detected with the detection tool (indexer_configuration_id) and -- aggregated from the content_metadata translation. 
create table directory_intrinsic_metadata( id sha1_git not null, metadata jsonb not null, indexer_configuration_id bigint not null, mappings text array not null ); comment on table directory_intrinsic_metadata is 'metadata semantically detected and translated in a directory'; comment on column directory_intrinsic_metadata.id is 'sha1_git of directory'; comment on column directory_intrinsic_metadata.metadata is 'result of detection and translation with defined format'; comment on column directory_intrinsic_metadata.indexer_configuration_id is 'tool used for detection'; comment on column directory_intrinsic_metadata.mappings is 'type of metadata files used to obtain this metadata (eg. pkg-info, npm)'; create table origin_intrinsic_metadata( id text not null, -- origin url metadata jsonb, indexer_configuration_id bigint not null, from_directory sha1_git not null, metadata_tsvector tsvector, mappings text array not null ); comment on table origin_intrinsic_metadata is 'keeps intrinsic metadata for an origin'; comment on column origin_intrinsic_metadata.id is 'url of the origin'; comment on column origin_intrinsic_metadata.metadata is 'metadata extracted from a directory'; comment on column origin_intrinsic_metadata.indexer_configuration_id is 'tool used to generate this metadata'; comment on column origin_intrinsic_metadata.from_directory is 'sha1 of the directory this metadata was copied from.'; comment on column origin_intrinsic_metadata.mappings is 'type of metadata files used to obtain this metadata (eg. 
pkg-info, npm)'; + +create table origin_extrinsic_metadata( + id text not null, -- origin url + metadata jsonb, + indexer_configuration_id bigint not null, + from_remd_id sha1_git not null, + metadata_tsvector tsvector, + mappings text array not null +); + +comment on table origin_extrinsic_metadata is 'keeps extrinsic metadata for an origin'; +comment on column origin_extrinsic_metadata.id is 'url of the origin'; +comment on column origin_extrinsic_metadata.metadata is 'metadata extracted from a directory'; +comment on column origin_extrinsic_metadata.indexer_configuration_id is 'tool used to generate this metadata'; +comment on column origin_extrinsic_metadata.from_remd_id is 'sha1 of the directory this metadata was copied from.'; +comment on column origin_extrinsic_metadata.mappings is 'type of metadata files used to obtain this metadata (eg. github, gitlab)'; diff --git a/swh/indexer/sql/50-func.sql b/swh/indexer/sql/50-func.sql index 4e74732..d459a4a 100644 --- a/swh/indexer/sql/50-func.sql +++ b/swh/indexer/sql/50-func.sql @@ -1,414 +1,487 @@ -- Postgresql index helper function create or replace function hash_sha1(text) returns text language sql strict immutable as $$ select encode(public.digest($1, 'sha1'), 'hex') $$; comment on function hash_sha1(text) is 'Compute sha1 hash as text'; -- create a temporary table called tmp_TBLNAME, mimicking existing table -- TBLNAME -- -- Args: -- tblname: name of the table to mimic create or replace function swh_mktemp(tblname regclass) returns void language plpgsql as $$ begin execute format(' create temporary table if not exists tmp_%1$I (like %1$I including defaults) on commit delete rows; alter table tmp_%1$I drop column if exists object_id; ', tblname); return; end $$; -- create a temporary table for content_mimetype tmp_content_mimetype, create or replace function swh_mktemp_content_mimetype() returns void language sql as $$ create temporary table if not exists tmp_content_mimetype ( like content_mimetype including 
defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_mimetype() IS 'Helper table to add mimetype information'; -- add tmp_content_mimetype entries to content_mimetype, overwriting duplicates. -- -- If filtering duplicates is in order, the call to -- swh_content_mimetype_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_mimetype), 1. COPY to tmp_content_mimetype, -- 2. call this function create or replace function swh_content_mimetype_add() returns bigint language plpgsql as $$ declare res bigint; begin insert into content_mimetype (id, mimetype, encoding, indexer_configuration_id) select id, mimetype, encoding, indexer_configuration_id from tmp_content_mimetype tcm on conflict(id, indexer_configuration_id) do update set mimetype = excluded.mimetype, encoding = excluded.encoding; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_mimetype_add() IS 'Add new content mimetypes'; -- add tmp_content_language entries to content_language, overwriting duplicates. -- -- If filtering duplicates is in order, the call to -- swh_content_language_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_content_language, 2. 
call this function create or replace function swh_content_language_add() returns bigint language plpgsql as $$ declare res bigint; begin insert into content_language (id, lang, indexer_configuration_id) select id, lang, indexer_configuration_id from tmp_content_language tcl on conflict(id, indexer_configuration_id) do update set lang = excluded.lang; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_language_add() IS 'Add new content languages'; -- create a temporary table for retrieving content_language create or replace function swh_mktemp_content_language() returns void language sql as $$ create temporary table if not exists tmp_content_language ( like content_language including defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_language() is 'Helper table to add content language'; -- create a temporary table for content_ctags tmp_content_ctags, create or replace function swh_mktemp_content_ctags() returns void language sql as $$ create temporary table if not exists tmp_content_ctags ( like content_ctags including defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_ctags() is 'Helper table to add content ctags'; -- add tmp_content_ctags entries to content_ctags, overwriting duplicates -- -- operates in bulk: 0. swh_mktemp(content_ctags), 1. COPY to tmp_content_ctags, -- 2. 
call this function create or replace function swh_content_ctags_add() returns bigint language plpgsql as $$ declare res bigint; begin insert into content_ctags (id, name, kind, line, lang, indexer_configuration_id) select id, name, kind, line, lang, indexer_configuration_id from tmp_content_ctags tct on conflict(id, hash_sha1(name), kind, line, lang, indexer_configuration_id) do nothing; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_ctags_add() IS 'Add new ctags symbols per content'; create type content_ctags_signature as ( id sha1, name text, kind text, line bigint, lang ctags_languages, tool_id integer, tool_name text, tool_version text, tool_configuration jsonb ); -- Search within ctags content. -- create or replace function swh_content_ctags_search( expression text, l integer default 10, last_sha1 sha1 default '\x0000000000000000000000000000000000000000') returns setof content_ctags_signature language sql as $$ select c.id, name, kind, line, lang, i.id as tool_id, tool_name, tool_version, tool_configuration from content_ctags c inner join indexer_configuration i on i.id = c.indexer_configuration_id where hash_sha1(name) = hash_sha1(expression) and c.id > last_sha1 order by id limit l; $$; comment on function swh_content_ctags_search(text, integer, sha1) IS 'Equality search through ctags'' symbols'; -- create a temporary table for content_fossology_license tmp_content_fossology_license, create or replace function swh_mktemp_content_fossology_license() returns void language sql as $$ create temporary table if not exists tmp_content_fossology_license ( id sha1, license text, indexer_configuration_id integer ) on commit delete rows; $$; comment on function swh_mktemp_content_fossology_license() is 'Helper table to add content license'; -- add tmp_content_fossology_license entries to content_fossology_license, -- overwriting duplicates. -- -- operates in bulk: 0. swh_mktemp(content_fossology_license), 1. 
COPY to -- tmp_content_fossology_license, 2. call this function create or replace function swh_content_fossology_license_add() returns bigint language plpgsql as $$ declare res bigint; begin -- insert unknown licenses first insert into fossology_license (name) select distinct license from tmp_content_fossology_license tmp where not exists (select 1 from fossology_license where name=tmp.license) on conflict(name) do nothing; insert into content_fossology_license (id, license_id, indexer_configuration_id) select tcl.id, (select id from fossology_license where name = tcl.license) as license, indexer_configuration_id from tmp_content_fossology_license tcl on conflict(id, license_id, indexer_configuration_id) do update set license_id = excluded.license_id; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_fossology_license_add() IS 'Add new content licenses'; -- content_metadata functions -- add tmp_content_metadata entries to content_metadata, overwriting duplicates -- -- If filtering duplicates is in order, the call to -- swh_content_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_content_metadata, 2. 
call this function create or replace function swh_content_metadata_add() returns bigint language plpgsql as $$ declare res bigint; begin insert into content_metadata (id, metadata, indexer_configuration_id) select id, metadata, indexer_configuration_id from tmp_content_metadata tcm on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_content_metadata_add() IS 'Add new content metadata'; -- create a temporary table for retrieving content_metadata create or replace function swh_mktemp_content_metadata() returns void language sql as $$ create temporary table if not exists tmp_content_metadata ( like content_metadata including defaults ) on commit delete rows; $$; comment on function swh_mktemp_content_metadata() is 'Helper table to add content metadata'; -- end content_metadata functions -- add tmp_directory_intrinsic_metadata entries to directory_intrinsic_metadata, -- overwriting duplicates. -- -- If filtering duplicates is in order, the call to -- swh_directory_intrinsic_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_directory_intrinsic_metadata, 2. 
call this function create or replace function swh_directory_intrinsic_metadata_add() returns bigint language plpgsql as $$ declare res bigint; begin insert into directory_intrinsic_metadata (id, metadata, mappings, indexer_configuration_id) select id, metadata, mappings, indexer_configuration_id from tmp_directory_intrinsic_metadata tcm on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata, mappings = excluded.mappings; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_directory_intrinsic_metadata_add() IS 'Add new directory intrinsic metadata'; -- create a temporary table for retrieving directory_intrinsic_metadata create or replace function swh_mktemp_directory_intrinsic_metadata() returns void language sql as $$ create temporary table if not exists tmp_directory_intrinsic_metadata ( like directory_intrinsic_metadata including defaults ) on commit delete rows; $$; comment on function swh_mktemp_directory_intrinsic_metadata() is 'Helper table to add directory intrinsic metadata'; -- create a temporary table for retrieving origin_intrinsic_metadata create or replace function swh_mktemp_origin_intrinsic_metadata() returns void language sql as $$ create temporary table if not exists tmp_origin_intrinsic_metadata ( like origin_intrinsic_metadata including defaults ) on commit delete rows; $$; comment on function swh_mktemp_origin_intrinsic_metadata() is 'Helper table to add origin intrinsic metadata'; create or replace function swh_mktemp_indexer_configuration() returns void language sql as $$ create temporary table if not exists tmp_indexer_configuration ( like indexer_configuration including defaults ) on commit delete rows; alter table tmp_indexer_configuration drop column if exists id; $$; - --- add tmp_indexer_configuration entries to indexer_configuration, --- overwriting duplicates if any. --- --- operates in bulk: 0. create temporary tmp_indexer_configuration, 1. COPY to --- it, 2. 
call this function to insert and filtering out duplicates -create or replace function swh_indexer_configuration_add() - returns setof indexer_configuration - language plpgsql -as $$ -begin - insert into indexer_configuration(tool_name, tool_version, tool_configuration) - select tool_name, tool_version, tool_configuration from tmp_indexer_configuration tmp - on conflict(tool_name, tool_version, tool_configuration) do nothing; - - return query - select id, tool_name, tool_version, tool_configuration - from tmp_indexer_configuration join indexer_configuration - using(tool_name, tool_version, tool_configuration); - - return; -end -$$; - -- add tmp_origin_intrinsic_metadata entries to origin_intrinsic_metadata, -- overwriting duplicates. -- -- If filtering duplicates is in order, the call to -- swh_origin_intrinsic_metadata_missing must take place before calling this -- function. -- -- operates in bulk: 0. swh_mktemp(content_language), 1. COPY to -- tmp_origin_intrinsic_metadata, 2. call this function create or replace function swh_origin_intrinsic_metadata_add() returns bigint language plpgsql as $$ declare res bigint; begin perform swh_origin_intrinsic_metadata_compute_tsvector(); insert into origin_intrinsic_metadata (id, metadata, indexer_configuration_id, from_directory, metadata_tsvector, mappings) select id, metadata, indexer_configuration_id, from_directory, metadata_tsvector, mappings from tmp_origin_intrinsic_metadata on conflict(id, indexer_configuration_id) do update set metadata = excluded.metadata, metadata_tsvector = excluded.metadata_tsvector, mappings = excluded.mappings, from_directory = excluded.from_directory; get diagnostics res = ROW_COUNT; return res; end $$; comment on function swh_origin_intrinsic_metadata_add() IS 'Add new origin intrinsic metadata'; -- Compute the metadata_tsvector column in tmp_origin_intrinsic_metadata. 
-- -- It uses the "pg_catalog.simple" dictionary, as it has no stopword, -- so it should be suitable for proper names and non-English text. create or replace function swh_origin_intrinsic_metadata_compute_tsvector() returns void language plpgsql as $$ begin update tmp_origin_intrinsic_metadata set metadata_tsvector = to_tsvector('pg_catalog.simple', metadata); end $$; + +-- create a temporary table for retrieving origin_extrinsic_metadata +create or replace function swh_mktemp_origin_extrinsic_metadata() + returns void + language sql +as $$ + create temporary table if not exists tmp_origin_extrinsic_metadata ( + like origin_extrinsic_metadata including defaults + ) on commit delete rows; +$$; + +comment on function swh_mktemp_origin_extrinsic_metadata() is 'Helper table to add origin extrinsic metadata'; + +create or replace function swh_mktemp_indexer_configuration() + returns void + language sql +as $$ + create temporary table if not exists tmp_indexer_configuration ( + like indexer_configuration including defaults + ) on commit delete rows; + alter table tmp_indexer_configuration drop column if exists id; +$$; + +-- add tmp_origin_extrinsic_metadata entries to origin_extrinsic_metadata, +-- overwriting duplicates. +-- +-- If filtering duplicates is in order, the call to +-- swh_origin_extrinsic_metadata_missing must take place before calling this +-- function. +-- +-- operates in bulk: 0. swh_mktemp(origin_extrinsic_metadata), 1. COPY to +-- tmp_origin_extrinsic_metadata, 2. 
call this function +create or replace function swh_origin_extrinsic_metadata_add() + returns bigint + language plpgsql +as $$ +declare + res bigint; +begin + perform swh_origin_extrinsic_metadata_compute_tsvector(); + + insert into origin_extrinsic_metadata (id, metadata, indexer_configuration_id, from_remd_id, metadata_tsvector, mappings) + select id, metadata, indexer_configuration_id, from_remd_id, + metadata_tsvector, mappings + from tmp_origin_extrinsic_metadata + on conflict(id, indexer_configuration_id) + do update set + metadata = excluded.metadata, + metadata_tsvector = excluded.metadata_tsvector, + mappings = excluded.mappings, + from_remd_id = excluded.from_remd_id; + + get diagnostics res = ROW_COUNT; + return res; +end +$$; + +comment on function swh_origin_extrinsic_metadata_add() IS 'Add new origin extrinsic metadata'; + + +-- Compute the metadata_tsvector column in tmp_origin_extrinsic_metadata. +-- +-- It uses the "pg_catalog.simple" dictionary, as it has no stopword, +-- so it should be suitable for proper names and non-English text. +create or replace function swh_origin_extrinsic_metadata_compute_tsvector() + returns void + language plpgsql +as $$ +begin + update tmp_origin_extrinsic_metadata + set metadata_tsvector = to_tsvector('pg_catalog.simple', metadata); +end +$$; + + +-- add tmp_indexer_configuration entries to indexer_configuration, +-- overwriting duplicates if any. +-- +-- operates in bulk: 0. create temporary tmp_indexer_configuration, 1. COPY to +-- it, 2. 
call this function to insert and filtering out duplicates +create or replace function swh_indexer_configuration_add() + returns setof indexer_configuration + language plpgsql +as $$ +begin + insert into indexer_configuration(tool_name, tool_version, tool_configuration) + select tool_name, tool_version, tool_configuration from tmp_indexer_configuration tmp + on conflict(tool_name, tool_version, tool_configuration) do nothing; + + return query + select id, tool_name, tool_version, tool_configuration + from tmp_indexer_configuration join indexer_configuration + using(tool_name, tool_version, tool_configuration); + + return; +end +$$; diff --git a/swh/indexer/sql/60-indexes.sql b/swh/indexer/sql/60-indexes.sql index 070ba29..5b42af7 100644 --- a/swh/indexer/sql/60-indexes.sql +++ b/swh/indexer/sql/60-indexes.sql @@ -1,69 +1,79 @@ -- fossology_license create unique index fossology_license_pkey on fossology_license(id); alter table fossology_license add primary key using index fossology_license_pkey; create unique index on fossology_license(name); -- indexer_configuration create unique index concurrently indexer_configuration_pkey on indexer_configuration(id); alter table indexer_configuration add primary key using index indexer_configuration_pkey; create unique index on indexer_configuration(tool_name, tool_version, tool_configuration); -- content_ctags create index on content_ctags(id); create index on content_ctags(hash_sha1(name)); create unique index on content_ctags(id, hash_sha1(name), kind, line, lang, indexer_configuration_id); alter table content_ctags add constraint content_ctags_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; alter table content_ctags validate constraint content_ctags_indexer_configuration_id_fkey; -- content_metadata create unique index content_metadata_pkey on content_metadata(id, indexer_configuration_id); alter table content_metadata add primary key using index 
content_metadata_pkey; alter table content_metadata add constraint content_metadata_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; alter table content_metadata validate constraint content_metadata_indexer_configuration_id_fkey; -- directory_intrinsic_metadata create unique index directory_intrinsic_metadata_pkey on directory_intrinsic_metadata(id, indexer_configuration_id); alter table directory_intrinsic_metadata add primary key using index directory_intrinsic_metadata_pkey; alter table directory_intrinsic_metadata add constraint directory_intrinsic_metadata_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; alter table directory_intrinsic_metadata validate constraint directory_intrinsic_metadata_indexer_configuration_id_fkey; -- content_mimetype create unique index content_mimetype_pkey on content_mimetype(id, indexer_configuration_id); alter table content_mimetype add primary key using index content_mimetype_pkey; alter table content_mimetype add constraint content_mimetype_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; alter table content_mimetype validate constraint content_mimetype_indexer_configuration_id_fkey; create index on content_mimetype(id) where mimetype like 'text/%'; -- content_language create unique index content_language_pkey on content_language(id, indexer_configuration_id); alter table content_language add primary key using index content_language_pkey; alter table content_language add constraint content_language_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; alter table content_language validate constraint content_language_indexer_configuration_id_fkey; -- content_fossology_license create unique index content_fossology_license_pkey on content_fossology_license(id, 
license_id, indexer_configuration_id); alter table content_fossology_license add primary key using index content_fossology_license_pkey; alter table content_fossology_license add constraint content_fossology_license_license_id_fkey foreign key (license_id) references fossology_license(id) not valid; alter table content_fossology_license validate constraint content_fossology_license_license_id_fkey; alter table content_fossology_license add constraint content_fossology_license_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; alter table content_fossology_license validate constraint content_fossology_license_indexer_configuration_id_fkey; -- origin_intrinsic_metadata create unique index origin_intrinsic_metadata_pkey on origin_intrinsic_metadata(id, indexer_configuration_id); alter table origin_intrinsic_metadata add primary key using index origin_intrinsic_metadata_pkey; alter table origin_intrinsic_metadata add constraint origin_intrinsic_metadata_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; alter table origin_intrinsic_metadata validate constraint origin_intrinsic_metadata_indexer_configuration_id_fkey; create index origin_intrinsic_metadata_fulltext_idx on origin_intrinsic_metadata using gin (metadata_tsvector); create index origin_intrinsic_metadata_mappings_idx on origin_intrinsic_metadata using gin (mappings); + +-- origin_extrinsic_metadata +create unique index origin_extrinsic_metadata_pkey on origin_extrinsic_metadata(id, indexer_configuration_id); +alter table origin_extrinsic_metadata add primary key using index origin_extrinsic_metadata_pkey; + +alter table origin_extrinsic_metadata add constraint origin_extrinsic_metadata_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; +alter table origin_extrinsic_metadata validate constraint 
origin_extrinsic_metadata_indexer_configuration_id_fkey; + +create index origin_extrinsic_metadata_fulltext_idx on origin_extrinsic_metadata using gin (metadata_tsvector); +create index origin_extrinsic_metadata_mappings_idx on origin_extrinsic_metadata using gin (mappings); diff --git a/swh/indexer/sql/upgrades/135.sql b/swh/indexer/sql/upgrades/135.sql new file mode 100644 index 0000000..20aeaf0 --- /dev/null +++ b/swh/indexer/sql/upgrades/135.sql @@ -0,0 +1,106 @@ +-- SWH Indexer DB schema upgrade +-- from_version: 134 +-- to_version: 135 +-- description: Add support for origin_extrinsic_metadata + +insert into dbversion(version, release, description) + values(135, now(), 'Add support for origin_extrinsic_metadata'); + +create table origin_extrinsic_metadata( + id text not null, -- origin url + metadata jsonb, + indexer_configuration_id bigint not null, + from_remd_id sha1_git not null, + metadata_tsvector tsvector, + mappings text array not null +); + +comment on table origin_extrinsic_metadata is 'keeps extrinsic metadata for an origin'; +comment on column origin_extrinsic_metadata.id is 'url of the origin'; +comment on column origin_extrinsic_metadata.metadata is 'metadata extracted from a raw_extrinsic_metadata object'; +comment on column origin_extrinsic_metadata.indexer_configuration_id is 'tool used to generate this metadata'; +comment on column origin_extrinsic_metadata.from_remd_id is 'sha1 of the raw_extrinsic_metadata object this metadata was copied from.'; +comment on column origin_extrinsic_metadata.mappings is 'type of metadata files used to obtain this metadata (eg. 
github, gitlab)'; + +-- create a temporary table for retrieving origin_extrinsic_metadata +create or replace function swh_mktemp_origin_extrinsic_metadata() + returns void + language sql +as $$ + create temporary table if not exists tmp_origin_extrinsic_metadata ( + like origin_extrinsic_metadata including defaults + ) on commit delete rows; +$$; + +comment on function swh_mktemp_origin_extrinsic_metadata() is 'Helper table to add origin extrinsic metadata'; + +create or replace function swh_mktemp_indexer_configuration() + returns void + language sql +as $$ + create temporary table if not exists tmp_indexer_configuration ( + like indexer_configuration including defaults + ) on commit delete rows; + alter table tmp_indexer_configuration drop column if exists id; +$$; + +-- add tmp_origin_extrinsic_metadata entries to origin_extrinsic_metadata, +-- overwriting duplicates. +-- +-- If filtering duplicates is in order, the call to +-- swh_origin_extrinsic_metadata_missing must take place before calling this +-- function. +-- +-- operates in bulk: 0. swh_mktemp(origin_extrinsic_metadata), 1. COPY to +-- tmp_origin_extrinsic_metadata, 2. 
call this function +create or replace function swh_origin_extrinsic_metadata_add() + returns bigint + language plpgsql +as $$ +declare + res bigint; +begin + perform swh_origin_extrinsic_metadata_compute_tsvector(); + + insert into origin_extrinsic_metadata (id, metadata, indexer_configuration_id, from_remd_id, metadata_tsvector, mappings) + select id, metadata, indexer_configuration_id, from_remd_id, + metadata_tsvector, mappings + from tmp_origin_extrinsic_metadata + on conflict(id, indexer_configuration_id) + do update set + metadata = excluded.metadata, + metadata_tsvector = excluded.metadata_tsvector, + mappings = excluded.mappings, + from_remd_id = excluded.from_remd_id; + + get diagnostics res = ROW_COUNT; + return res; +end +$$; + +comment on function swh_origin_extrinsic_metadata_add() IS 'Add new origin extrinsic metadata'; + + +-- Compute the metadata_tsvector column in tmp_origin_extrinsic_metadata. +-- +-- It uses the "pg_catalog.simple" dictionary, as it has no stopword, +-- so it should be suitable for proper names and non-English text. 
+create or replace function swh_origin_extrinsic_metadata_compute_tsvector() + returns void + language plpgsql +as $$ +begin + update tmp_origin_extrinsic_metadata + set metadata_tsvector = to_tsvector('pg_catalog.simple', metadata); +end +$$; + +-- origin_extrinsic_metadata +create unique index origin_extrinsic_metadata_pkey on origin_extrinsic_metadata(id, indexer_configuration_id); +alter table origin_extrinsic_metadata add primary key using index origin_extrinsic_metadata_pkey; + +alter table origin_extrinsic_metadata add constraint origin_extrinsic_metadata_indexer_configuration_id_fkey foreign key (indexer_configuration_id) references indexer_configuration(id) not valid; +alter table origin_extrinsic_metadata validate constraint origin_extrinsic_metadata_indexer_configuration_id_fkey; + +create index origin_extrinsic_metadata_fulltext_idx on origin_extrinsic_metadata using gin (metadata_tsvector); +create index origin_extrinsic_metadata_mappings_idx on origin_extrinsic_metadata using gin (mappings); diff --git a/swh/indexer/storage/__init__.py b/swh/indexer/storage/__init__.py index 470cf79..ab5315c 100644 --- a/swh/indexer/storage/__init__.py +++ b/swh/indexer/storage/__init__.py @@ -1,755 +1,802 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import Counter from importlib import import_module import json from typing import Dict, Iterable, List, Optional, Tuple, Union import warnings import psycopg2 import psycopg2.pool from swh.core.db.common import db_transaction from swh.indexer.storage.interface import IndexerStorageInterface from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.model.model import SHA1_SIZE from swh.storage.exc import StorageDBError from swh.storage.utils import get_partition_bounds_bytes from . 
import converters from .db import Db from .exc import DuplicateId, IndexerStorageArgumentException from .interface import PagedResult, Sha1 from .metrics import process_metrics, send_metric, timed from .model import ( ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, + OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from .writer import JournalWriter INDEXER_CFG_KEY = "indexer_storage" MAPPING_NAMES = ["cff", "codemeta", "gemspec", "maven", "npm", "pkg-info"] SERVER_IMPLEMENTATIONS: Dict[str, str] = { "postgresql": ".IndexerStorage", "remote": ".api.client.RemoteStorage", "memory": ".in_memory.IndexerStorage", # deprecated "local": ".IndexerStorage", } def get_indexer_storage(cls: str, **kwargs) -> IndexerStorageInterface: """Instantiate an indexer storage implementation of class `cls` with arguments `kwargs`. Args: cls: indexer storage class (local, remote or memory) kwargs: dictionary of arguments passed to the indexer storage class constructor Returns: an instance of swh.indexer.storage Raises: ValueError if passed an unknown storage class. """ if "args" in kwargs: warnings.warn( 'Explicit "args" key is deprecated, use keys directly instead.', DeprecationWarning, ) kwargs = kwargs["args"] class_path = SERVER_IMPLEMENTATIONS.get(cls) if class_path is None: raise ValueError( f"Unknown indexer storage class `{cls}`. 
" f"Supported: {', '.join(SERVER_IMPLEMENTATIONS)}" ) (module_path, class_name) = class_path.rsplit(".", 1) module = import_module(module_path if module_path else ".", package=__package__) BackendClass = getattr(module, class_name) check_config = kwargs.pop("check_config", {}) idx_storage = BackendClass(**kwargs) if check_config: if not idx_storage.check_config(**check_config): raise EnvironmentError("Indexer storage check config failed") return idx_storage def check_id_duplicates(data): """ If any two row models in `data` have the same unique key, raises a `ValueError`. Values associated to the key must be hashable. Args: data (List[dict]): List of dictionaries to be inserted >>> check_id_duplicates([ ... ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"), ... ContentLanguageRow(id=b'foo', indexer_configuration_id=32, lang="python"), ... ]) >>> check_id_duplicates([ ... ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"), ... ContentLanguageRow(id=b'foo', indexer_configuration_id=42, lang="python"), ... ]) Traceback (most recent call last): ... 
swh.indexer.storage.exc.DuplicateId: [{'id': b'foo', 'indexer_configuration_id': 42}] """ # noqa counter = Counter(tuple(sorted(item.unique_key().items())) for item in data) duplicates = [id_ for (id_, count) in counter.items() if count >= 2] if duplicates: raise DuplicateId(list(map(dict, duplicates))) class IndexerStorage: """SWH Indexer Storage Datastore""" - current_version = 134 + current_version = 135 def __init__(self, db, min_pool_conns=1, max_pool_conns=10, journal_writer=None): """ Args: db: either a libpq connection string, or a psycopg2 connection journal_writer: configuration passed to `swh.journal.writer.get_journal_writer` """ self.journal_writer = JournalWriter(self._tool_get_from_id, journal_writer) try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) def get_db(self): if self._db: return self._db return Db.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() @timed @db_transaction() def check_config(self, *, check_write, db=None, cur=None): # Check permissions on one of the tables if check_write: check = "INSERT" else: check = "SELECT" cur.execute( "select has_table_privilege(current_user, 'content_mimetype', %s)", # noqa (check,), ) return cur.fetchone()[0] @timed @db_transaction() def content_mimetype_missing( self, mimetypes: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [obj[0] for obj in db.content_mimetype_missing_from_list(mimetypes, cur)] @timed @db_transaction() def get_partition( self, indexer_type: str, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, with_textual_data=False, db=None, cur=None, ) -> PagedResult[Sha1]: """Retrieve ids of content with `indexer_type` within within partition partition_id 
bound by limit. Args: **indexer_type**: Type of data content to index (mimetype, language, etc...) **indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) **with_textual_data** (bool): Deal with only textual content (True) or all content (all contents by defaults, False) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. If next_page_token is None, there is no more data to fetch """ if limit is None: raise IndexerStorageArgumentException("limit should not be None") if indexer_type not in db.content_indexer_names: err = f"Wrong type. Should be one of [{','.join(db.content_indexer_names)}]" raise IndexerStorageArgumentException(err) start, end = get_partition_bounds_bytes(partition_id, nb_partitions, SHA1_SIZE) if page_token is not None: start = hash_to_bytes(page_token) if end is None: end = b"\xff" * SHA1_SIZE next_page_token: Optional[str] = None ids = [ row[0] for row in db.content_get_range( indexer_type, start, end, indexer_configuration_id, limit=limit + 1, with_textual_data=with_textual_data, cur=cur, ) ] if len(ids) >= limit: next_page_token = hash_to_hex(ids[-1]) ids = ids[:limit] assert len(ids) <= limit return PagedResult(results=ids, next_page_token=next_page_token) @timed @db_transaction() def content_mimetype_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[Sha1]: return self.get_partition( "mimetype", indexer_configuration_id, partition_id, nb_partitions, page_token=page_token, limit=limit, db=db, cur=cur, ) @timed @process_metrics @db_transaction() def content_mimetype_add( self, mimetypes: List[ContentMimetypeRow], db=None, cur=None, ) -> Dict[str, 
int]: check_id_duplicates(mimetypes) mimetypes.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_mimetype", mimetypes) db.mktemp_content_mimetype(cur) db.copy_to( [m.to_dict() for m in mimetypes], "tmp_content_mimetype", ["id", "mimetype", "encoding", "indexer_configuration_id"], cur, ) count = db.content_mimetype_add_from_temp(cur) return {"content_mimetype:add": count} @timed @db_transaction() def content_mimetype_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentMimetypeRow]: return [ ContentMimetypeRow.from_dict( converters.db_to_mimetype(dict(zip(db.content_mimetype_cols, c))) ) for c in db.content_mimetype_get_from_list(ids, cur) ] @timed @db_transaction() def content_language_missing( self, languages: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [obj[0] for obj in db.content_language_missing_from_list(languages, cur)] @timed @db_transaction() def content_language_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentLanguageRow]: return [ ContentLanguageRow.from_dict( converters.db_to_language(dict(zip(db.content_language_cols, c))) ) for c in db.content_language_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def content_language_add( self, languages: List[ContentLanguageRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(languages) languages.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_language", languages) db.mktemp_content_language(cur) # empty language is mapped to 'unknown' db.copy_to( ( { "id": lang.id, "lang": lang.lang or "unknown", "indexer_configuration_id": lang.indexer_configuration_id, } for lang in languages ), "tmp_content_language", ["id", "lang", "indexer_configuration_id"], cur, ) count = db.content_language_add_from_temp(cur) return {"content_language:add": count} @timed @db_transaction() def content_ctags_missing( self, ctags: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [obj[0] 
for obj in db.content_ctags_missing_from_list(ctags, cur)] @timed @db_transaction() def content_ctags_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentCtagsRow]: return [ ContentCtagsRow.from_dict( converters.db_to_ctags(dict(zip(db.content_ctags_cols, c))) ) for c in db.content_ctags_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def content_ctags_add( self, ctags: List[ContentCtagsRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(ctags) ctags.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_ctags", ctags) db.mktemp_content_ctags(cur) db.copy_to( [ctag.to_dict() for ctag in ctags], tblname="tmp_content_ctags", columns=["id", "name", "kind", "line", "lang", "indexer_configuration_id"], cur=cur, ) count = db.content_ctags_add_from_temp(cur) return {"content_ctags:add": count} @timed @db_transaction() def content_ctags_search( self, expression: str, limit: int = 10, last_sha1: Optional[Sha1] = None, db=None, cur=None, ) -> List[ContentCtagsRow]: return [ ContentCtagsRow.from_dict( converters.db_to_ctags(dict(zip(db.content_ctags_cols, obj))) ) for obj in db.content_ctags_search(expression, last_sha1, limit, cur=cur) ] @timed @db_transaction() def content_fossology_license_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentLicenseRow]: return [ ContentLicenseRow.from_dict( converters.db_to_fossology_license( dict(zip(db.content_fossology_license_cols, c)) ) ) for c in db.content_fossology_license_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def content_fossology_license_add( self, licenses: List[ContentLicenseRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(licenses) licenses.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_fossology_license", licenses) db.mktemp_content_fossology_license(cur) db.copy_to( [license.to_dict() for license in licenses], tblname="tmp_content_fossology_license", columns=["id", "license", 
"indexer_configuration_id"], cur=cur, ) count = db.content_fossology_license_add_from_temp(cur) return {"content_fossology_license:add": count} @timed @db_transaction() def content_fossology_license_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[Sha1]: return self.get_partition( "fossology_license", indexer_configuration_id, partition_id, nb_partitions, page_token=page_token, limit=limit, with_textual_data=True, db=db, cur=cur, ) @timed @db_transaction() def content_metadata_missing( self, metadata: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [obj[0] for obj in db.content_metadata_missing_from_list(metadata, cur)] @timed @db_transaction() def content_metadata_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[ContentMetadataRow]: return [ ContentMetadataRow.from_dict( converters.db_to_metadata(dict(zip(db.content_metadata_cols, c))) ) for c in db.content_metadata_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def content_metadata_add( self, metadata: List[ContentMetadataRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(metadata) metadata.sort(key=lambda m: m.id) self.journal_writer.write_additions("content_metadata", metadata) db.mktemp_content_metadata(cur) db.copy_to( [m.to_dict() for m in metadata], "tmp_content_metadata", ["id", "metadata", "indexer_configuration_id"], cur, ) count = db.content_metadata_add_from_temp(cur) return { "content_metadata:add": count, } @timed @db_transaction() def directory_intrinsic_metadata_missing( self, metadata: Iterable[Dict], db=None, cur=None ) -> List[Tuple[Sha1, int]]: return [ obj[0] for obj in db.directory_intrinsic_metadata_missing_from_list(metadata, cur) ] @timed @db_transaction() def directory_intrinsic_metadata_get( self, ids: Iterable[Sha1], db=None, cur=None ) -> List[DirectoryIntrinsicMetadataRow]: return [ 
DirectoryIntrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.directory_intrinsic_metadata_cols, c)) ) ) for c in db.directory_intrinsic_metadata_get_from_list(ids, cur) ] @timed @process_metrics @db_transaction() def directory_intrinsic_metadata_add( self, metadata: List[DirectoryIntrinsicMetadataRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(metadata) metadata.sort(key=lambda m: m.id) self.journal_writer.write_additions("directory_intrinsic_metadata", metadata) db.mktemp_directory_intrinsic_metadata(cur) db.copy_to( [m.to_dict() for m in metadata], "tmp_directory_intrinsic_metadata", ["id", "metadata", "mappings", "indexer_configuration_id"], cur, ) count = db.directory_intrinsic_metadata_add_from_temp(cur) return { "directory_intrinsic_metadata:add": count, } @timed @db_transaction() def origin_intrinsic_metadata_get( self, urls: Iterable[str], db=None, cur=None ) -> List[OriginIntrinsicMetadataRow]: return [ OriginIntrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c)) ) ) for c in db.origin_intrinsic_metadata_get_from_list(urls, cur) ] @timed @process_metrics @db_transaction() def origin_intrinsic_metadata_add( self, metadata: List[OriginIntrinsicMetadataRow], db=None, cur=None, ) -> Dict[str, int]: check_id_duplicates(metadata) metadata.sort(key=lambda m: m.id) self.journal_writer.write_additions("origin_intrinsic_metadata", metadata) db.mktemp_origin_intrinsic_metadata(cur) db.copy_to( [m.to_dict() for m in metadata], "tmp_origin_intrinsic_metadata", [ "id", "metadata", "indexer_configuration_id", "from_directory", "mappings", ], cur, ) count = db.origin_intrinsic_metadata_add_from_temp(cur) return { "origin_intrinsic_metadata:add": count, } @timed @db_transaction() def origin_intrinsic_metadata_search_fulltext( self, conjunction: List[str], limit: int = 100, db=None, cur=None ) -> List[OriginIntrinsicMetadataRow]: return [ OriginIntrinsicMetadataRow.from_dict( 
converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, c)) ) ) for c in db.origin_intrinsic_metadata_search_fulltext( conjunction, limit=limit, cur=cur ) ] @timed @db_transaction() def origin_intrinsic_metadata_search_by_producer( self, page_token: str = "", limit: int = 100, ids_only: bool = False, mappings: Optional[List[str]] = None, tool_ids: Optional[List[int]] = None, db=None, cur=None, ) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]: assert isinstance(page_token, str) # we go to limit+1 to check whether we should add next_page_token in # the response rows = db.origin_intrinsic_metadata_search_by_producer( page_token, limit + 1, ids_only, mappings, tool_ids, cur ) next_page_token = None if ids_only: results = [origin for (origin,) in rows] if len(results) > limit: results[limit:] = [] next_page_token = results[-1] else: results = [ OriginIntrinsicMetadataRow.from_dict( converters.db_to_metadata( dict(zip(db.origin_intrinsic_metadata_cols, row)) ) ) for row in rows ] if len(results) > limit: results[limit:] = [] next_page_token = results[-1].id return PagedResult( results=results, next_page_token=next_page_token, ) @timed @db_transaction() def origin_intrinsic_metadata_stats(self, db=None, cur=None): mapping_names = [m for m in MAPPING_NAMES] select_parts = [] # Count rows for each mapping for mapping_name in mapping_names: select_parts.append( ( "sum(case when (mappings @> ARRAY['%s']) " " then 1 else 0 end)" ) % mapping_name ) # Total select_parts.append("sum(1)") # Rows whose metadata has at least one key that is not '@context' select_parts.append( "sum(case when ('{}'::jsonb @> (metadata - '@context')) " " then 0 else 1 end)" ) cur.execute( "select " + ", ".join(select_parts) + " from origin_intrinsic_metadata" ) results = dict(zip(mapping_names + ["total", "non_empty"], cur.fetchone())) return { "total": results.pop("total"), "non_empty": results.pop("non_empty"), "per_mapping": results, } + @timed + @db_transaction() + def 
    @timed
    @process_metrics
    @db_transaction()
    def origin_extrinsic_metadata_add(
        self,
        metadata: List[OriginExtrinsicMetadataRow],
        db=None,
        cur=None,
    ) -> Dict[str, int]:
        """Add origin extrinsic metadata rows not present in storage.

        Rows are written to the journal before being inserted in the
        database, mirroring :meth:`origin_intrinsic_metadata_add`.

        Args:
            metadata: rows to insert; the list is sorted in place by id

        Returns:
            dict with the number of rows actually added under the
            ``origin_extrinsic_metadata:add`` key
        """
        check_id_duplicates(metadata)
        metadata.sort(key=lambda m: m.id)
        self.journal_writer.write_additions("origin_extrinsic_metadata", metadata)

        # Stage rows in a temporary table, then merge them in one server call.
        db.mktemp_origin_extrinsic_metadata(cur)

        db.copy_to(
            [m.to_dict() for m in metadata],
            "tmp_origin_extrinsic_metadata",
            [
                "id",
                "metadata",
                "indexer_configuration_id",
                "from_remd_id",
                "mappings",
            ],
            cur,
        )
        count = db.origin_extrinsic_metadata_add_from_temp(cur)
        return {
            "origin_extrinsic_metadata:add": count,
        }
cur), ) ) return { "id": tool["id"], "name": tool["tool_name"], "version": tool["tool_version"], "configuration": tool["tool_configuration"], } diff --git a/swh/indexer/storage/db.py b/swh/indexer/storage/db.py index cb0ae0a..bd8391d 100644 --- a/swh/indexer/storage/db.py +++ b/swh/indexer/storage/db.py @@ -1,534 +1,563 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Dict, Iterable, Iterator, List from swh.core.db import BaseDb from swh.core.db.db_utils import execute_values_generator, stored_procedure from swh.model import hashutil from .interface import Sha1 class Db(BaseDb): """Proxy to the SWH Indexer DB, with wrappers around stored procedures""" content_mimetype_hash_keys = ["id", "indexer_configuration_id"] def _missing_from_list( self, table: str, data: Iterable[Dict], hash_keys: List[str], cur=None ): """Read from table the data with hash_keys that are missing. Args: table: Table name (e.g content_mimetype, content_language, etc...) data: Dict of data to read from hash_keys: List of keys to read in the data dict. Yields: The data which is missing from the db. 
""" cur = self._cursor(cur) keys = ", ".join(hash_keys) equality = " AND ".join(("t.%s = c.%s" % (key, key)) for key in hash_keys) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(%s) where not exists ( select 1 from %s c where %s ) """ % (keys, keys, table, equality), (tuple(m[k] for k in hash_keys) for m in data), ) def content_mimetype_missing_from_list( self, mimetypes: Iterable[Dict], cur=None ) -> Iterator[Sha1]: """List missing mimetypes.""" yield from self._missing_from_list( "content_mimetype", mimetypes, self.content_mimetype_hash_keys, cur=cur ) content_mimetype_cols = [ "id", "mimetype", "encoding", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_content_mimetype") def mktemp_content_mimetype(self, cur=None): pass def content_mimetype_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_content_mimetype_add()") return cur.fetchone()[0] def _convert_key(self, key, main_table="c"): """Convert keys according to specific use in the module. Args: key (str): Key expression to change according to the alias used in the query main_table (str): Alias to use for the main table. Default to c for content_{something}. Expected: Tables content_{something} being aliased as 'c' (something in {language, mimetype, ...}), table indexer_configuration being aliased as 'i'. """ if key == "id": return "%s.id" % main_table elif key == "tool_id": return "i.id as tool_id" elif key == "license": return ( """ ( select name from fossology_license where id = %s.license_id ) as licenses""" % main_table ) return key def _get_from_list(self, table, ids, cols, cur=None, id_col="id"): """Fetches entries from the `table` such that their `id` field (or whatever is given to `id_col`) is in `ids`. Returns the columns `cols`. The `cur` parameter is used to connect to the database. 
""" cur = self._cursor(cur) keys = map(self._convert_key, cols) query = """ select {keys} from (values %s) as t(id) inner join {table} c on c.{id_col}=t.id inner join indexer_configuration i on c.indexer_configuration_id=i.id; """.format( keys=", ".join(keys), id_col=id_col, table=table ) yield from execute_values_generator(cur, query, ((_id,) for _id in ids)) content_indexer_names = { "mimetype": "content_mimetype", "fossology_license": "content_fossology_license", } def content_get_range( self, content_type, start, end, indexer_configuration_id, limit=1000, with_textual_data=False, cur=None, ): """Retrieve contents with content_type, within range [start, end] bound by limit and associated to the given indexer configuration id. When asking to work on textual content, that filters on the mimetype table with any mimetype that is not binary. """ cur = self._cursor(cur) table = self.content_indexer_names[content_type] if with_textual_data: extra = """inner join content_mimetype cm on (t.id=cm.id and cm.mimetype like 'text/%%' and %(start)s <= cm.id and cm.id <= %(end)s) """ else: extra = "" query = f"""select t.id from {table} t {extra} where t.indexer_configuration_id=%(tool_id)s and %(start)s <= t.id and t.id <= %(end)s order by t.indexer_configuration_id, t.id limit %(limit)s""" cur.execute( query, { "start": start, "end": end, "tool_id": indexer_configuration_id, "limit": limit, }, ) yield from cur def content_mimetype_get_from_list(self, ids, cur=None): yield from self._get_from_list( "content_mimetype", ids, self.content_mimetype_cols, cur=cur ) content_language_hash_keys = ["id", "indexer_configuration_id"] def content_language_missing_from_list(self, languages, cur=None): """List missing languages.""" yield from self._missing_from_list( "content_language", languages, self.content_language_hash_keys, cur=cur ) content_language_cols = [ "id", "lang", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_content_language") 
def mktemp_content_language(self, cur=None): pass def content_language_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_content_language_add()") return cur.fetchone()[0] def content_language_get_from_list(self, ids, cur=None): yield from self._get_from_list( "content_language", ids, self.content_language_cols, cur=cur ) content_ctags_hash_keys = ["id", "indexer_configuration_id"] def content_ctags_missing_from_list(self, ctags, cur=None): """List missing ctags.""" yield from self._missing_from_list( "content_ctags", ctags, self.content_ctags_hash_keys, cur=cur ) content_ctags_cols = [ "id", "name", "kind", "line", "lang", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_content_ctags") def mktemp_content_ctags(self, cur=None): pass def content_ctags_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_content_ctags_add()") return cur.fetchone()[0] def content_ctags_get_from_list(self, ids, cur=None): cur = self._cursor(cur) keys = map(self._convert_key, self.content_ctags_cols) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(id) inner join content_ctags c on c.id=t.id inner join indexer_configuration i on c.indexer_configuration_id=i.id order by line """ % ", ".join(keys), ((_id,) for _id in ids), ) def content_ctags_search(self, expression, last_sha1, limit, cur=None): cur = self._cursor(cur) if not last_sha1: query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s)""" % ( ",".join(self.content_ctags_cols) ) cur.execute(query, (expression, limit)) else: if last_sha1 and isinstance(last_sha1, bytes): last_sha1 = "\\x%s" % hashutil.hash_to_hex(last_sha1) elif last_sha1: last_sha1 = "\\x%s" % last_sha1 query = """SELECT %s FROM swh_content_ctags_search(%%s, %%s, %%s)""" % ( ",".join(self.content_ctags_cols) ) cur.execute(query, (expression, limit, last_sha1)) yield from cur content_fossology_license_cols = [ "id", 
"tool_id", "tool_name", "tool_version", "tool_configuration", "license", ] @stored_procedure("swh_mktemp_content_fossology_license") def mktemp_content_fossology_license(self, cur=None): pass def content_fossology_license_add_from_temp(self, cur=None): """Add new licenses per content.""" cur = self._cursor(cur) cur.execute("select * from swh_content_fossology_license_add()") return cur.fetchone()[0] def content_fossology_license_get_from_list(self, ids, cur=None): """Retrieve licenses per id.""" cur = self._cursor(cur) keys = map(self._convert_key, self.content_fossology_license_cols) yield from execute_values_generator( cur, """ select %s from (values %%s) as t(id) inner join content_fossology_license c on t.id=c.id inner join indexer_configuration i on i.id=c.indexer_configuration_id """ % ", ".join(keys), ((_id,) for _id in ids), ) content_metadata_hash_keys = ["id", "indexer_configuration_id"] def content_metadata_missing_from_list(self, metadata, cur=None): """List missing metadata.""" yield from self._missing_from_list( "content_metadata", metadata, self.content_metadata_hash_keys, cur=cur ) content_metadata_cols = [ "id", "metadata", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_content_metadata") def mktemp_content_metadata(self, cur=None): pass def content_metadata_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_content_metadata_add()") return cur.fetchone()[0] def content_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( "content_metadata", ids, self.content_metadata_cols, cur=cur ) directory_intrinsic_metadata_hash_keys = ["id", "indexer_configuration_id"] def directory_intrinsic_metadata_missing_from_list(self, metadata, cur=None): """List missing metadata.""" yield from self._missing_from_list( "directory_intrinsic_metadata", metadata, self.directory_intrinsic_metadata_hash_keys, cur=cur, ) directory_intrinsic_metadata_cols = [ "id", 
"metadata", "mappings", "tool_id", "tool_name", "tool_version", "tool_configuration", ] @stored_procedure("swh_mktemp_directory_intrinsic_metadata") def mktemp_directory_intrinsic_metadata(self, cur=None): pass def directory_intrinsic_metadata_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_directory_intrinsic_metadata_add()") return cur.fetchone()[0] def directory_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( "directory_intrinsic_metadata", ids, self.directory_intrinsic_metadata_cols, cur=cur, ) origin_intrinsic_metadata_cols = [ "id", "metadata", "from_directory", "mappings", "tool_id", "tool_name", "tool_version", "tool_configuration", ] origin_intrinsic_metadata_regconfig = "pg_catalog.simple" """The dictionary used to normalize 'metadata' and queries. 'pg_catalog.simple' provides no stopword, so it should be suitable for proper names and non-English content. When updating this value, make sure to add a new index on origin_intrinsic_metadata.metadata.""" @stored_procedure("swh_mktemp_origin_intrinsic_metadata") def mktemp_origin_intrinsic_metadata(self, cur=None): pass def origin_intrinsic_metadata_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("select * from swh_origin_intrinsic_metadata_add()") return cur.fetchone()[0] def origin_intrinsic_metadata_get_from_list(self, ids, cur=None): yield from self._get_from_list( "origin_intrinsic_metadata", ids, self.origin_intrinsic_metadata_cols, cur=cur, id_col="id", ) def origin_intrinsic_metadata_search_fulltext(self, terms, *, limit, cur): regconfig = self.origin_intrinsic_metadata_regconfig tsquery_template = " && ".join( "plainto_tsquery('%s', %%s)" % regconfig for _ in terms ) tsquery_args = [(term,) for term in terms] keys = ( self._convert_key(col, "oim") for col in self.origin_intrinsic_metadata_cols ) query = ( "SELECT {keys} FROM origin_intrinsic_metadata AS oim " "INNER JOIN indexer_configuration AS i " "ON 
    def origin_intrinsic_metadata_search_by_producer(
        self, last, limit, ids_only, mappings, tool_ids, cur
    ):
        """Search origin intrinsic metadata rows by producing mapping/tool.

        Args:
            last: pagination cursor; only rows with id > last are returned
            limit: maximum number of rows (no limit if falsy)
            ids_only: if True, select only the origin url column
            mappings: if not None, keep rows whose mappings overlap this list
            tool_ids: if not None, keep rows produced by one of these tools

        Yields:
            raw result tuples (single-column if ids_only)
        """
        if ids_only:
            keys = "oim.id"
        else:
            keys = ", ".join(
                (
                    self._convert_key(col, "oim")
                    for col in self.origin_intrinsic_metadata_cols
                )
            )
        query_parts = [
            "SELECT %s" % keys,
            "FROM origin_intrinsic_metadata AS oim",
            "INNER JOIN indexer_configuration AS i",
            "ON oim.indexer_configuration_id=i.id",
        ]
        args = []

        where = []
        if last:
            where.append("oim.id > %s")
            args.append(last)
        if mappings is not None:
            # && is the Postgres array-overlap operator: keep rows sharing
            # at least one mapping with the requested list.
            where.append("oim.mappings && %s")
            args.append(list(mappings))
        if tool_ids is not None:
            where.append("oim.indexer_configuration_id = ANY(%s)")
            args.append(list(tool_ids))
        if where:
            query_parts.append("WHERE")
            query_parts.append(" AND ".join(where))

        if limit:
            query_parts.append("LIMIT %s")
            args.append(limit)

        cur.execute(" ".join(query_parts), args)
        yield from cur
@stored_procedure("swh_mktemp_indexer_configuration") def mktemp_indexer_configuration(self, cur=None): pass def indexer_configuration_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute( "SELECT %s from swh_indexer_configuration_add()" % (",".join(self.indexer_configuration_cols),) ) yield from cur def indexer_configuration_get( self, tool_name, tool_version, tool_configuration, cur=None ): cur = self._cursor(cur) cur.execute( """select %s from indexer_configuration where tool_name=%%s and tool_version=%%s and tool_configuration=%%s""" % (",".join(self.indexer_configuration_cols)), (tool_name, tool_version, tool_configuration), ) return cur.fetchone() def indexer_configuration_get_from_id(self, id_, cur=None): cur = self._cursor(cur) cur.execute( """select %s from indexer_configuration where id=%%s""" % (",".join(self.indexer_configuration_cols)), (id_,), ) return cur.fetchone() diff --git a/swh/indexer/storage/in_memory.py b/swh/indexer/storage/in_memory.py index 974f671..afe7f57 100644 --- a/swh/indexer/storage/in_memory.py +++ b/swh/indexer/storage/in_memory.py @@ -1,506 +1,519 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import Counter, defaultdict import itertools import json import math import operator import re from typing import ( Any, Dict, Generic, Iterable, List, Optional, Set, Tuple, Type, TypeVar, Union, ) from swh.core.collections import SortedList from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.model.model import SHA1_SIZE, Sha1Git from swh.storage.utils import get_partition_bounds_bytes from . 
import MAPPING_NAMES, check_id_duplicates from .exc import IndexerStorageArgumentException from .interface import PagedResult, Sha1 from .model import ( BaseRow, ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, + OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from .writer import JournalWriter SHA1_DIGEST_SIZE = 160 ToolId = int def _transform_tool(tool): return { "id": tool["id"], "name": tool["tool_name"], "version": tool["tool_version"], "configuration": tool["tool_configuration"], } def check_id_types(data: List[Dict[str, Any]]): """Checks all elements of the list have an 'id' whose type is 'bytes'.""" if not all(isinstance(item.get("id"), bytes) for item in data): raise IndexerStorageArgumentException("identifiers must be bytes.") def _key_from_dict(d): return tuple(sorted(d.items())) TValue = TypeVar("TValue", bound=BaseRow) class SubStorage(Generic[TValue]): """Implements common missing/get/add logic for each indexer type.""" _data: Dict[Sha1, Dict[Tuple, Dict[str, Any]]] _tools_per_id: Dict[Sha1, Set[ToolId]] def __init__(self, row_class: Type[TValue], tools, journal_writer): self.row_class = row_class self._tools = tools self._sorted_ids = SortedList[bytes, Sha1]() self._data = defaultdict(dict) self._journal_writer = journal_writer self._tools_per_id = defaultdict(set) def _key_from_dict(self, d) -> Tuple: """Like the global _key_from_dict, but filters out dict keys that don't belong in the unique key.""" return _key_from_dict({k: d[k] for k in self.row_class.UNIQUE_KEY_FIELDS}) def missing(self, keys: Iterable[Dict]) -> List[Sha1]: """List data missing from storage. 
Args: data (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ results = [] for key in keys: tool_id = key["indexer_configuration_id"] id_ = key["id"] if tool_id not in self._tools_per_id.get(id_, set()): results.append(id_) return results def get(self, ids: Iterable[Sha1]) -> List[TValue]: """Retrieve data per id. Args: ids (iterable): sha1 checksums Yields: dict: dictionaries with the following keys: - **id** (bytes) - **tool** (dict): tool used to compute metadata - arbitrary data (as provided to `add`) """ results = [] for id_ in ids: for entry in self._data[id_].values(): entry = entry.copy() tool_id = entry.pop("indexer_configuration_id") results.append( self.row_class( id=id_, tool=_transform_tool(self._tools[tool_id]), **entry, ) ) return results def get_all(self) -> List[TValue]: return self.get(self._sorted_ids) def get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: """Retrieve ids of content with `indexer_type` within partition partition_id bound by limit. Args: **indexer_type**: Type of data content to index (mimetype, language, etc...) **indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) **with_textual_data** (bool): Deal with only textual content (True) or all content (all contents by defaults, False) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. 
    def add(self, data: Iterable[TValue]) -> int:
        """Add data not present in storage.

        Args:
            data (iterable): dictionaries with keys:

              - **id**: sha1
              - **indexer_configuration_id**: tool used to compute the
                results
              - arbitrary data

        Returns:
            the number of rows added
        """
        data = list(data)
        # Reject batches with duplicate unique keys before any mutation.
        check_id_duplicates(data)
        object_type = self.row_class.object_type  # type: ignore
        # Journal entries are written before the in-memory state changes,
        # mirroring the behavior of the postgresql backend.
        self._journal_writer.write_additions(object_type, data)
        count = 0
        for obj in data:
            item = obj.to_dict()
            id_ = item.pop("id")
            tool_id = item["indexer_configuration_id"]
            # Rows are keyed by their unique-key fields, so re-adding the
            # same (id, tool, ...) combination overwrites the stored value.
            key = _key_from_dict(obj.unique_key())
            self._data[id_][key] = item
            self._tools_per_id[id_].add(tool_id)
            count += 1
            if id_ not in self._sorted_ids:
                self._sorted_ids.add(id_)
        return count
self._content_metadata = SubStorage(ContentMetadataRow, *args) self._directory_intrinsic_metadata = SubStorage( DirectoryIntrinsicMetadataRow, *args ) self._origin_intrinsic_metadata = SubStorage(OriginIntrinsicMetadataRow, *args) + self._origin_extrinsic_metadata = SubStorage(OriginExtrinsicMetadataRow, *args) def check_config(self, *, check_write): return True def content_mimetype_missing( self, mimetypes: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._mimetypes.missing(mimetypes) def content_mimetype_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: return self._mimetypes.get_partition( indexer_configuration_id, partition_id, nb_partitions, page_token, limit ) def content_mimetype_add( self, mimetypes: List[ContentMimetypeRow] ) -> Dict[str, int]: added = self._mimetypes.add(mimetypes) return {"content_mimetype:add": added} def content_mimetype_get(self, ids: Iterable[Sha1]) -> List[ContentMimetypeRow]: return self._mimetypes.get(ids) def content_language_missing( self, languages: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._languages.missing(languages) def content_language_get(self, ids: Iterable[Sha1]) -> List[ContentLanguageRow]: return self._languages.get(ids) def content_language_add( self, languages: List[ContentLanguageRow] ) -> Dict[str, int]: added = self._languages.add(languages) return {"content_language:add": added} def content_ctags_missing(self, ctags: Iterable[Dict]) -> List[Tuple[Sha1, int]]: return self._content_ctags.missing(ctags) def content_ctags_get(self, ids: Iterable[Sha1]) -> List[ContentCtagsRow]: return self._content_ctags.get(ids) def content_ctags_add(self, ctags: List[ContentCtagsRow]) -> Dict[str, int]: added = self._content_ctags.add(ctags) return {"content_ctags:add": added} def content_ctags_search( self, expression: str, limit: int = 10, last_sha1: Optional[Sha1] = None ) -> 
List[ContentCtagsRow]: nb_matches = 0 items_per_id: Dict[Tuple[Sha1Git, ToolId], List[ContentCtagsRow]] = {} for item in sorted(self._content_ctags.get_all()): if item.id <= (last_sha1 or bytes(0 for _ in range(SHA1_DIGEST_SIZE))): continue items_per_id.setdefault( (item.id, item.indexer_configuration_id), [] ).append(item) results = [] for items in items_per_id.values(): for item in items: if item.name != expression: continue nb_matches += 1 if nb_matches > limit: break results.append(item) return results def content_fossology_license_get( self, ids: Iterable[Sha1] ) -> List[ContentLicenseRow]: return self._licenses.get(ids) def content_fossology_license_add( self, licenses: List[ContentLicenseRow] ) -> Dict[str, int]: added = self._licenses.add(licenses) return {"content_fossology_license:add": added} def content_fossology_license_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: return self._licenses.get_partition( indexer_configuration_id, partition_id, nb_partitions, page_token, limit ) def content_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._content_metadata.missing(metadata) def content_metadata_get(self, ids: Iterable[Sha1]) -> List[ContentMetadataRow]: return self._content_metadata.get(ids) def content_metadata_add( self, metadata: List[ContentMetadataRow] ) -> Dict[str, int]: added = self._content_metadata.add(metadata) return {"content_metadata:add": added} def directory_intrinsic_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: return self._directory_intrinsic_metadata.missing(metadata) def directory_intrinsic_metadata_get( self, ids: Iterable[Sha1] ) -> List[DirectoryIntrinsicMetadataRow]: return self._directory_intrinsic_metadata.get(ids) def directory_intrinsic_metadata_add( self, metadata: List[DirectoryIntrinsicMetadataRow] ) -> Dict[str, int]: added = 
self._directory_intrinsic_metadata.add(metadata) return {"directory_intrinsic_metadata:add": added} def origin_intrinsic_metadata_get( self, urls: Iterable[str] ) -> List[OriginIntrinsicMetadataRow]: return self._origin_intrinsic_metadata.get(urls) def origin_intrinsic_metadata_add( self, metadata: List[OriginIntrinsicMetadataRow] ) -> Dict[str, int]: added = self._origin_intrinsic_metadata.add(metadata) return {"origin_intrinsic_metadata:add": added} def origin_intrinsic_metadata_search_fulltext( self, conjunction: List[str], limit: int = 100 ) -> List[OriginIntrinsicMetadataRow]: # A very crude fulltext search implementation, but that's enough # to work on English metadata tokens_re = re.compile("[a-zA-Z0-9]+") search_tokens = list(itertools.chain(*map(tokens_re.findall, conjunction))) def rank(data): # Tokenize the metadata text = json.dumps(data.metadata) text_tokens = tokens_re.findall(text) text_token_occurences = Counter(text_tokens) # Count the number of occurrences of search tokens in the text score = 0 for search_token in search_tokens: if text_token_occurences[search_token] == 0: # Search token is not in the text. 
return 0 score += text_token_occurences[search_token] # Normalize according to the text's length return score / math.log(len(text_tokens)) results = [ (rank(data), data) for data in self._origin_intrinsic_metadata.get_all() ] results = [(rank_, data) for (rank_, data) in results if rank_ > 0] results.sort( key=operator.itemgetter(0), reverse=True # Don't try to order 'data' ) return [result for (rank_, result) in results[:limit]] def origin_intrinsic_metadata_search_by_producer( self, page_token: str = "", limit: int = 100, ids_only: bool = False, mappings: Optional[List[str]] = None, tool_ids: Optional[List[int]] = None, ) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]: assert isinstance(page_token, str) nb_results = 0 if mappings is not None: mapping_set = frozenset(mappings) if tool_ids is not None: tool_id_set = frozenset(tool_ids) rows = [] # we go to limit+1 to check whether we should add next_page_token in # the response for entry in self._origin_intrinsic_metadata.get_all(): if entry.id <= page_token: continue if nb_results >= (limit + 1): break if mappings and mapping_set.isdisjoint(entry.mappings): continue if tool_ids and entry.tool["id"] not in tool_id_set: continue rows.append(entry) nb_results += 1 if len(rows) > limit: rows = rows[:limit] next_page_token = rows[-1].id else: next_page_token = None if ids_only: rows = [row.id for row in rows] return PagedResult( results=rows, next_page_token=next_page_token, ) def origin_intrinsic_metadata_stats(self): mapping_count = {m: 0 for m in MAPPING_NAMES} total = non_empty = 0 for data in self._origin_intrinsic_metadata.get_all(): total += 1 if set(data.metadata) - {"@context"}: non_empty += 1 for mapping in data.mappings: mapping_count[mapping] += 1 return {"per_mapping": mapping_count, "total": total, "non_empty": non_empty} + def origin_extrinsic_metadata_get( + self, urls: Iterable[str] + ) -> List[OriginExtrinsicMetadataRow]: + return self._origin_extrinsic_metadata.get(urls) + + def 
origin_extrinsic_metadata_add( + self, metadata: List[OriginExtrinsicMetadataRow] + ) -> Dict[str, int]: + added = self._origin_extrinsic_metadata.add(metadata) + return {"origin_extrinsic_metadata:add": added} + def indexer_configuration_add(self, tools): inserted = [] for tool in tools: tool = tool.copy() id_ = self._tool_key(tool) tool["id"] = id_ self._tools[id_] = tool inserted.append(tool) return inserted def indexer_configuration_get(self, tool): return self._tools.get(self._tool_key(tool)) def _tool_key(self, tool): return hash( ( tool["tool_name"], tool["tool_version"], json.dumps(tool["tool_configuration"], sort_keys=True), ) ) diff --git a/swh/indexer/storage/interface.py b/swh/indexer/storage/interface.py index 24c735d..012c685 100644 --- a/swh/indexer/storage/interface.py +++ b/swh/indexer/storage/interface.py @@ -1,520 +1,549 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Dict, Iterable, List, Optional, Tuple, TypeVar, Union from typing_extensions import Protocol, runtime_checkable from swh.core.api import remote_api_endpoint from swh.core.api.classes import PagedResult as CorePagedResult from swh.indexer.storage.model import ( ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, + OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) TResult = TypeVar("TResult") PagedResult = CorePagedResult[TResult, str] Sha1 = bytes @runtime_checkable class IndexerStorageInterface(Protocol): @remote_api_endpoint("check_config") def check_config(self, *, check_write): """Check that the storage is configured and ready to go.""" ... 
@remote_api_endpoint("content_mimetype/missing") def content_mimetype_missing( self, mimetypes: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """Generate mimetypes missing from storage. Args: mimetypes (iterable): iterable of dict with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Returns: list of tuple (id, indexer_configuration_id) missing """ ... @remote_api_endpoint("content_mimetype/range") def content_mimetype_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: """Retrieve mimetypes within partition partition_id bound by limit. Args: **indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. If next_page_token is None, there is no more data to fetch """ ... @remote_api_endpoint("content_mimetype/add") def content_mimetype_add( self, mimetypes: List[ContentMimetypeRow] ) -> Dict[str, int]: """Add mimetypes not present in storage. Args: mimetypes: mimetype rows to be added, with their `tool` attribute set to None. overwrite (``True``) or skip duplicates (``False``, the default) Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("content_mimetype") def content_mimetype_get(self, ids: Iterable[Sha1]) -> List[ContentMimetypeRow]: """Retrieve full content mimetype per ids. Args: ids: sha1 identifiers Returns: mimetype row objects """ ... @remote_api_endpoint("content_language/missing") def content_language_missing( self, languages: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """List languages missing from storage. 
Args: languages (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Returns: list of tuple (id, indexer_configuration_id) missing """ ... @remote_api_endpoint("content_language") def content_language_get(self, ids: Iterable[Sha1]) -> List[ContentLanguageRow]: """Retrieve full content language per ids. Args: ids (iterable): sha1 identifier Returns: language row objects """ ... @remote_api_endpoint("content_language/add") def content_language_add( self, languages: List[ContentLanguageRow] ) -> Dict[str, int]: """Add languages not present in storage. Args: languages: language row objects Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("content/ctags/missing") def content_ctags_missing(self, ctags: Iterable[Dict]) -> List[Tuple[Sha1, int]]: """List ctags missing from storage. Args: ctags (iterable): dicts with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Returns: list of missing id for the tuple (id, indexer_configuration_id) """ ... @remote_api_endpoint("content/ctags") def content_ctags_get(self, ids: Iterable[Sha1]) -> List[ContentCtagsRow]: """Retrieve ctags per id. Args: ids (iterable): sha1 checksums Returns: list of language rows """ ... @remote_api_endpoint("content/ctags/add") def content_ctags_add(self, ctags: List[ContentCtagsRow]) -> Dict[str, int]: """Add ctags not present in storage Args: ctags (iterable): dictionaries with keys: - **id** (bytes): sha1 - **ctags** ([list): List of dictionary with keys: name, kind, line, lang Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("content/ctags/search") def content_ctags_search( self, expression: str, limit: int = 10, last_sha1: Optional[Sha1] = None ) -> List[ContentCtagsRow]: """Search through content's raw ctags symbols. 
Args: expression (str): Expression to search for limit (int): Number of rows to return (default to 10). last_sha1 (str): Offset from which retrieving data (default to ''). Returns: rows of ctags including id, name, lang, kind, line, etc... """ ... @remote_api_endpoint("content/fossology_license") def content_fossology_license_get( self, ids: Iterable[Sha1] ) -> List[ContentLicenseRow]: """Retrieve licenses per id. Args: ids: sha1 identifiers Yields: license rows; possibly more than one per (sha1, tool_id) if there are multiple licenses. """ ... @remote_api_endpoint("content/fossology_license/add") def content_fossology_license_add( self, licenses: List[ContentLicenseRow] ) -> Dict[str, int]: """Add licenses not present in storage. Args: license: license rows to be added, with their `tool` attribute set to None. Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("content/fossology_license/range") def content_fossology_license_get_partition( self, indexer_configuration_id: int, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Sha1]: """Retrieve licenses within the partition partition_id bound by limit. Args: **indexer_configuration_id**: The tool used to index data **partition_id**: index of the partition to fetch **nb_partitions**: total number of partitions to split into **page_token**: opaque token used for pagination **limit**: Limit result (default to 1000) Raises: IndexerStorageArgumentException for; - limit to None - wrong indexer_type provided Returns: PagedResult of Sha1. If next_page_token is None, there is no more data to fetch """ ... @remote_api_endpoint("content_metadata/missing") def content_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """List metadata missing from storage. 
Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1 identifier - **indexer_configuration_id** (int): tool used to compute the results Yields: missing sha1s """ ... @remote_api_endpoint("content_metadata") def content_metadata_get(self, ids: Iterable[Sha1]) -> List[ContentMetadataRow]: """Retrieve metadata per id. Args: ids (iterable): sha1 checksums Yields: dictionaries with the following keys: id (bytes) metadata (str): associated metadata tool (dict): tool used to compute metadata """ ... @remote_api_endpoint("content_metadata/add") def content_metadata_add( self, metadata: List[ContentMetadataRow] ) -> Dict[str, int]: """Add metadata not present in storage. Args: metadata (iterable): dictionaries with keys: - **id**: sha1 - **metadata**: arbitrary dict Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("directory_intrinsic_metadata/missing") def directory_intrinsic_metadata_missing( self, metadata: Iterable[Dict] ) -> List[Tuple[Sha1, int]]: """List metadata missing from storage. Args: metadata (iterable): dictionaries with keys: - **id** (bytes): sha1_git directory identifier - **indexer_configuration_id** (int): tool used to compute the results Returns: missing ids """ ... @remote_api_endpoint("directory_intrinsic_metadata") def directory_intrinsic_metadata_get( self, ids: Iterable[Sha1] ) -> List[DirectoryIntrinsicMetadataRow]: """Retrieve directory metadata per id. Args: ids (iterable): sha1 checksums Returns: ContentMetadataRow objects """ ... @remote_api_endpoint("directory_intrinsic_metadata/add") def directory_intrinsic_metadata_add( self, metadata: List[DirectoryIntrinsicMetadataRow], ) -> Dict[str, int]: """Add metadata not present in storage. Args: metadata: ContentMetadataRow objects Returns: Dict summary of number of rows added """ ... 
@remote_api_endpoint("origin_intrinsic_metadata") def origin_intrinsic_metadata_get( self, urls: Iterable[str] ) -> List[OriginIntrinsicMetadataRow]: """Retrieve origin metadata per id. Args: urls (iterable): origin URLs Returns: list of OriginIntrinsicMetadataRow """ ... @remote_api_endpoint("origin_intrinsic_metadata/add") def origin_intrinsic_metadata_add( self, metadata: List[OriginIntrinsicMetadataRow] ) -> Dict[str, int]: """Add origin metadata not present in storage. Args: metadata: list of OriginIntrinsicMetadataRow objects Returns: Dict summary of number of rows added """ ... @remote_api_endpoint("origin_intrinsic_metadata/search/fulltext") def origin_intrinsic_metadata_search_fulltext( self, conjunction: List[str], limit: int = 100 ) -> List[OriginIntrinsicMetadataRow]: """Returns the list of origins whose metadata contain all the terms. Args: conjunction: List of terms to be searched for. limit: The maximum number of results to return Returns: list of OriginIntrinsicMetadataRow """ ... @remote_api_endpoint("origin_intrinsic_metadata/search/by_producer") def origin_intrinsic_metadata_search_by_producer( self, page_token: str = "", limit: int = 100, ids_only: bool = False, mappings: Optional[List[str]] = None, tool_ids: Optional[List[int]] = None, ) -> PagedResult[Union[str, OriginIntrinsicMetadataRow]]: """Returns the list of origins whose metadata contain all the terms. Args: page_token (str): Opaque token used for pagination. limit (int): The maximum number of results to return ids_only (bool): Determines whether only origin urls are returned or the content as well mappings (List[str]): Returns origins whose intrinsic metadata were generated using at least one of these mappings. Returns: OriginIntrinsicMetadataRow objects """ ... @remote_api_endpoint("origin_intrinsic_metadata/stats") def origin_intrinsic_metadata_stats(self): """Returns counts of indexed metadata per origins, broken down into metadata types. 
Returns: dict: dictionary with keys: - total (int): total number of origins that were indexed (possibly yielding an empty metadata dictionary) - non_empty (int): total number of origins that we extracted a non-empty metadata dictionary from - per_mapping (dict): a dictionary with mapping names as keys and number of origins whose indexing used this mapping. Note that indexing a given origin may use 0, 1, or many mappings. """ ... + @remote_api_endpoint("origin_extrinsic_metadata") + def origin_extrinsic_metadata_get( + self, urls: Iterable[str] + ) -> List[OriginExtrinsicMetadataRow]: + """Retrieve origin metadata per id. + + Args: + urls (iterable): origin URLs + + Returns: list of OriginExtrinsicMetadataRow + """ + ... + + @remote_api_endpoint("origin_extrinsic_metadata/add") + def origin_extrinsic_metadata_add( + self, metadata: List[OriginExtrinsicMetadataRow] + ) -> Dict[str, int]: + """Add origin metadata not present in storage. + + Args: + metadata: list of OriginExtrinsicMetadataRow objects + + Returns: + Dict summary of number of rows added + + """ + ... + @remote_api_endpoint("indexer_configuration/add") def indexer_configuration_add(self, tools): """Add new tools to the storage. Args: tools ([dict]): List of dictionary representing tool to insert in the db. Dictionary with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: List of dict inserted in the db (holding the id key as well). The order of the list is not guaranteed to match the order of the initial list. """ ... @remote_api_endpoint("indexer_configuration/data") def indexer_configuration_get(self, tool): """Retrieve tool information. 
Args: tool (dict): Dictionary representing a tool with the following keys: - **tool_name** (str): tool's name - **tool_version** (str): tool's version - **tool_configuration** (dict): tool's configuration (free form dict) Returns: The same dictionary with an `id` key, None otherwise. """ ... diff --git a/swh/indexer/storage/model.py b/swh/indexer/storage/model.py index 0642c32..df8a897 100644 --- a/swh/indexer/storage/model.py +++ b/swh/indexer/storage/model.py @@ -1,138 +1,150 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Classes used internally by the in-memory idx-storage, and will be used for the interface of the idx-storage in the near future.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar import attr from typing_extensions import Final from swh.model.model import Sha1Git, dictify TSelf = TypeVar("TSelf") @attr.s class BaseRow: UNIQUE_KEY_FIELDS: Tuple = ("id", "indexer_configuration_id") id = attr.ib(type=Any) indexer_configuration_id = attr.ib(type=Optional[int], default=None, kw_only=True) tool = attr.ib(type=Optional[Dict], default=None, kw_only=True) def __attrs_post_init__(self): if self.indexer_configuration_id is None and self.tool is None: raise TypeError("Either indexer_configuration_id or tool must be not None.") if self.indexer_configuration_id is not None and self.tool is not None: raise TypeError( "indexer_configuration_id and tool are mutually exclusive; " "only one may be not None." 
) def anonymize(self: TSelf) -> Optional[TSelf]: # Needed to implement swh.journal.writer.ValueProtocol return None def to_dict(self) -> Dict[str, Any]: """Wrapper of `attr.asdict` that can be overridden by subclasses that have special handling of some of the fields.""" d = dictify(attr.asdict(self, recurse=False)) if d["indexer_configuration_id"] is None: del d["indexer_configuration_id"] if d["tool"] is None: del d["tool"] return d @classmethod def from_dict(cls: Type[TSelf], d) -> TSelf: return cls(**d) def unique_key(self) -> Dict: obj = self # tool["id"] and obj.indexer_configuration_id are the same value, but # only one of them is set for any given object if obj.indexer_configuration_id is None: assert obj.tool # constructors ensures tool XOR indexer_configuration_id obj = attr.evolve(obj, indexer_configuration_id=obj.tool["id"], tool=None) return {key: getattr(obj, key) for key in self.UNIQUE_KEY_FIELDS} @attr.s class ContentMimetypeRow(BaseRow): object_type: Final = "content_mimetype" id = attr.ib(type=Sha1Git) mimetype = attr.ib(type=str) encoding = attr.ib(type=str) @attr.s class ContentLanguageRow(BaseRow): object_type: Final = "content_language" id = attr.ib(type=Sha1Git) lang = attr.ib(type=str) @attr.s class ContentCtagsRow(BaseRow): object_type: Final = "content_ctags" UNIQUE_KEY_FIELDS = ( "id", "indexer_configuration_id", "name", "kind", "line", "lang", ) id = attr.ib(type=Sha1Git) name = attr.ib(type=str) kind = attr.ib(type=str) line = attr.ib(type=int) lang = attr.ib(type=str) @attr.s class ContentLicenseRow(BaseRow): object_type: Final = "content_fossology_license" UNIQUE_KEY_FIELDS = ("id", "indexer_configuration_id", "license") id = attr.ib(type=Sha1Git) license = attr.ib(type=str) @attr.s class ContentMetadataRow(BaseRow): object_type: Final = "content_metadata" id = attr.ib(type=Sha1Git) metadata = attr.ib(type=Dict[str, Any]) @attr.s class DirectoryIntrinsicMetadataRow(BaseRow): object_type: Final = "directory_intrinsic_metadata" id = 
attr.ib(type=Sha1Git) metadata = attr.ib(type=Dict[str, Any]) mappings = attr.ib(type=List[str]) @attr.s class OriginIntrinsicMetadataRow(BaseRow): object_type: Final = "origin_intrinsic_metadata" id = attr.ib(type=str) metadata = attr.ib(type=Dict[str, Any]) from_directory = attr.ib(type=Sha1Git) mappings = attr.ib(type=List[str]) + + +@attr.s +class OriginExtrinsicMetadataRow(BaseRow): + object_type: Final = "origin_extrinsic_metadata" + + id = attr.ib(type=str) + """origin URL""" + metadata = attr.ib(type=Dict[str, Any]) + from_remd_id = attr.ib(type=Sha1Git) + """id of the RawExtrinsicMetadata object used as source for indexed metadata""" + mappings = attr.ib(type=List[str]) diff --git a/swh/indexer/tests/conftest.py b/swh/indexer/tests/conftest.py index b9211ab..bcf0af1 100644 --- a/swh/indexer/tests/conftest.py +++ b/swh/indexer/tests/conftest.py @@ -1,132 +1,130 @@ # Copyright (C) 2019-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import timedelta from functools import partial import os from typing import List, Tuple from unittest.mock import patch import pytest from pytest_postgresql import factories import yaml from swh.core.db.pytest_plugin import initialize_database_for_module from swh.indexer.storage import IndexerStorage, get_indexer_storage from swh.objstorage.factory import get_objstorage from swh.storage import get_storage from .utils import fill_obj_storage, fill_storage TASK_NAMES: List[Tuple[str, str]] = [ # (scheduler-task-type, task-class-test-name) ("index-directory-metadata", "directory_intrinsic_metadata"), ("index-origin-metadata", "origin_intrinsic_metadata"), ] idx_postgresql_proc = factories.postgresql_proc( load=[ partial( initialize_database_for_module, modname="indexer", version=IndexerStorage.current_version, ) ], ) idx_storage_postgresql = 
factories.postgresql("idx_postgresql_proc") @pytest.fixture def indexer_scheduler(swh_scheduler): # Insert the expected task types within the scheduler for task_name, task_class_name in TASK_NAMES: swh_scheduler.create_task_type( { "type": task_name, "description": f"The {task_class_name} indexer testing task", "backend_name": f"swh.indexer.tests.tasks.{task_class_name}", "default_interval": timedelta(days=1), "min_interval": timedelta(hours=6), "max_interval": timedelta(days=12), "num_retries": 3, } ) return swh_scheduler @pytest.fixture def idx_storage_backend_config(idx_storage_postgresql): """Basic pg storage configuration with no journal collaborator for the indexer storage (to avoid pulling optional dependency on clients of this fixture) """ return { "cls": "local", "db": idx_storage_postgresql.dsn, } @pytest.fixture def swh_indexer_config( swh_storage_backend_config, idx_storage_backend_config, swh_scheduler_config ): return { "storage": swh_storage_backend_config, "objstorage": {"cls": "memory"}, "indexer_storage": idx_storage_backend_config, "scheduler": {"cls": "local", **swh_scheduler_config}, "tools": { "name": "file", "version": "1:5.30-1+deb9u1", "configuration": {"type": "library", "debian-package": "python3-magic"}, }, "compute_checksums": ["blake2b512"], # for rehash indexer } @pytest.fixture def idx_storage(swh_indexer_config): """An instance of in-memory indexer storage that gets injected into all indexers classes. """ idx_storage_config = swh_indexer_config["indexer_storage"] return get_indexer_storage(**idx_storage_config) @pytest.fixture def storage(swh_indexer_config): """An instance of in-memory storage that gets injected into all indexers classes. """ storage = get_storage(**swh_indexer_config["storage"]) fill_storage(storage) return storage @pytest.fixture def obj_storage(swh_indexer_config): """An instance of in-memory objstorage that gets injected into all indexers classes. 
""" objstorage = get_objstorage(**swh_indexer_config["objstorage"]) fill_obj_storage(objstorage) - with patch.dict( - "swh.objstorage.factory._STORAGE_CLASSES", {"memory": lambda: objstorage} - ): + with patch("swh.indexer.indexer.get_objstorage", return_value=objstorage): yield objstorage @pytest.fixture def swh_config(swh_indexer_config, monkeypatch, tmp_path): conffile = os.path.join(str(tmp_path), "indexer.yml") with open(conffile, "w") as f: f.write(yaml.dump(swh_indexer_config)) monkeypatch.setenv("SWH_CONFIG_FILENAME", conffile) return conffile diff --git a/swh/indexer/tests/metadata_dictionary/__init__.py b/swh/indexer/tests/metadata_dictionary/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swh/indexer/tests/metadata_dictionary/test_cff.py b/swh/indexer/tests/metadata_dictionary/test_cff.py new file mode 100644 index 0000000..f91a689 --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_cff.py @@ -0,0 +1,220 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from swh.indexer.metadata_dictionary import MAPPINGS + + +def test_compute_metadata_cff(): + """ + testing CITATION.cff translation + """ + content = """# YAML 1.2 +--- +abstract: "Command line program to convert from Citation File \ +Format to various other formats such as BibTeX, EndNote, RIS, \ +schema.org, CodeMeta, and .zenodo.json." 
+authors: + - + affiliation: "Netherlands eScience Center" + family-names: Klaver + given-names: Tom + - + affiliation: "Humboldt-Universität zu Berlin" + family-names: Druskat + given-names: Stephan + orcid: https://orcid.org/0000-0003-4925-7248 +cff-version: "1.0.3" +date-released: 2019-11-12 +doi: 10.5281/zenodo.1162057 +keywords: + - "citation" + - "bibliography" + - "cff" + - "CITATION.cff" +license: Apache-2.0 +message: "If you use this software, please cite it using these metadata." +license: Apache-2.0 +message: "If you use this software, please cite it using these metadata." +repository-code: "https://github.com/citation-file-format/cff-converter-python" +title: cffconvert +version: "1.4.0-alpha0" + """.encode( + "utf-8" + ) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [ + { + "type": "Person", + "affiliation": { + "type": "Organization", + "name": "Netherlands eScience Center", + }, + "familyName": "Klaver", + "givenName": "Tom", + }, + { + "id": "https://orcid.org/0000-0003-4925-7248", + "type": "Person", + "affiliation": { + "type": "Organization", + "name": "Humboldt-Universität zu Berlin", + }, + "familyName": "Druskat", + "givenName": "Stephan", + }, + ], + "codeRepository": ( + "https://github.com/citation-file-format/cff-converter-python" + ), + "datePublished": "2019-11-12", + "description": """Command line program to convert from \ +Citation File Format to various other formats such as BibTeX, EndNote, \ +RIS, schema.org, CodeMeta, and .zenodo.json.""", + "identifier": "https://doi.org/10.5281/zenodo.1162057", + "keywords": ["citation", "bibliography", "cff", "CITATION.cff"], + "license": "https://spdx.org/licenses/Apache-2.0", + "version": "1.4.0-alpha0", + } + + result = MAPPINGS["CffMapping"]().translate(content) + assert expected == result + + +def test_compute_metadata_cff_invalid_yaml(): + """ + test yaml translation for invalid yaml file + """ + content = 
"""cff-version: 1.0.3 +message: To cite the SigMF specification, please include the following: +authors: + - name: The GNU Radio Foundation, Inc. + """.encode( + "utf-8" + ) + + expected = None + + result = MAPPINGS["CffMapping"]().translate(content) + assert expected == result + + +def test_compute_metadata_cff_empty(): + """ + test yaml translation for empty yaml file + """ + content = """ + """.encode( + "utf-8" + ) + + expected = None + + result = MAPPINGS["CffMapping"]().translate(content) + assert expected == result + + +def test_compute_metadata_cff_list(): + """ + test yaml translation for empty yaml file + """ + content = """ +- Foo +- Bar + """.encode( + "utf-8" + ) + + expected = None + + result = MAPPINGS["CffMapping"]().translate(content) + assert expected == result + + +def test_cff_empty_fields(): + """ + testing CITATION.cff translation + """ + content = """# YAML 1.2 + authors: + - + affiliation: "Hogwarts" + family-names: + given-names: Harry + - + affiliation: "Ministry of Magic" + family-names: Weasley + orcid: + given-names: Arthur + + + """.encode( + "utf-8" + ) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [ + { + "type": "Person", + "affiliation": { + "type": "Organization", + "name": "Hogwarts", + }, + "givenName": "Harry", + }, + { + "type": "Person", + "affiliation": { + "type": "Organization", + "name": "Ministry of Magic", + }, + "familyName": "Weasley", + "givenName": "Arthur", + }, + ], + } + + result = MAPPINGS["CffMapping"]().translate(content) + assert expected == result + + +def test_cff_invalid_fields(): + """ + testing CITATION.cff translation + """ + content = """# YAML 1.2 + authors: + - + affiliation: "Hogwarts" + family-names: + - Potter + - James + given-names: Harry + + """.encode( + "utf-8" + ) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [ + { + "type": "Person", + 
"affiliation": { + "type": "Organization", + "name": "Hogwarts", + }, + "givenName": "Harry", + }, + ], + } + + result = MAPPINGS["CffMapping"]().translate(content) + assert expected == result diff --git a/swh/indexer/tests/metadata_dictionary/test_codemeta.py b/swh/indexer/tests/metadata_dictionary/test_codemeta.py new file mode 100644 index 0000000..383b4a7 --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_codemeta.py @@ -0,0 +1,175 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import json + +from hypothesis import HealthCheck, given, settings + +from swh.indexer.codemeta import CODEMETA_TERMS +from swh.indexer.metadata_detector import detect_metadata +from swh.indexer.metadata_dictionary import MAPPINGS + +from ..utils import json_document_strategy + + +def test_compute_metadata_valid_codemeta(): + raw_content = b"""{ + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "@type": "SoftwareSourceCode", + "identifier": "CodeMeta", + "description": "CodeMeta is a concept vocabulary that can be used to standardize the exchange of software metadata across repositories and organizations.", + "name": "CodeMeta: Minimal metadata schemas for science software and code, in JSON-LD", + "codeRepository": "https://github.com/codemeta/codemeta", + "issueTracker": "https://github.com/codemeta/codemeta/issues", + "license": "https://spdx.org/licenses/Apache-2.0", + "version": "2.0", + "author": [ + { + "@type": "Person", + "givenName": "Carl", + "familyName": "Boettiger", + "email": "cboettig@gmail.com", + "@id": "http://orcid.org/0000-0002-1642-628X" + }, + { + "@type": "Person", + "givenName": "Matthew B.", + "familyName": "Jones", + "email": "jones@nceas.ucsb.edu", + "@id": "http://orcid.org/0000-0003-0077-4738" + } + ], + "maintainer": { + 
"@type": "Person", + "givenName": "Carl", + "familyName": "Boettiger", + "email": "cboettig@gmail.com", + "@id": "http://orcid.org/0000-0002-1642-628X" + }, + "contIntegration": "https://travis-ci.org/codemeta/codemeta", + "developmentStatus": "active", + "downloadUrl": "https://github.com/codemeta/codemeta/archive/2.0.zip", + "funder": { + "@id": "https://doi.org/10.13039/100000001", + "@type": "Organization", + "name": "National Science Foundation" + }, + "funding":"1549758; Codemeta: A Rosetta Stone for Metadata in Scientific Software", + "keywords": [ + "metadata", + "software" + ], + "version":"2.0", + "dateCreated":"2017-06-05", + "datePublished":"2017-06-05", + "programmingLanguage": "JSON-LD" + }""" # noqa + expected_result = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "identifier": "CodeMeta", + "description": "CodeMeta is a concept vocabulary that can " + "be used to standardize the exchange of software metadata " + "across repositories and organizations.", + "name": "CodeMeta: Minimal metadata schemas for science " + "software and code, in JSON-LD", + "codeRepository": "https://github.com/codemeta/codemeta", + "issueTracker": "https://github.com/codemeta/codemeta/issues", + "license": "https://spdx.org/licenses/Apache-2.0", + "version": "2.0", + "author": [ + { + "type": "Person", + "givenName": "Carl", + "familyName": "Boettiger", + "email": "cboettig@gmail.com", + "id": "http://orcid.org/0000-0002-1642-628X", + }, + { + "type": "Person", + "givenName": "Matthew B.", + "familyName": "Jones", + "email": "jones@nceas.ucsb.edu", + "id": "http://orcid.org/0000-0003-0077-4738", + }, + ], + "maintainer": { + "type": "Person", + "givenName": "Carl", + "familyName": "Boettiger", + "email": "cboettig@gmail.com", + "id": "http://orcid.org/0000-0002-1642-628X", + }, + "contIntegration": "https://travis-ci.org/codemeta/codemeta", + "developmentStatus": "active", + "downloadUrl": 
"https://github.com/codemeta/codemeta/archive/2.0.zip", + "funder": { + "id": "https://doi.org/10.13039/100000001", + "type": "Organization", + "name": "National Science Foundation", + }, + "funding": "1549758; Codemeta: A Rosetta Stone for Metadata " + "in Scientific Software", + "keywords": ["metadata", "software"], + "version": "2.0", + "dateCreated": "2017-06-05", + "datePublished": "2017-06-05", + "programmingLanguage": "JSON-LD", + } + result = MAPPINGS["CodemetaMapping"]().translate(raw_content) + assert result == expected_result + + +def test_compute_metadata_codemeta_alternate_context(): + raw_content = b"""{ + "@context": "https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld", + "@type": "SoftwareSourceCode", + "identifier": "CodeMeta" + }""" # noqa + expected_result = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "identifier": "CodeMeta", + } + result = MAPPINGS["CodemetaMapping"]().translate(raw_content) + assert result == expected_result + + +@settings(suppress_health_check=[HealthCheck.too_slow]) +@given(json_document_strategy(keys=CODEMETA_TERMS)) +def test_codemeta_adversarial(doc): + raw = json.dumps(doc).encode() + MAPPINGS["CodemetaMapping"]().translate(raw) + + +def test_detect_metadata_codemeta_json_uppercase(): + df = [ + { + "sha1_git": b"abc", + "name": b"index.html", + "target": b"abc", + "length": 897, + "status": "visible", + "type": "file", + "perms": 33188, + "dir_id": b"dir_a", + "sha1": b"bcd", + }, + { + "sha1_git": b"aab", + "name": b"CODEMETA.json", + "target": b"aab", + "length": 712, + "status": "visible", + "type": "file", + "perms": 33188, + "dir_id": b"dir_a", + "sha1": b"bcd", + }, + ] + results = detect_metadata(df) + + expected_results = {"CodemetaMapping": [b"bcd"]} + assert expected_results == results diff --git a/swh/indexer/tests/metadata_dictionary/test_composer.py b/swh/indexer/tests/metadata_dictionary/test_composer.py new file mode 100644 index 
0000000..9513938 --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_composer.py @@ -0,0 +1,84 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from swh.indexer.metadata_dictionary import MAPPINGS + + +def test_compute_metadata_composer(): + raw_content = """{ +"name": "symfony/polyfill-mbstring", +"type": "library", +"description": "Symfony polyfill for the Mbstring extension", +"keywords": [ + "polyfill", + "shim", + "compatibility", + "portable" +], +"homepage": "https://symfony.com", +"license": "MIT", +"authors": [ + { + "name": "Nicolas Grekas", + "email": "p@tchwork.com" + }, + { + "name": "Symfony Community", + "homepage": "https://symfony.com/contributors" + } +], +"require": { + "php": ">=7.1" +}, +"provide": { + "ext-mbstring": "*" +}, +"autoload": { + "files": [ + "bootstrap.php" + ] +}, +"suggest": { + "ext-mbstring": "For best performance" +}, +"minimum-stability": "dev", +"extra": { + "branch-alias": { + "dev-main": "1.26-dev" + }, + "thanks": { + "name": "symfony/polyfill", + "url": "https://github.com/symfony/polyfill" + } +} +} + """.encode( + "utf-8" + ) + + result = MAPPINGS["ComposerMapping"]().translate(raw_content) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "symfony/polyfill-mbstring", + "keywords": ["polyfill", "shim", "compatibility", "portable"], + "description": "Symfony polyfill for the Mbstring extension", + "url": "https://symfony.com", + "license": "https://spdx.org/licenses/MIT", + "author": [ + { + "type": "Person", + "name": "Nicolas Grekas", + "email": "p@tchwork.com", + }, + { + "type": "Person", + "name": "Symfony Community", + }, + ], + } + + assert result == expected diff --git a/swh/indexer/tests/metadata_dictionary/test_dart.py 
b/swh/indexer/tests/metadata_dictionary/test_dart.py new file mode 100644 index 0000000..146f7c7 --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_dart.py @@ -0,0 +1,157 @@ +# Copyright (C) 2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from swh.indexer.metadata_dictionary import MAPPINGS + + +def test_compute_metadata_pubspec(): + raw_content = """ +--- +name: newtify +description: >- + Have you been turned into a newt? Would you like to be? + This package can help. It has all of the + newt-transmogrification functionality you have been looking + for. +keywords: + - polyfill + - shim + - compatibility + - portable + - mbstring +version: 1.2.3 +license: MIT +homepage: https://example-pet-store.com/newtify +documentation: https://example-pet-store.com/newtify/docs + +environment: + sdk: '>=2.10.0 <3.0.0' + +dependencies: + efts: ^2.0.4 + transmogrify: ^0.4.0 + +dev_dependencies: + test: '>=1.15.0 <2.0.0' + """.encode( + "utf-8" + ) + + result = MAPPINGS["PubMapping"]().translate(raw_content) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "newtify", + "keywords": [ + "polyfill", + "shim", + "compatibility", + "portable", + "mbstring", + ], + "description": """Have you been turned into a newt? Would you like to be? \ +This package can help. 
It has all of the \ +newt-transmogrification functionality you have been looking \ +for.""", + "url": "https://example-pet-store.com/newtify", + "license": "https://spdx.org/licenses/MIT", + } + + assert result == expected + + +def test_normalize_author_pubspec(): + raw_content = """ + author: Atlee Pine + """.encode( + "utf-8" + ) + + result = MAPPINGS["PubMapping"]().translate(raw_content) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [ + {"type": "Person", "name": "Atlee Pine", "email": "atlee@example.org"}, + ], + } + + assert result == expected + + +def test_normalize_authors_pubspec(): + raw_content = """ + authors: + - Vicky Merzown + - Ron Bilius Weasley + """.encode( + "utf-8" + ) + + result = MAPPINGS["PubMapping"]().translate(raw_content) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [ + {"type": "Person", "name": "Vicky Merzown", "email": "vmz@example.org"}, + { + "type": "Person", + "name": "Ron Bilius Weasley", + }, + ], + } + + assert result == expected + + +def test_normalize_author_authors_pubspec(): + raw_content = """ + authors: + - Vicky Merzown + - Ron Bilius Weasley + author: Hermione Granger + """.encode( + "utf-8" + ) + + result = MAPPINGS["PubMapping"]().translate(raw_content) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [ + {"type": "Person", "name": "Vicky Merzown", "email": "vmz@example.org"}, + { + "type": "Person", + "name": "Ron Bilius Weasley", + }, + { + "type": "Person", + "name": "Hermione Granger", + }, + ], + } + + assert result == expected + + +def test_normalize_empty_authors(): + raw_content = """ + authors: + """.encode( + "utf-8" + ) + + result = MAPPINGS["PubMapping"]().translate(raw_content) + + expected = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": 
"SoftwareSourceCode", + } + + assert result == expected diff --git a/swh/indexer/tests/metadata_dictionary/test_github.py b/swh/indexer/tests/metadata_dictionary/test_github.py new file mode 100644 index 0000000..290d91c --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_github.py @@ -0,0 +1,142 @@ +# Copyright (C) 2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from swh.indexer.metadata_dictionary import MAPPINGS + +CONTEXT = [ + "https://doi.org/10.5063/schema/codemeta-2.0", + { + "as": "https://www.w3.org/ns/activitystreams#", + "forge": "https://forgefed.org/ns#", + }, +] + + +def test_compute_metadata_none(): + """ + testing content empty content is empty + should return None + """ + content = b"" + + # None if no metadata was found or an error occurred + declared_metadata = None + result = MAPPINGS["GitHubMapping"]().translate(content) + assert declared_metadata == result + + +def test_supported_terms(): + terms = MAPPINGS["GitHubMapping"].supported_terms() + assert { + "http://schema.org/name", + "http://schema.org/license", + "https://forgefed.org/ns#forks", + "https://www.w3.org/ns/activitystreams#totalItems", + } <= terms + + +def test_compute_metadata_github(): + """ + testing only computation of metadata with hard_mapping_npm + """ + content = b""" +{ + "id": 80521091, + "node_id": "MDEwOlJlcG9zaXRvcnk4MDUyMTA5MQ==", + "name": "swh-indexer", + "full_name": "SoftwareHeritage/swh-indexer", + "private": false, + "owner": { + "login": "SoftwareHeritage", + "id": 18555939, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjE4NTU1OTM5", + "avatar_url": "https://avatars.githubusercontent.com/u/18555939?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/SoftwareHeritage", + "type": "Organization", + "site_admin": false + }, + "html_url": 
"https://github.com/SoftwareHeritage/swh-indexer", + "description": "GitHub mirror of Metadata indexer", + "fork": false, + "url": "https://api.github.com/repos/SoftwareHeritage/swh-indexer", + "created_at": "2017-01-31T13:05:39Z", + "updated_at": "2022-06-22T08:02:20Z", + "pushed_at": "2022-06-29T09:01:08Z", + "git_url": "git://github.com/SoftwareHeritage/swh-indexer.git", + "ssh_url": "git@github.com:SoftwareHeritage/swh-indexer.git", + "clone_url": "https://github.com/SoftwareHeritage/swh-indexer.git", + "svn_url": "https://github.com/SoftwareHeritage/swh-indexer", + "homepage": "https://forge.softwareheritage.org/source/swh-indexer/", + "size": 2713, + "stargazers_count": 13, + "watchers_count": 12, + "language": "Python", + "has_issues": false, + "has_projects": false, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 1, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 0, + "license": { + "key": "gpl-3.0", + "name": "GNU General Public License v3.0", + "spdx_id": "GPL-3.0", + "url": "https://api.github.com/licenses/gpl-3.0", + "node_id": "MDc6TGljZW5zZTk=" + }, + "allow_forking": true, + "is_template": false, + "web_commit_signoff_required": false, + "topics": [ + + ], + "visibility": "public", + "forks": 1, + "open_issues": 0, + "watchers": 13, + "default_branch": "master", + "temp_clone_token": null, + "organization": { + "login": "SoftwareHeritage", + "id": 18555939, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjE4NTU1OTM5", + "avatar_url": "https://avatars.githubusercontent.com/u/18555939?v=4", + "gravatar_id": "", + "type": "Organization", + "site_admin": false + }, + "network_count": 1, + "subscribers_count": 6 +} + + """ + result = MAPPINGS["GitHubMapping"]().translate(content) + assert result == { + "@context": CONTEXT, + "type": "https://forgefed.org/ns#Repository", + "forge:forks": { + "as:totalItems": 1, + "type": "as:OrderedCollection", + }, + "as:likes": { + "as:totalItems": 13, + 
"type": "as:Collection", + }, + "as:followers": { + "as:totalItems": 12, + "type": "as:Collection", + }, + "license": "https://spdx.org/licenses/GPL-3.0", + "name": "SoftwareHeritage/swh-indexer", + "description": "GitHub mirror of Metadata indexer", + "schema:codeRepository": "https://github.com/SoftwareHeritage/swh-indexer", + "schema:dateCreated": "2017-01-31T13:05:39Z", + "schema:dateModified": "2022-06-22T08:02:20Z", + } diff --git a/swh/indexer/tests/metadata_dictionary/test_maven.py b/swh/indexer/tests/metadata_dictionary/test_maven.py new file mode 100644 index 0000000..ea51860 --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_maven.py @@ -0,0 +1,365 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import logging + +from hypothesis import HealthCheck, given, settings + +from swh.indexer.metadata_dictionary import MAPPINGS + +from ..utils import xml_document_strategy + + +def test_compute_metadata_maven(): + raw_content = b""" + + Maven Default Project + 4.0.0 + com.mycompany.app + my-app + 1.2.3 + + + central + Maven Repository Switchboard + default + http://repo1.maven.org/maven2 + + false + + + + + + Apache License, Version 2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + repo + A business-friendly OSS license + + + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "Maven Default Project", + "identifier": "com.mycompany.app", + "version": "1.2.3", + "license": "https://www.apache.org/licenses/LICENSE-2.0.txt", + "codeRepository": ("http://repo1.maven.org/maven2/com/mycompany/app/my-app"), + } + + +def test_compute_metadata_maven_empty(): + raw_content = b""" + + """ + result = 
MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + } + + +def test_compute_metadata_maven_almost_empty(): + raw_content = b""" + + + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + } + + +def test_compute_metadata_maven_invalid_xml(caplog): + expected_warning = ( + "swh.indexer.metadata_dictionary.maven.MavenMapping", + logging.WARNING, + "Error parsing XML from foo", + ) + caplog.at_level(logging.WARNING, logger="swh.indexer.metadata_dictionary") + + raw_content = b""" + """ + caplog.clear() + result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) + assert caplog.record_tuples == [expected_warning], result + assert result is None + + raw_content = b""" + """ + caplog.clear() + result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) + assert caplog.record_tuples == [expected_warning], result + assert result is None + + +def test_compute_metadata_maven_unknown_encoding(caplog): + expected_warning = ( + "swh.indexer.metadata_dictionary.maven.MavenMapping", + logging.WARNING, + "Error detecting XML encoding from foo", + ) + caplog.at_level(logging.WARNING, logger="swh.indexer.metadata_dictionary") + + raw_content = b""" + + """ + caplog.clear() + result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) + assert caplog.record_tuples == [expected_warning], result + assert result is None + + raw_content = b""" + + """ + caplog.clear() + result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) + assert caplog.record_tuples == [expected_warning], result + assert result is None + + +def test_compute_metadata_maven_invalid_encoding(caplog): + expected_warning = [ + # libexpat1 <= 2.2.10-2+deb11u1 + [ + ( + "swh.indexer.metadata_dictionary.maven.MavenMapping", + logging.WARNING, + "Error unidecoding 
XML from foo", + ) + ], + # libexpat1 >= 2.2.10-2+deb11u2 + [ + ( + "swh.indexer.metadata_dictionary.maven.MavenMapping", + logging.WARNING, + "Error parsing XML from foo", + ) + ], + ] + caplog.at_level(logging.WARNING, logger="swh.indexer.metadata_dictionary") + + raw_content = b""" + + """ + caplog.clear() + result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) + assert caplog.record_tuples in expected_warning, result + assert result is None + + +def test_compute_metadata_maven_minimal(): + raw_content = b""" + + Maven Default Project + 4.0.0 + com.mycompany.app + my-app + 1.2.3 + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "Maven Default Project", + "identifier": "com.mycompany.app", + "version": "1.2.3", + "codeRepository": ( + "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" + ), + } + + +def test_compute_metadata_maven_empty_nodes(): + raw_content = b""" + + Maven Default Project + 4.0.0 + com.mycompany.app + my-app + 1.2.3 + + + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "Maven Default Project", + "identifier": "com.mycompany.app", + "version": "1.2.3", + "codeRepository": ( + "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" + ), + } + + raw_content = b""" + + Maven Default Project + 4.0.0 + com.mycompany.app + my-app + + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "Maven Default Project", + "identifier": "com.mycompany.app", + "codeRepository": ( + "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" + ), + } + + raw_content = b""" + + + 4.0.0 + com.mycompany.app + my-app + 
1.2.3 + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "identifier": "com.mycompany.app", + "version": "1.2.3", + "codeRepository": ( + "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" + ), + } + + raw_content = b""" + + Maven Default Project + 4.0.0 + com.mycompany.app + my-app + 1.2.3 + + + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "Maven Default Project", + "identifier": "com.mycompany.app", + "version": "1.2.3", + "codeRepository": ( + "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" + ), + } + + raw_content = b""" + + + 1.2.3 + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "version": "1.2.3", + } + + +def test_compute_metadata_maven_invalid_licenses(): + raw_content = b""" + + Maven Default Project + 4.0.0 + com.mycompany.app + my-app + 1.2.3 + + foo + + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "Maven Default Project", + "identifier": "com.mycompany.app", + "version": "1.2.3", + "codeRepository": ( + "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" + ), + } + + +def test_compute_metadata_maven_multiple(): + """Tests when there are multiple code repos and licenses.""" + raw_content = b""" + + Maven Default Project + 4.0.0 + com.mycompany.app + my-app + 1.2.3 + + + central + Maven Repository Switchboard + default + http://repo1.maven.org/maven2 + + false + + + + example + Example Maven Repo + default + http://example.org/maven2 + + + + + Apache License, Version 2.0 
+ https://www.apache.org/licenses/LICENSE-2.0.txt + repo + A business-friendly OSS license + + + MIT license + https://opensource.org/licenses/MIT + + + """ + result = MAPPINGS["MavenMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "Maven Default Project", + "identifier": "com.mycompany.app", + "version": "1.2.3", + "license": [ + "https://www.apache.org/licenses/LICENSE-2.0.txt", + "https://opensource.org/licenses/MIT", + ], + "codeRepository": [ + "http://repo1.maven.org/maven2/com/mycompany/app/my-app", + "http://example.org/maven2/com/mycompany/app/my-app", + ], + } + + +@settings(suppress_health_check=[HealthCheck.too_slow]) +@given( + xml_document_strategy( + keys=list(MAPPINGS["MavenMapping"].mapping), # type: ignore + root="project", + xmlns="http://maven.apache.org/POM/4.0.0", + ) +) +def test_maven_adversarial(doc): + MAPPINGS["MavenMapping"]().translate(doc) diff --git a/swh/indexer/tests/metadata_dictionary/test_npm.py b/swh/indexer/tests/metadata_dictionary/test_npm.py new file mode 100644 index 0000000..2f7d7cf --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_npm.py @@ -0,0 +1,322 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +import json + +from hypothesis import HealthCheck, given, settings +import pytest + +from swh.indexer.metadata_detector import detect_metadata +from swh.indexer.metadata_dictionary import MAPPINGS +from swh.indexer.storage.model import ContentMetadataRow +from swh.model.hashutil import hash_to_bytes + +from ..test_metadata import TRANSLATOR_TOOL, ContentMetadataTestIndexer +from ..utils import ( + BASE_TEST_CONFIG, + fill_obj_storage, + fill_storage, + json_document_strategy, +) + + +def 
test_compute_metadata_none(): + """ + testing content empty content is empty + should return None + """ + content = b"" + + # None if no metadata was found or an error occurred + declared_metadata = None + result = MAPPINGS["NpmMapping"]().translate(content) + assert declared_metadata == result + + +def test_compute_metadata_npm(): + """ + testing only computation of metadata with hard_mapping_npm + """ + content = b""" + { + "name": "test_metadata", + "version": "0.0.2", + "description": "Simple package.json test for indexer", + "repository": { + "type": "git", + "url": "https://github.com/moranegg/metadata_test" + }, + "author": { + "email": "moranegg@example.com", + "name": "Morane G" + } + } + """ + declared_metadata = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "test_metadata", + "version": "0.0.2", + "description": "Simple package.json test for indexer", + "codeRepository": "git+https://github.com/moranegg/metadata_test", + "author": [ + { + "type": "Person", + "name": "Morane G", + "email": "moranegg@example.com", + } + ], + } + + result = MAPPINGS["NpmMapping"]().translate(content) + assert declared_metadata == result + + +def test_compute_metadata_invalid_description_npm(): + """ + testing only computation of metadata with hard_mapping_npm + """ + content = b""" + { + "name": "test_metadata", + "version": "0.0.2", + "description": 1234 + } + """ + declared_metadata = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "test_metadata", + "version": "0.0.2", + } + + result = MAPPINGS["NpmMapping"]().translate(content) + assert declared_metadata == result + + +def test_index_content_metadata_npm(): + """ + testing NPM with package.json + - one sha1 uses a file that can't be translated to metadata and + should return None in the translated metadata + """ + sha1s = [ + hash_to_bytes("26a9f72a7c87cc9205725cfd879f514ff4f3d8d5"), + 
hash_to_bytes("d4c647f0fc257591cc9ba1722484229780d1c607"), + hash_to_bytes("02fb2c89e14f7fab46701478c83779c7beb7b069"), + ] + # this metadata indexer computes only metadata for package.json + # in npm context with a hard mapping + config = BASE_TEST_CONFIG.copy() + config["tools"] = [TRANSLATOR_TOOL] + metadata_indexer = ContentMetadataTestIndexer(config=config) + fill_obj_storage(metadata_indexer.objstorage) + fill_storage(metadata_indexer.storage) + + metadata_indexer.run(sha1s) + results = list(metadata_indexer.idx_storage.content_metadata_get(sha1s)) + + expected_results = [ + ContentMetadataRow( + id=hash_to_bytes("26a9f72a7c87cc9205725cfd879f514ff4f3d8d5"), + tool=TRANSLATOR_TOOL, + metadata={ + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "codeRepository": "git+https://github.com/moranegg/metadata_test", + "description": "Simple package.json test for indexer", + "name": "test_metadata", + "version": "0.0.1", + }, + ), + ContentMetadataRow( + id=hash_to_bytes("d4c647f0fc257591cc9ba1722484229780d1c607"), + tool=TRANSLATOR_TOOL, + metadata={ + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "issueTracker": "https://github.com/npm/npm/issues", + "author": [ + { + "type": "Person", + "name": "Isaac Z. 
Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me", + } + ], + "codeRepository": "git+https://github.com/npm/npm", + "description": "a package manager for JavaScript", + "license": "https://spdx.org/licenses/Artistic-2.0", + "version": "5.0.3", + "name": "npm", + "keywords": [ + "install", + "modules", + "package manager", + "package.json", + ], + "url": "https://docs.npmjs.com/", + }, + ), + ] + + for result in results: + del result.tool["id"] + + # The assertion below returns False sometimes because of nested lists + assert expected_results == results + + +def test_npm_bugs_normalization(): + # valid dictionary + package_json = b"""{ + "name": "foo", + "bugs": { + "url": "https://github.com/owner/project/issues", + "email": "foo@example.com" + } + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "name": "foo", + "issueTracker": "https://github.com/owner/project/issues", + "type": "SoftwareSourceCode", + } + + # "invalid" dictionary + package_json = b"""{ + "name": "foo", + "bugs": { + "email": "foo@example.com" + } + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "name": "foo", + "type": "SoftwareSourceCode", + } + + # string + package_json = b"""{ + "name": "foo", + "bugs": "https://github.com/owner/project/issues" + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "name": "foo", + "issueTracker": "https://github.com/owner/project/issues", + "type": "SoftwareSourceCode", + } + + +def test_npm_repository_normalization(): + # normal + package_json = b"""{ + "name": "foo", + "repository": { + "type" : "git", + "url" : "https://github.com/npm/cli.git" + } + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + assert result == { + "@context": 
"https://doi.org/10.5063/schema/codemeta-2.0", + "name": "foo", + "codeRepository": "git+https://github.com/npm/cli.git", + "type": "SoftwareSourceCode", + } + + # missing url + package_json = b"""{ + "name": "foo", + "repository": { + "type" : "git" + } + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "name": "foo", + "type": "SoftwareSourceCode", + } + + # github shortcut + package_json = b"""{ + "name": "foo", + "repository": "github:npm/cli" + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + expected_result = { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "name": "foo", + "codeRepository": "git+https://github.com/npm/cli.git", + "type": "SoftwareSourceCode", + } + assert result == expected_result + + # github shortshortcut + package_json = b"""{ + "name": "foo", + "repository": "npm/cli" + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + assert result == expected_result + + # gitlab shortcut + package_json = b"""{ + "name": "foo", + "repository": "gitlab:user/repo" + }""" + result = MAPPINGS["NpmMapping"]().translate(package_json) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "name": "foo", + "codeRepository": "git+https://gitlab.com/user/repo.git", + "type": "SoftwareSourceCode", + } + + +@settings(suppress_health_check=[HealthCheck.too_slow]) +@given(json_document_strategy(keys=list(MAPPINGS["NpmMapping"].mapping))) # type: ignore +def test_npm_adversarial(doc): + raw = json.dumps(doc).encode() + MAPPINGS["NpmMapping"]().translate(raw) + + +@pytest.mark.parametrize( + "filename", [b"package.json", b"Package.json", b"PACKAGE.json", b"PACKAGE.JSON"] +) +def test_detect_metadata_package_json(filename): + df = [ + { + "sha1_git": b"abc", + "name": b"index.js", + "target": b"abc", + "length": 897, + "status": "visible", + "type": "file", + "perms": 33188, + "dir_id": 
b"dir_a", + "sha1": b"bcd", + }, + { + "sha1_git": b"aab", + "name": filename, + "target": b"aab", + "length": 712, + "status": "visible", + "type": "file", + "perms": 33188, + "dir_id": b"dir_a", + "sha1": b"cde", + }, + ] + results = detect_metadata(df) + + expected_results = {"NpmMapping": [b"cde"]} + assert expected_results == results diff --git a/swh/indexer/tests/metadata_dictionary/test_python.py b/swh/indexer/tests/metadata_dictionary/test_python.py new file mode 100644 index 0000000..106a9ca --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_python.py @@ -0,0 +1,114 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from swh.indexer.metadata_dictionary import MAPPINGS + + +def test_compute_metadata_pkginfo(): + raw_content = b"""\ +Metadata-Version: 2.1 +Name: swh.core +Version: 0.0.49 +Summary: Software Heritage core utilities +Home-page: https://forge.softwareheritage.org/diffusion/DCORE/ +Author: Software Heritage developers +Author-email: swh-devel@inria.fr +License: UNKNOWN +Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest +Project-URL: Funding, https://www.softwareheritage.org/donate +Project-URL: Source, https://forge.softwareheritage.org/source/swh-core +Description: swh-core + ======== + \x20 + core library for swh's modules: + - config parser + - hash computations + - serialization + - logging mechanism + \x20 +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) +Classifier: Operating System :: OS Independent +Classifier: Development Status :: 5 - Production/Stable +Description-Content-Type: text/markdown +Provides-Extra: testing +""" # noqa + result = 
MAPPINGS["PythonPkginfoMapping"]().translate(raw_content) + assert result["description"] == [ + "Software Heritage core utilities", # note the comma here + "swh-core\n" + "========\n" + "\n" + "core library for swh's modules:\n" + "- config parser\n" + "- hash computations\n" + "- serialization\n" + "- logging mechanism\n" + "", + ], result + del result["description"] + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "url": "https://forge.softwareheritage.org/diffusion/DCORE/", + "name": "swh.core", + "author": [ + { + "type": "Person", + "name": "Software Heritage developers", + "email": "swh-devel@inria.fr", + } + ], + "version": "0.0.49", + } + + +def test_compute_metadata_pkginfo_utf8(): + raw_content = b"""\ +Metadata-Version: 1.1 +Name: snowpyt +Description-Content-Type: UNKNOWN +Description: foo + Hydrology N\xc2\xb083 +""" # noqa + result = MAPPINGS["PythonPkginfoMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "snowpyt", + "description": "foo\nHydrology N°83", + } + + +def test_compute_metadata_pkginfo_keywords(): + raw_content = b"""\ +Metadata-Version: 2.1 +Name: foo +Keywords: foo bar baz +""" # noqa + result = MAPPINGS["PythonPkginfoMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "foo", + "keywords": ["foo", "bar", "baz"], + } + + +def test_compute_metadata_pkginfo_license(): + raw_content = b"""\ +Metadata-Version: 2.1 +Name: foo +License: MIT +""" # noqa + result = MAPPINGS["PythonPkginfoMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "foo", + "license": "MIT", + } diff --git a/swh/indexer/tests/metadata_dictionary/test_ruby.py 
b/swh/indexer/tests/metadata_dictionary/test_ruby.py new file mode 100644 index 0000000..ba2cc30 --- /dev/null +++ b/swh/indexer/tests/metadata_dictionary/test_ruby.py @@ -0,0 +1,134 @@ +# Copyright (C) 2017-2022 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from hypothesis import HealthCheck, given, settings, strategies + +from swh.indexer.metadata_dictionary import MAPPINGS + + +def test_gemspec_base(): + raw_content = b""" +Gem::Specification.new do |s| +s.name = 'example' +s.version = '0.1.0' +s.licenses = ['MIT'] +s.summary = "This is an example!" +s.description = "Much longer explanation of the example!" +s.authors = ["Ruby Coder"] +s.email = 'rubycoder@example.com' +s.files = ["lib/example.rb"] +s.homepage = 'https://rubygems.org/gems/example' +s.metadata = { "source_code_uri" => "https://github.com/example/example" } +end""" + result = MAPPINGS["GemspecMapping"]().translate(raw_content) + assert set(result.pop("description")) == { + "This is an example!", + "Much longer explanation of the example!", + } + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [{"type": "Person", "name": "Ruby Coder"}], + "name": "example", + "license": "https://spdx.org/licenses/MIT", + "codeRepository": "https://rubygems.org/gems/example", + "email": "rubycoder@example.com", + "version": "0.1.0", + } + + +def test_gemspec_two_author_fields(): + raw_content = b""" +Gem::Specification.new do |s| +s.authors = ["Ruby Coder1"] +s.author = "Ruby Coder2" +end""" + result = MAPPINGS["GemspecMapping"]().translate(raw_content) + assert result.pop("author") in ( + [ + {"type": "Person", "name": "Ruby Coder1"}, + {"type": "Person", "name": "Ruby Coder2"}, + ], + [ + {"type": "Person", "name": "Ruby Coder2"}, + {"type": "Person", "name": 
"Ruby Coder1"}, + ], + ) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + } + + +def test_gemspec_invalid_author(): + raw_content = b""" +Gem::Specification.new do |s| +s.author = ["Ruby Coder"] +end""" + result = MAPPINGS["GemspecMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + } + raw_content = b""" +Gem::Specification.new do |s| +s.author = "Ruby Coder1", +end""" + result = MAPPINGS["GemspecMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + } + raw_content = b""" +Gem::Specification.new do |s| +s.authors = ["Ruby Coder1", ["Ruby Coder2"]] +end""" + result = MAPPINGS["GemspecMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "author": [{"type": "Person", "name": "Ruby Coder1"}], + } + + +def test_gemspec_alternative_header(): + raw_content = b""" +require './lib/version' + +Gem::Specification.new { |s| +s.name = 'rb-system-with-aliases' +s.summary = 'execute system commands with aliases' +} +""" + result = MAPPINGS["GemspecMapping"]().translate(raw_content) + assert result == { + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "SoftwareSourceCode", + "name": "rb-system-with-aliases", + "description": "execute system commands with aliases", + } + + +@settings(suppress_health_check=[HealthCheck.too_slow]) +@given( + strategies.dictionaries( + # keys + strategies.one_of( + strategies.text(), + *map(strategies.just, MAPPINGS["GemspecMapping"].mapping), # type: ignore + ), + # values + strategies.recursive( + strategies.characters(), + lambda children: strategies.lists(children, min_size=1), + ), + ) +) +def test_gemspec_adversarial(doc): + parts = [b"Gem::Specification.new do |s|\n"] 
+ for (k, v) in doc.items(): + parts.append(" s.{} = {}\n".format(k, repr(v)).encode()) + parts.append(b"end\n") + MAPPINGS["GemspecMapping"]().translate(b"".join(parts)) diff --git a/swh/indexer/tests/storage/test_storage.py b/swh/indexer/tests/storage/test_storage.py index 77e7b8a..7a135fb 100644 --- a/swh/indexer/tests/storage/test_storage.py +++ b/swh/indexer/tests/storage/test_storage.py @@ -1,1840 +1,2088 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import math import threading from typing import Any, Dict, List, Tuple, Type import attr import pytest from swh.indexer.storage.exc import DuplicateId, IndexerStorageArgumentException from swh.indexer.storage.interface import IndexerStorageInterface, PagedResult from swh.indexer.storage.model import ( BaseRow, ContentCtagsRow, ContentLanguageRow, ContentLicenseRow, ContentMetadataRow, ContentMimetypeRow, DirectoryIntrinsicMetadataRow, + OriginExtrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from swh.model.hashutil import hash_to_bytes def prepare_mimetypes_from_licenses( fossology_licenses: List[ContentLicenseRow], ) -> List[ContentMimetypeRow]: """Fossology license needs some consistent data in db to run.""" mimetypes = [] for c in fossology_licenses: mimetypes.append( ContentMimetypeRow( id=c.id, mimetype="text/plain", # for filtering on textual data to work encoding="utf-8", indexer_configuration_id=c.indexer_configuration_id, ) ) return mimetypes def endpoint_name(etype: str, ename: str) -> str: """Compute the storage's endpoint's name >>> endpoint_name('content_mimetype', 'add') 'content_mimetype_add' >>> endpoint_name('content_fosso_license', 'delete') 'content_fosso_license_delete' """ return f"{etype}_{ename}" def endpoint(storage, etype: str, ename: str): return getattr(storage, 
endpoint_name(etype, ename)) def expected_summary(count: int, etype: str, ename: str = "add") -> Dict[str, int]: """Compute the expected summary The key is determine according to etype and ename >>> expected_summary(10, 'content_mimetype', 'add') {'content_mimetype:add': 10} >>> expected_summary(9, 'origin_intrinsic_metadata', 'delete') {'origin_intrinsic_metadata:del': 9} """ pattern = ename[0:3] key = endpoint_name(etype, ename).replace(f"_{ename}", f":{pattern}") return {key: count} def test_check_config(swh_indexer_storage) -> None: assert swh_indexer_storage.check_config(check_write=True) assert swh_indexer_storage.check_config(check_write=False) class StorageETypeTester: """Base class for testing a series of common behaviour between a bunch of endpoint types supported by an IndexerStorage. This is supposed to be inherited with the following class attributes: - endpoint_type - tool_name - example_data See below for example usage. """ endpoint_type: str tool_name: str example_data: List[Dict] row_class: Type[BaseRow] def test_missing( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool_id = data.tools[self.tool_name]["id"] # given 2 (hopefully) unknown objects query = [ { "id": data.sha1_1, "indexer_configuration_id": tool_id, }, { "id": data.sha1_2, "indexer_configuration_id": tool_id, }, ] # we expect these are both returned by the xxx_missing endpoint actual_missing = endpoint(storage, etype, "missing")(query) assert list(actual_missing) == [ data.sha1_1, data.sha1_2, ] # now, when we add one of them summary = endpoint(storage, etype, "add")( [ self.row_class.from_dict( { "id": data.sha1_2, **self.example_data[0], "indexer_configuration_id": tool_id, } ) ] ) assert summary == expected_summary(1, etype) # we expect only the other one returned actual_missing = endpoint(storage, etype, "missing")(query) assert list(actual_missing) == [data.sha1_1] def 
test_add__update_in_place_duplicate( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] data_v1 = { "id": data.sha1_2, **self.example_data[0], "indexer_configuration_id": tool["id"], } # given summary = endpoint(storage, etype, "add")([self.row_class.from_dict(data_v1)]) assert summary == expected_summary(1, etype) # not added # when actual_data = list(endpoint(storage, etype, "get")([data.sha1_2])) expected_data_v1 = [ self.row_class.from_dict( {"id": data.sha1_2, **self.example_data[0], "tool": tool} ) ] # then assert actual_data == expected_data_v1 # given data_v2 = data_v1.copy() data_v2.update(self.example_data[1]) endpoint(storage, etype, "add")([self.row_class.from_dict(data_v2)]) assert summary == expected_summary(1, etype) # modified so counted actual_data = list(endpoint(storage, etype, "get")([data.sha1_2])) expected_data_v2 = [ self.row_class.from_dict( { "id": data.sha1_2, **self.example_data[1], "tool": tool, } ) ] # data did change as the v2 was used to overwrite v1 assert actual_data == expected_data_v2 def test_add_deadlock( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] hashes = [ hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4{:03d}".format(i)) for i in range(1000) ] data_v1 = [ self.row_class.from_dict( { "id": hash_, **self.example_data[0], "indexer_configuration_id": tool["id"], } ) for hash_ in hashes ] data_v2 = [ self.row_class.from_dict( { "id": hash_, **self.example_data[1], "indexer_configuration_id": tool["id"], } ) for hash_ in hashes ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. 
data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given endpoint(storage, etype, "add")(data_v1) # when actual_data = sorted( endpoint(storage, etype, "get")(hashes), key=lambda x: x.id, ) expected_data_v1 = [ self.row_class.from_dict( {"id": hash_, **self.example_data[0], "tool": tool} ) for hash_ in hashes ] # then assert actual_data == expected_data_v1 # given def f1() -> None: endpoint(storage, etype, "add")(data_v2a) def f2() -> None: endpoint(storage, etype, "add")(data_v2b) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = sorted( endpoint(storage, etype, "get")(hashes), key=lambda x: x.id, ) expected_data_v2 = [ self.row_class.from_dict( {"id": hash_, **self.example_data[1], "tool": tool} ) for hash_ in hashes ] assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2) for (item, expected_item_v1, expected_item_v2) in zip( actual_data, expected_data_v1, expected_data_v2 ): assert item in (expected_item_v1, expected_item_v2) def test_add__duplicate_twice( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] data_dir1 = self.row_class.from_dict( { "id": data.directory_id_2, **self.example_data[0], "indexer_configuration_id": tool["id"], } ) data_dir2 = self.row_class.from_dict( { "id": data.directory_id_2, **self.example_data[1], "indexer_configuration_id": tool["id"], } ) # when summary = endpoint(storage, etype, "add")([data_dir1]) assert summary == expected_summary(1, etype) with pytest.raises(DuplicateId): endpoint(storage, etype, "add")([data_dir2, data_dir2]) # then actual_data = list( endpoint(storage, etype, "get")([data.directory_id_2, data.directory_id_1]) ) expected_data = [ self.row_class.from_dict( {"id": data.directory_id_2, **self.example_data[0], "tool": tool} ) ] assert actual_data == expected_data def 
test_add( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data etype = self.endpoint_type tool = data.tools[self.tool_name] # conftest fills it with mimetypes storage.journal_writer.journal.objects = [] # type: ignore query = [data.sha1_2, data.sha1_1] data1 = self.row_class.from_dict( { "id": data.sha1_2, **self.example_data[0], "indexer_configuration_id": tool["id"], } ) # when summary = endpoint(storage, etype, "add")([data1]) assert summary == expected_summary(1, etype) # then actual_data = list(endpoint(storage, etype, "get")(query)) # then expected_data = [ self.row_class.from_dict( {"id": data.sha1_2, **self.example_data[0], "tool": tool} ) ] assert actual_data == expected_data journal_objects = storage.journal_writer.journal.objects # type: ignore actual_journal_data = [ obj for (obj_type, obj) in journal_objects if obj_type == self.endpoint_type ] assert list(sorted(actual_journal_data)) == list(sorted(expected_data)) class TestIndexerStorageContentMimetypes(StorageETypeTester): """Test Indexer Storage content_mimetype related methods""" endpoint_type = "content_mimetype" tool_name = "file" example_data = [ { "mimetype": "text/plain", "encoding": "utf-8", }, { "mimetype": "text/html", "encoding": "us-ascii", }, ] row_class = ContentMimetypeRow def test_generate_content_mimetype_get_partition_failure( self, swh_indexer_storage: IndexerStorageInterface ) -> None: """get_partition call with wrong limit input should fail""" storage = swh_indexer_storage indexer_configuration_id = 42 with pytest.raises( IndexerStorageArgumentException, match="limit should not be None" ): storage.content_mimetype_get_partition( indexer_configuration_id, 0, 3, limit=None # type: ignore ) def test_generate_content_mimetype_get_partition_no_limit( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition should return result""" storage, data = 
swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id assert len(mimetypes) == 16 nb_partitions = 16 actual_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, partition_id, nb_partitions ) assert actual_result.next_page_token is None actual_ids.extend(actual_result.results) assert len(actual_ids) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_mimetype_get_partition_full( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition for a single partition should return available ids""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, 0, 1 ) assert actual_result.next_page_token is None actual_ids = actual_result.results assert len(actual_ids) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_mimetype_get_partition_empty( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition when at least one of the partitions is empty""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id # nb_partitions = smallest power of 2 such that at least one of # the partitions is empty nb_mimetypes = len(mimetypes) nb_partitions = 1 << math.floor(math.log2(nb_mimetypes) + 1) seen_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, partition_id, nb_partitions, 
limit=nb_mimetypes + 1, ) for actual_id in actual_result.results: seen_ids.append(actual_id) # Limit is higher than the max number of results assert actual_result.next_page_token is None assert set(seen_ids) == expected_ids def test_generate_content_mimetype_get_partition_with_pagination( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition should return ids provided with pagination""" storage, data = swh_indexer_storage_with_data mimetypes = data.mimetypes expected_ids = set([c.id for c in mimetypes]) indexer_configuration_id = mimetypes[0].indexer_configuration_id nb_partitions = 4 actual_ids = [] for partition_id in range(nb_partitions): next_page_token = None while True: actual_result = storage.content_mimetype_get_partition( indexer_configuration_id, partition_id, nb_partitions, limit=2, page_token=next_page_token, ) actual_ids.extend(actual_result.results) next_page_token = actual_result.next_page_token if next_page_token is None: break assert len(set(actual_ids)) == len(set(expected_ids)) for actual_id in actual_ids: assert actual_id in expected_ids class TestIndexerStorageContentLanguage(StorageETypeTester): """Test Indexer Storage content_language related methods""" endpoint_type = "content_language" tool_name = "pygments" example_data = [ { "lang": "haskell", }, { "lang": "common-lisp", }, ] row_class = ContentLanguageRow class TestIndexerStorageContentCTags(StorageETypeTester): """Test Indexer Storage content_ctags related methods""" endpoint_type = "content_ctags" tool_name = "universal-ctags" example_data = [ { "name": "done", "kind": "variable", "line": 119, "lang": "OCaml", }, { "name": "done", "kind": "variable", "line": 100, "lang": "Python", }, { "name": "main", "kind": "function", "line": 119, "lang": "Python", }, ] row_class = ContentCtagsRow # the following tests are disabled because CTAGS behaves differently @pytest.mark.skip def test_add__update_in_place_duplicate(self): pass @pytest.mark.skip 
def test_add_deadlock(self): pass def test_content_ctags_search( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # 1. given tool = data.tools["universal-ctags"] tool_id = tool["id"] ctags1 = [ ContentCtagsRow( id=data.sha1_1, indexer_configuration_id=tool_id, **kwargs, # type: ignore ) for kwargs in [ { "name": "hello", "kind": "function", "line": 133, "lang": "Python", }, { "name": "counter", "kind": "variable", "line": 119, "lang": "Python", }, { "name": "hello", "kind": "variable", "line": 210, "lang": "Python", }, ] ] ctags1_with_tool = [ attr.evolve(ctag, indexer_configuration_id=None, tool=tool) for ctag in ctags1 ] ctags2 = [ ContentCtagsRow( id=data.sha1_2, indexer_configuration_id=tool_id, **kwargs, # type: ignore ) for kwargs in [ { "name": "hello", "kind": "variable", "line": 100, "lang": "C", }, { "name": "result", "kind": "variable", "line": 120, "lang": "C", }, ] ] ctags2_with_tool = [ attr.evolve(ctag, indexer_configuration_id=None, tool=tool) for ctag in ctags2 ] storage.content_ctags_add(ctags1 + ctags2) # 1. when actual_ctags = list(storage.content_ctags_search("hello", limit=1)) # 1. then assert actual_ctags == [ctags1_with_tool[0]] # 2. when actual_ctags = list( storage.content_ctags_search("hello", limit=1, last_sha1=data.sha1_1) ) # 2. then assert actual_ctags == [ctags2_with_tool[0]] # 3. when actual_ctags = list(storage.content_ctags_search("hello")) # 3. then assert actual_ctags == [ ctags1_with_tool[0], ctags1_with_tool[2], ctags2_with_tool[0], ] # 4. when actual_ctags = list(storage.content_ctags_search("counter")) # then assert actual_ctags == [ctags1_with_tool[1]] # 5. 
when actual_ctags = list(storage.content_ctags_search("result", limit=1)) # then assert actual_ctags == [ctags2_with_tool[1]] def test_content_ctags_search_no_result( self, swh_indexer_storage: IndexerStorageInterface ) -> None: storage = swh_indexer_storage actual_ctags = list(storage.content_ctags_search("counter")) assert not actual_ctags def test_content_ctags_add__add_new_ctags_added( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool = data.tools["universal-ctags"] tool_id = tool["id"] ctag1 = ContentCtagsRow( id=data.sha1_2, indexer_configuration_id=tool_id, name="done", kind="variable", line=100, lang="Scheme", ) ctag1_with_tool = attr.evolve(ctag1, indexer_configuration_id=None, tool=tool) # given storage.content_ctags_add([ctag1]) storage.content_ctags_add([ctag1]) # conflict does nothing # when actual_ctags = list(storage.content_ctags_get([data.sha1_2])) # then assert actual_ctags == [ctag1_with_tool] # given ctag2 = ContentCtagsRow( id=data.sha1_2, indexer_configuration_id=tool_id, name="defn", kind="function", line=120, lang="Scheme", ) ctag2_with_tool = attr.evolve(ctag2, indexer_configuration_id=None, tool=tool) storage.content_ctags_add([ctag2]) actual_ctags = list(storage.content_ctags_get([data.sha1_2])) assert actual_ctags == [ctag1_with_tool, ctag2_with_tool] def test_content_ctags_add__update_in_place( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool = data.tools["universal-ctags"] tool_id = tool["id"] ctag1 = ContentCtagsRow( id=data.sha1_2, indexer_configuration_id=tool_id, name="done", kind="variable", line=100, lang="Scheme", ) ctag1_with_tool = attr.evolve(ctag1, indexer_configuration_id=None, tool=tool) # given storage.content_ctags_add([ctag1]) # when actual_ctags = list(storage.content_ctags_get([data.sha1_2])) # then assert actual_ctags == 
[ctag1_with_tool] # given ctag2 = ContentCtagsRow( id=data.sha1_2, indexer_configuration_id=tool_id, name="defn", kind="function", line=120, lang="Scheme", ) ctag2_with_tool = attr.evolve(ctag2, indexer_configuration_id=None, tool=tool) storage.content_ctags_add([ctag1, ctag2]) actual_ctags = list(storage.content_ctags_get([data.sha1_2])) assert actual_ctags == [ctag1_with_tool, ctag2_with_tool] def test_add_empty( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: (storage, data) = swh_indexer_storage_with_data etype = self.endpoint_type summary = endpoint(storage, etype, "add")([]) assert summary == {"content_ctags:add": 0} actual_ctags = list(endpoint(storage, etype, "get")([data.sha1_2])) assert actual_ctags == [] def test_get_unknown( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: (storage, data) = swh_indexer_storage_with_data etype = self.endpoint_type actual_ctags = list(endpoint(storage, etype, "get")([data.sha1_2])) assert actual_ctags == [] class TestIndexerStorageContentMetadata(StorageETypeTester): """Test Indexer Storage content_metadata related methods""" tool_name = "swh-metadata-detector" endpoint_type = "content_metadata" example_data = [ { "metadata": { "other": {}, "codeRepository": { "type": "git", "url": "https://github.com/moranegg/metadata_test", }, "description": "Simple package.json test for indexer", "name": "test_metadata", "version": "0.0.1", }, }, { "metadata": {"other": {}, "name": "test_metadata", "version": "0.0.1"}, }, ] row_class = ContentMetadataRow class TestIndexerStorageDirectoryIntrinsicMetadata(StorageETypeTester): """Test Indexer Storage directory_intrinsic_metadata related methods""" tool_name = "swh-metadata-detector" endpoint_type = "directory_intrinsic_metadata" example_data = [ { "metadata": { "other": {}, "codeRepository": { "type": "git", "url": "https://github.com/moranegg/metadata_test", }, "description": "Simple package.json test for indexer", 
"name": "test_metadata", "version": "0.0.1", }, "mappings": ["mapping1"], }, { "metadata": {"other": {}, "name": "test_metadata", "version": "0.0.1"}, "mappings": ["mapping2"], }, ] row_class = DirectoryIntrinsicMetadataRow class TestIndexerStorageContentFossologyLicense(StorageETypeTester): endpoint_type = "content_fossology_license" tool_name = "nomos" example_data = [ {"license": "Apache-2.0"}, {"license": "BSD-2-Clause"}, ] row_class = ContentLicenseRow # the following tests are disabled because licenses behaves differently @pytest.mark.skip def test_add__update_in_place_duplicate(self): pass @pytest.mark.skip def test_add_deadlock(self): pass # content_fossology_license_missing does not exist @pytest.mark.skip def test_missing(self): pass def test_content_fossology_license_add__new_license_added( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool = data.tools["nomos"] tool_id = tool["id"] license1 = ContentLicenseRow( id=data.sha1_1, license="Apache-2.0", indexer_configuration_id=tool_id, ) # given storage.content_fossology_license_add([license1]) # conflict does nothing storage.content_fossology_license_add([license1]) # when actual_licenses = list(storage.content_fossology_license_get([data.sha1_1])) # then expected_licenses = [ ContentLicenseRow( id=data.sha1_1, license="Apache-2.0", tool=tool, ) ] assert actual_licenses == expected_licenses # given license2 = ContentLicenseRow( id=data.sha1_1, license="BSD-2-Clause", indexer_configuration_id=tool_id, ) storage.content_fossology_license_add([license2]) actual_licenses = list(storage.content_fossology_license_get([data.sha1_1])) expected_licenses.append( ContentLicenseRow( id=data.sha1_1, license="BSD-2-Clause", tool=tool, ) ) # first license was not removed when the second one was added assert sorted(actual_licenses) == sorted(expected_licenses) def test_generate_content_fossology_license_get_partition_failure( self, 
swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition call with wrong limit input should fail""" storage, data = swh_indexer_storage_with_data indexer_configuration_id = 42 with pytest.raises( IndexerStorageArgumentException, match="limit should not be None" ): storage.content_fossology_license_get_partition( indexer_configuration_id, 0, 3, limit=None, # type: ignore ) def test_generate_content_fossology_license_get_partition_no_limit( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition should return results""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = set([c.id for c in fossology_licenses]) assert len(fossology_licenses) == 10 assert len(mimetypes) == 10 nb_partitions = 4 actual_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, partition_id, nb_partitions ) assert actual_result.next_page_token is None actual_ids.extend(actual_result.results) assert len(set(actual_ids)) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_fossology_license_get_partition_full( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition for a single partition should return available ids""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = 
fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = set([c.id for c in fossology_licenses]) actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, 0, 1 ) assert actual_result.next_page_token is None actual_ids = actual_result.results assert len(set(actual_ids)) == len(expected_ids) for actual_id in actual_ids: assert actual_id in expected_ids def test_generate_content_fossology_license_get_partition_empty( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition when at least one of the partitions is empty""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = set([c.id for c in fossology_licenses]) # nb_partitions = smallest power of 2 such that at least one of # the partitions is empty nb_licenses = len(fossology_licenses) nb_partitions = 1 << math.floor(math.log2(nb_licenses) + 1) seen_ids = [] for partition_id in range(nb_partitions): actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, partition_id, nb_partitions, limit=nb_licenses + 1, ) for actual_id in actual_result.results: seen_ids.append(actual_id) # Limit is higher than the max number of results assert actual_result.next_page_token is None assert set(seen_ids) == expected_ids def test_generate_content_fossology_license_get_partition_with_pagination( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: """get_partition 
should return ids provided with paginationv""" storage, data = swh_indexer_storage_with_data # craft some consistent mimetypes fossology_licenses = data.fossology_licenses mimetypes = prepare_mimetypes_from_licenses(fossology_licenses) indexer_configuration_id = fossology_licenses[0].indexer_configuration_id storage.content_mimetype_add(mimetypes) # add fossology_licenses to storage storage.content_fossology_license_add(fossology_licenses) # All ids from the db expected_ids = [c.id for c in fossology_licenses] nb_partitions = 4 actual_ids = [] for partition_id in range(nb_partitions): next_page_token = None while True: actual_result = storage.content_fossology_license_get_partition( indexer_configuration_id, partition_id, nb_partitions, limit=2, page_token=next_page_token, ) actual_ids.extend(actual_result.results) next_page_token = actual_result.next_page_token if next_page_token is None: break assert len(set(actual_ids)) == len(set(expected_ids)) for actual_id in actual_ids: assert actual_id in expected_ids def test_add_empty( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: (storage, data) = swh_indexer_storage_with_data etype = self.endpoint_type summary = endpoint(storage, etype, "add")([]) assert summary == {"content_fossology_license:add": 0} actual_license = list(endpoint(storage, etype, "get")([data.sha1_2])) assert actual_license == [] def test_get_unknown( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: (storage, data) = swh_indexer_storage_with_data etype = self.endpoint_type actual_license = list(endpoint(storage, etype, "get")([data.sha1_2])) assert actual_license == [] class TestIndexerStorageOriginIntrinsicMetadata: def test_origin_intrinsic_metadata_add( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata = { "version": None, "name": 
None, } metadata_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata, mappings=["mapping1"], indexer_configuration_id=tool_id, ) metadata_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, indexer_configuration_id=tool_id, mappings=["mapping1"], from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata_dir]) storage.origin_intrinsic_metadata_add([metadata_origin]) # then actual_metadata = list( storage.origin_intrinsic_metadata_get([data.origin_url_1, "no://where"]) ) expected_metadata = [ OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata, tool=data.tools["swh-metadata-detector"], from_directory=data.directory_id_2, mappings=["mapping1"], ) ] assert actual_metadata == expected_metadata journal_objects = storage.journal_writer.journal.objects # type: ignore actual_journal_metadata = [ obj for (obj_type, obj) in journal_objects if obj_type == "origin_intrinsic_metadata" ] assert list(sorted(actual_journal_metadata)) == list(sorted(expected_metadata)) def test_origin_intrinsic_metadata_add_update_in_place_duplicate( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata_v1: Dict[str, Any] = { "version": None, "name": None, } metadata_dir_v1 = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata_v1, mappings=[], indexer_configuration_id=tool_id, ) metadata_origin_v1 = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v1.copy(), indexer_configuration_id=tool_id, mappings=[], from_directory=data.directory_id_2, ) # given storage.directory_intrinsic_metadata_add([metadata_dir_v1]) storage.origin_intrinsic_metadata_add([metadata_origin_v1]) # when actual_metadata = list( storage.origin_intrinsic_metadata_get([data.origin_url_1]) ) # then expected_metadata_v1 = [ 
OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v1, tool=data.tools["swh-metadata-detector"], from_directory=data.directory_id_2, mappings=[], ) ] assert actual_metadata == expected_metadata_v1 # given metadata_v2 = metadata_v1.copy() metadata_v2.update( { "name": "test_update_duplicated_metadata", "author": "MG", } ) metadata_dir_v2 = attr.evolve(metadata_dir_v1, metadata=metadata_v2) metadata_origin_v2 = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v2.copy(), indexer_configuration_id=tool_id, mappings=["npm"], from_directory=data.directory_id_1, ) storage.directory_intrinsic_metadata_add([metadata_dir_v2]) storage.origin_intrinsic_metadata_add([metadata_origin_v2]) actual_metadata = list( storage.origin_intrinsic_metadata_get([data.origin_url_1]) ) expected_metadata_v2 = [ OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata_v2, tool=data.tools["swh-metadata-detector"], from_directory=data.directory_id_1, mappings=["npm"], ) ] # metadata did change as the v2 was used to overwrite v1 assert actual_metadata == expected_metadata_v2 def test_origin_intrinsic_metadata_add__deadlock( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] origins = ["file:///tmp/origin{:02d}".format(i) for i in range(100)] example_data1: Dict[str, Any] = { "metadata": { "version": None, "name": None, }, "mappings": [], } example_data2: Dict[str, Any] = { "metadata": { "version": "v1.1.1", "name": "foo", }, "mappings": [], } metadata_dir_v1 = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata={ "version": None, "name": None, }, mappings=[], indexer_configuration_id=tool_id, ) data_v1 = [ OriginIntrinsicMetadataRow( id=origin, from_directory=data.directory_id_2, indexer_configuration_id=tool_id, **example_data1, ) for origin in origins ] data_v2 = [ OriginIntrinsicMetadataRow( 
id=origin, from_directory=data.directory_id_2, indexer_configuration_id=tool_id, **example_data2, ) for origin in origins ] # Remove one item from each, so that both queries have to succeed for # all items to be in the DB. data_v2a = data_v2[1:] data_v2b = list(reversed(data_v2[0:-1])) # given storage.directory_intrinsic_metadata_add([metadata_dir_v1]) storage.origin_intrinsic_metadata_add(data_v1) # when actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v1 = [ OriginIntrinsicMetadataRow( id=origin, from_directory=data.directory_id_2, tool=data.tools["swh-metadata-detector"], **example_data1, ) for origin in origins ] # then assert actual_data == expected_data_v1 # given def f1() -> None: storage.origin_intrinsic_metadata_add(data_v2a) def f2() -> None: storage.origin_intrinsic_metadata_add(data_v2b) t1 = threading.Thread(target=f1) t2 = threading.Thread(target=f2) t2.start() t1.start() t1.join() t2.join() actual_data = list(storage.origin_intrinsic_metadata_get(origins)) expected_data_v2 = [ OriginIntrinsicMetadataRow( id=origin, from_directory=data.directory_id_2, tool=data.tools["swh-metadata-detector"], **example_data2, ) for origin in origins ] actual_data.sort(key=lambda item: item.id) assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2) for (item, expected_item_v1, expected_item_v2) in zip( actual_data, expected_data_v1, expected_data_v2 ): assert item in (expected_item_v1, expected_item_v2) def test_origin_intrinsic_metadata_add__duplicate_twice( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata = { "developmentStatus": None, "name": None, } metadata_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata, mappings=["mapping1"], indexer_configuration_id=tool_id, ) metadata_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, 
metadata=metadata, indexer_configuration_id=tool_id, mappings=["mapping1"], from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata_dir]) with pytest.raises(DuplicateId): storage.origin_intrinsic_metadata_add([metadata_origin, metadata_origin]) def test_origin_intrinsic_metadata_search_fulltext( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] metadata1 = { "author": "John Doe", } metadata1_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, ) metadata1_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_1, ) metadata2 = { "author": "Jane Doe", } metadata2_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, ) metadata2_origin = OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata1_dir]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.directory_intrinsic_metadata_add([metadata2_dir]) storage.origin_intrinsic_metadata_add([metadata2_origin]) # then search = storage.origin_intrinsic_metadata_search_fulltext assert set([res.id for res in search(["Doe"])]) == set( [data.origin_url_1, data.origin_url_2] ) assert [res.id for res in search(["John", "Doe"])] == [data.origin_url_1] assert [res.id for res in search(["John"])] == [data.origin_url_1] assert not list(search(["John", "Jane"])) def test_origin_intrinsic_metadata_search_fulltext_rank( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = 
swh_indexer_storage_with_data # given tool_id = data.tools["swh-metadata-detector"]["id"] # The following authors have "Random Person" to add some more content # to the JSON data, to work around normalization quirks when there # are few words (rank/(1+ln(nb_words)) is very sensitive to nb_words # for small values of nb_words). metadata1 = { "author": [ "Random Person", "John Doe", "Jane Doe", ] } metadata1_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, ) metadata1_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata1, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_1, ) metadata2 = { "author": [ "Random Person", "Jane Doe", ] } metadata2_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, ) metadata2_origin = OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata=metadata2, mappings=[], indexer_configuration_id=tool_id, from_directory=data.directory_id_2, ) # when storage.directory_intrinsic_metadata_add([metadata1_dir]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.directory_intrinsic_metadata_add([metadata2_dir]) storage.origin_intrinsic_metadata_add([metadata2_origin]) # then search = storage.origin_intrinsic_metadata_search_fulltext assert [res.id for res in search(["Doe"])] == [ data.origin_url_1, data.origin_url_2, ] assert [res.id for res in search(["Doe"], limit=1)] == [data.origin_url_1] assert [res.id for res in search(["John"])] == [data.origin_url_1] assert [res.id for res in search(["Jane"])] == [ data.origin_url_2, data.origin_url_1, ] assert [res.id for res in search(["John", "Jane"])] == [data.origin_url_1] def _fill_origin_intrinsic_metadata( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool1_id = 
data.tools["swh-metadata-detector"]["id"] tool2_id = data.tools["swh-metadata-detector2"]["id"] metadata1 = { "@context": "foo", "author": "John Doe", } metadata1_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_1, metadata=metadata1, mappings=["npm"], indexer_configuration_id=tool1_id, ) metadata1_origin = OriginIntrinsicMetadataRow( id=data.origin_url_1, metadata=metadata1, mappings=["npm"], indexer_configuration_id=tool1_id, from_directory=data.directory_id_1, ) metadata2 = { "@context": "foo", "author": "Jane Doe", } metadata2_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_2, metadata=metadata2, mappings=["npm", "gemspec"], indexer_configuration_id=tool2_id, ) metadata2_origin = OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata=metadata2, mappings=["npm", "gemspec"], indexer_configuration_id=tool2_id, from_directory=data.directory_id_2, ) metadata3 = { "@context": "foo", } metadata3_dir = DirectoryIntrinsicMetadataRow( id=data.directory_id_3, metadata=metadata3, mappings=["npm", "gemspec"], indexer_configuration_id=tool2_id, ) metadata3_origin = OriginIntrinsicMetadataRow( id=data.origin_url_3, metadata=metadata3, mappings=["pkg-info"], indexer_configuration_id=tool2_id, from_directory=data.directory_id_3, ) storage.directory_intrinsic_metadata_add([metadata1_dir]) storage.origin_intrinsic_metadata_add([metadata1_origin]) storage.directory_intrinsic_metadata_add([metadata2_dir]) storage.origin_intrinsic_metadata_add([metadata2_origin]) storage.directory_intrinsic_metadata_add([metadata3_dir]) storage.origin_intrinsic_metadata_add([metadata3_origin]) def test_origin_intrinsic_metadata_search_by_producer( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data self._fill_origin_intrinsic_metadata(swh_indexer_storage_with_data) tool1 = data.tools["swh-metadata-detector"] tool2 = data.tools["swh-metadata-detector2"] endpoint = 
storage.origin_intrinsic_metadata_search_by_producer # test pagination # no 'page_token' param, return all origins result = endpoint(ids_only=True) assert result == PagedResult( results=[ data.origin_url_1, data.origin_url_2, data.origin_url_3, ], next_page_token=None, ) # 'page_token' is < than origin_1, return everything result = endpoint(page_token=data.origin_url_1[:-1], ids_only=True) assert result == PagedResult( results=[ data.origin_url_1, data.origin_url_2, data.origin_url_3, ], next_page_token=None, ) # 'page_token' is origin_3, return nothing result = endpoint(page_token=data.origin_url_3, ids_only=True) assert result == PagedResult(results=[], next_page_token=None) # test limit argument result = endpoint(page_token=data.origin_url_1[:-1], limit=2, ids_only=True) assert result == PagedResult( results=[data.origin_url_1, data.origin_url_2], next_page_token=data.origin_url_2, ) result = endpoint(page_token=data.origin_url_1, limit=2, ids_only=True) assert result == PagedResult( results=[data.origin_url_2, data.origin_url_3], next_page_token=None, ) result = endpoint(page_token=data.origin_url_2, limit=2, ids_only=True) assert result == PagedResult( results=[data.origin_url_3], next_page_token=None, ) # test mappings filtering result = endpoint(mappings=["npm"], ids_only=True) assert result == PagedResult( results=[data.origin_url_1, data.origin_url_2], next_page_token=None, ) result = endpoint(mappings=["npm", "gemspec"], ids_only=True) assert result == PagedResult( results=[data.origin_url_1, data.origin_url_2], next_page_token=None, ) result = endpoint(mappings=["gemspec"], ids_only=True) assert result == PagedResult( results=[data.origin_url_2], next_page_token=None, ) result = endpoint(mappings=["pkg-info"], ids_only=True) assert result == PagedResult( results=[data.origin_url_3], next_page_token=None, ) result = endpoint(mappings=["foobar"], ids_only=True) assert result == PagedResult( results=[], next_page_token=None, ) # test pagination + mappings 
result = endpoint(mappings=["npm"], limit=1, ids_only=True) assert result == PagedResult( results=[data.origin_url_1], next_page_token=data.origin_url_1, ) # test tool filtering result = endpoint(tool_ids=[tool1["id"]], ids_only=True) assert result == PagedResult( results=[data.origin_url_1], next_page_token=None, ) result = endpoint(tool_ids=[tool2["id"]], ids_only=True) assert sorted(result.results) == [data.origin_url_2, data.origin_url_3] assert result.next_page_token is None result = endpoint(tool_ids=[tool1["id"], tool2["id"]], ids_only=True) assert sorted(result.results) == [ data.origin_url_1, data.origin_url_2, data.origin_url_3, ] assert result.next_page_token is None # test ids_only=False assert endpoint(mappings=["gemspec"]) == PagedResult( results=[ OriginIntrinsicMetadataRow( id=data.origin_url_2, metadata={ "@context": "foo", "author": "Jane Doe", }, mappings=["npm", "gemspec"], tool=tool2, from_directory=data.directory_id_2, ) ], next_page_token=None, ) def test_origin_intrinsic_metadata_stats( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data self._fill_origin_intrinsic_metadata(swh_indexer_storage_with_data) result = storage.origin_intrinsic_metadata_stats() assert result == { "per_mapping": { "cff": 0, "gemspec": 1, "npm": 2, "pkg-info": 1, "codemeta": 0, "maven": 0, }, "total": 3, "non_empty": 2, } +class TestIndexerStorageOriginExtrinsicMetadata: + def test_origin_extrinsic_metadata_add( + self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] + ) -> None: + storage, data = swh_indexer_storage_with_data + # given + tool_id = data.tools["swh-metadata-detector"]["id"] + + metadata = { + "version": None, + "name": None, + } + metadata_origin = OriginExtrinsicMetadataRow( + id=data.origin_url_1, + metadata=metadata, + indexer_configuration_id=tool_id, + mappings=["mapping1"], + from_remd_id=b"\x02" * 20, + ) + + # when + 
storage.origin_extrinsic_metadata_add([metadata_origin]) + + # then + actual_metadata = list( + storage.origin_extrinsic_metadata_get([data.origin_url_1, "no://where"]) + ) + + expected_metadata = [ + OriginExtrinsicMetadataRow( + id=data.origin_url_1, + metadata=metadata, + tool=data.tools["swh-metadata-detector"], + from_remd_id=b"\x02" * 20, + mappings=["mapping1"], + ) + ] + + assert actual_metadata == expected_metadata + + journal_objects = storage.journal_writer.journal.objects # type: ignore + actual_journal_metadata = [ + obj + for (obj_type, obj) in journal_objects + if obj_type == "origin_extrinsic_metadata" + ] + assert list(sorted(actual_journal_metadata)) == list(sorted(expected_metadata)) + + def test_origin_extrinsic_metadata_add_update_in_place_duplicate( + self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] + ) -> None: + storage, data = swh_indexer_storage_with_data + # given + tool_id = data.tools["swh-metadata-detector"]["id"] + + metadata_v1: Dict[str, Any] = { + "version": None, + "name": None, + } + metadata_origin_v1 = OriginExtrinsicMetadataRow( + id=data.origin_url_1, + metadata=metadata_v1.copy(), + indexer_configuration_id=tool_id, + mappings=[], + from_remd_id=b"\x02" * 20, + ) + + # given + storage.origin_extrinsic_metadata_add([metadata_origin_v1]) + + # when + actual_metadata = list( + storage.origin_extrinsic_metadata_get([data.origin_url_1]) + ) + + # then + expected_metadata_v1 = [ + OriginExtrinsicMetadataRow( + id=data.origin_url_1, + metadata=metadata_v1, + tool=data.tools["swh-metadata-detector"], + from_remd_id=b"\x02" * 20, + mappings=[], + ) + ] + assert actual_metadata == expected_metadata_v1 + + # given + metadata_v2 = metadata_v1.copy() + metadata_v2.update( + { + "name": "test_update_duplicated_metadata", + "author": "MG", + } + ) + metadata_origin_v2 = OriginExtrinsicMetadataRow( + id=data.origin_url_1, + metadata=metadata_v2.copy(), + indexer_configuration_id=tool_id, + mappings=["github"], + 
from_remd_id=b"\x02" * 20, + ) + + storage.origin_extrinsic_metadata_add([metadata_origin_v2]) + + actual_metadata = list( + storage.origin_extrinsic_metadata_get([data.origin_url_1]) + ) + + expected_metadata_v2 = [ + OriginExtrinsicMetadataRow( + id=data.origin_url_1, + metadata=metadata_v2, + tool=data.tools["swh-metadata-detector"], + from_remd_id=b"\x02" * 20, + mappings=["github"], + ) + ] + + # metadata did change as the v2 was used to overwrite v1 + assert actual_metadata == expected_metadata_v2 + + def test_origin_extrinsic_metadata_add__deadlock( + self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] + ) -> None: + storage, data = swh_indexer_storage_with_data + # given + tool_id = data.tools["swh-metadata-detector"]["id"] + + origins = ["file:///tmp/origin{:02d}".format(i) for i in range(100)] + + example_data1: Dict[str, Any] = { + "metadata": { + "version": None, + "name": None, + }, + "mappings": [], + } + example_data2: Dict[str, Any] = { + "metadata": { + "version": "v1.1.1", + "name": "foo", + }, + "mappings": [], + } + + data_v1 = [ + OriginExtrinsicMetadataRow( + id=origin, + from_remd_id=b"\x02" * 20, + indexer_configuration_id=tool_id, + **example_data1, + ) + for origin in origins + ] + data_v2 = [ + OriginExtrinsicMetadataRow( + id=origin, + from_remd_id=b"\x02" * 20, + indexer_configuration_id=tool_id, + **example_data2, + ) + for origin in origins + ] + + # Remove one item from each, so that both queries have to succeed for + # all items to be in the DB. 
+ data_v2a = data_v2[1:] + data_v2b = list(reversed(data_v2[0:-1])) + + # given + storage.origin_extrinsic_metadata_add(data_v1) + + # when + actual_data = list(storage.origin_extrinsic_metadata_get(origins)) + + expected_data_v1 = [ + OriginExtrinsicMetadataRow( + id=origin, + from_remd_id=b"\x02" * 20, + tool=data.tools["swh-metadata-detector"], + **example_data1, + ) + for origin in origins + ] + + # then + assert actual_data == expected_data_v1 + + # given + def f1() -> None: + storage.origin_extrinsic_metadata_add(data_v2a) + + def f2() -> None: + storage.origin_extrinsic_metadata_add(data_v2b) + + t1 = threading.Thread(target=f1) + t2 = threading.Thread(target=f2) + t2.start() + t1.start() + + t1.join() + t2.join() + + actual_data = list(storage.origin_extrinsic_metadata_get(origins)) + + expected_data_v2 = [ + OriginExtrinsicMetadataRow( + id=origin, + from_remd_id=b"\x02" * 20, + tool=data.tools["swh-metadata-detector"], + **example_data2, + ) + for origin in origins + ] + + actual_data.sort(key=lambda item: item.id) + assert len(actual_data) == len(expected_data_v1) == len(expected_data_v2) + for (item, expected_item_v1, expected_item_v2) in zip( + actual_data, expected_data_v1, expected_data_v2 + ): + assert item in (expected_item_v1, expected_item_v2) + + def test_origin_extrinsic_metadata_add__duplicate_twice( + self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] + ) -> None: + storage, data = swh_indexer_storage_with_data + # given + tool_id = data.tools["swh-metadata-detector"]["id"] + + metadata = { + "developmentStatus": None, + "name": None, + } + metadata_origin = OriginExtrinsicMetadataRow( + id=data.origin_url_1, + metadata=metadata, + indexer_configuration_id=tool_id, + mappings=["mapping1"], + from_remd_id=b"\x02" * 20, + ) + + # when + with pytest.raises(DuplicateId): + storage.origin_extrinsic_metadata_add([metadata_origin, metadata_origin]) + + class TestIndexerStorageIndexerConfiguration: def 
test_indexer_configuration_add( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "some-unknown-tool", "tool_version": "some-version", "tool_configuration": {"debian-package": "some-package"}, } actual_tool = storage.indexer_configuration_get(tool) assert actual_tool is None # does not exist # add it actual_tools = list(storage.indexer_configuration_add([tool])) assert len(actual_tools) == 1 actual_tool = actual_tools[0] assert actual_tool is not None # now it exists new_id = actual_tool.pop("id") assert actual_tool == tool actual_tools2 = list(storage.indexer_configuration_add([tool])) actual_tool2 = actual_tools2[0] assert actual_tool2 is not None # now it exists new_id2 = actual_tool2.pop("id") assert new_id == new_id2 assert actual_tool == actual_tool2 def test_indexer_configuration_add_multiple( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "some-unknown-tool", "tool_version": "some-version", "tool_configuration": {"debian-package": "some-package"}, } actual_tools = list(storage.indexer_configuration_add([tool])) assert len(actual_tools) == 1 new_tools = [ tool, { "tool_name": "yet-another-tool", "tool_version": "version", "tool_configuration": {}, }, ] actual_tools = list(storage.indexer_configuration_add(new_tools)) assert len(actual_tools) == 2 # order not guaranteed, so we iterate over results to check for tool in actual_tools: _id = tool.pop("id") assert _id is not None assert tool in new_tools def test_indexer_configuration_get_missing( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "unknown-tool", "tool_version": "3.1.0rc2-31-ga2cbb8c", "tool_configuration": {"command_line": "nomossa "}, } actual_tool = storage.indexer_configuration_get(tool) 
assert actual_tool is None def test_indexer_configuration_get( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "nomos", "tool_version": "3.1.0rc2-31-ga2cbb8c", "tool_configuration": {"command_line": "nomossa "}, } actual_tool = storage.indexer_configuration_get(tool) assert actual_tool expected_tool = tool.copy() del actual_tool["id"] assert expected_tool == actual_tool def test_indexer_configuration_metadata_get_missing_context( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "swh-metadata-translator", "tool_version": "0.0.1", "tool_configuration": {"context": "unknown-context"}, } actual_tool = storage.indexer_configuration_get(tool) assert actual_tool is None def test_indexer_configuration_metadata_get( self, swh_indexer_storage_with_data: Tuple[IndexerStorageInterface, Any] ) -> None: storage, data = swh_indexer_storage_with_data tool = { "tool_name": "swh-metadata-translator", "tool_version": "0.0.1", "tool_configuration": {"type": "local", "context": "NpmMapping"}, } storage.indexer_configuration_add([tool]) actual_tool = storage.indexer_configuration_get(tool) assert actual_tool expected_tool = tool.copy() expected_tool["id"] = actual_tool["id"] assert expected_tool == actual_tool diff --git a/swh/indexer/tests/test_cli.py b/swh/indexer/tests/test_cli.py index f426ac1..cc2a6b2 100644 --- a/swh/indexer/tests/test_cli.py +++ b/swh/indexer/tests/test_cli.py @@ -1,655 +1,658 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from functools import reduce import re from typing import Any, Dict, List from unittest.mock import patch from 
click.testing import CliRunner from confluent_kafka import Consumer import pytest from swh.indexer.cli import indexer_cli_group from swh.indexer.storage.interface import IndexerStorageInterface from swh.indexer.storage.model import ( DirectoryIntrinsicMetadataRow, OriginIntrinsicMetadataRow, ) from swh.journal.writer import get_journal_writer from swh.model.hashutil import hash_to_bytes from swh.model.model import OriginVisitStatus from .utils import DIRECTORY2, REVISION def fill_idx_storage(idx_storage: IndexerStorageInterface, nb_rows: int) -> List[int]: tools: List[Dict[str, Any]] = [ { "tool_name": "tool %d" % i, "tool_version": "0.0.1", "tool_configuration": {}, } for i in range(2) ] tools = idx_storage.indexer_configuration_add(tools) origin_metadata = [ OriginIntrinsicMetadataRow( id="file://dev/%04d" % origin_id, from_directory=hash_to_bytes("abcd{:0>36}".format(origin_id)), indexer_configuration_id=tools[origin_id % 2]["id"], metadata={"name": "origin %d" % origin_id}, mappings=["mapping%d" % (origin_id % 10)], ) for origin_id in range(nb_rows) ] directory_metadata = [ DirectoryIntrinsicMetadataRow( id=hash_to_bytes("abcd{:0>36}".format(origin_id)), indexer_configuration_id=tools[origin_id % 2]["id"], metadata={"name": "origin %d" % origin_id}, mappings=["mapping%d" % (origin_id % 10)], ) for origin_id in range(nb_rows) ] idx_storage.directory_intrinsic_metadata_add(directory_metadata) idx_storage.origin_intrinsic_metadata_add(origin_metadata) return [tool["id"] for tool in tools] def _origins_in_task_args(tasks): """Returns the set of origins contained in the arguments of the provided tasks (assumed to be of type index-origin-metadata).""" return reduce( set.union, (set(task["arguments"]["args"][0]) for task in tasks), set() ) def _assert_tasks_for_origins(tasks, origins): expected_kwargs = {} assert {task["type"] for task in tasks} == {"index-origin-metadata"} assert all(len(task["arguments"]["args"]) == 1 for task in tasks) for task in tasks: assert 
task["arguments"]["kwargs"] == expected_kwargs, task assert _origins_in_task_args(tasks) == set(["file://dev/%04d" % i for i in origins]) @pytest.fixture def cli_runner(): return CliRunner() def test_cli_mapping_list(cli_runner, swh_config): result = cli_runner.invoke( indexer_cli_group, ["-C", swh_config, "mapping", "list"], catch_exceptions=False, ) expected_output = "\n".join( [ "cff", "codemeta", + "composer", "gemspec", + "github", "maven", "npm", "pkg-info", + "pubspec", "", ] # must be sorted for test to pass ) assert result.exit_code == 0, result.output assert result.output == expected_output def test_cli_mapping_list_terms(cli_runner, swh_config): result = cli_runner.invoke( indexer_cli_group, ["-C", swh_config, "mapping", "list-terms"], catch_exceptions=False, ) assert result.exit_code == 0, result.output assert re.search(r"http://schema.org/url:\n.*npm", result.output) assert re.search(r"http://schema.org/url:\n.*codemeta", result.output) assert re.search( r"https://codemeta.github.io/terms/developmentStatus:\n\tcodemeta", result.output, ) def test_cli_mapping_list_terms_exclude(cli_runner, swh_config): result = cli_runner.invoke( indexer_cli_group, ["-C", swh_config, "mapping", "list-terms", "--exclude-mapping", "codemeta"], catch_exceptions=False, ) assert result.exit_code == 0, result.output assert re.search(r"http://schema.org/url:\n.*npm", result.output) assert not re.search(r"http://schema.org/url:\n.*codemeta", result.output) assert not re.search( r"https://codemeta.github.io/terms/developmentStatus:\n\tcodemeta", result.output, ) @patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) @patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3) def test_cli_origin_metadata_reindex_empty_db( cli_runner, swh_config, indexer_scheduler, idx_storage, storage ): result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "schedule", "reindex_origin_metadata", ], catch_exceptions=False, ) expected_output = "Nothing to do (no origin metadata matched the 
criteria).\n" assert result.exit_code == 0, result.output assert result.output == expected_output tasks = indexer_scheduler.search_tasks() assert len(tasks) == 0 @patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) @patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3) def test_cli_origin_metadata_reindex_divisor( cli_runner, swh_config, indexer_scheduler, idx_storage, storage ): """Tests the re-indexing when origin_batch_size*task_batch_size is a divisor of nb_origins.""" fill_idx_storage(idx_storage, 90) result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "schedule", "reindex_origin_metadata", ], catch_exceptions=False, ) # Check the output expected_output = ( "Scheduled 3 tasks (30 origins).\n" "Scheduled 6 tasks (60 origins).\n" "Scheduled 9 tasks (90 origins).\n" "Done.\n" ) assert result.exit_code == 0, result.output assert result.output == expected_output # Check scheduled tasks tasks = indexer_scheduler.search_tasks() assert len(tasks) == 9 _assert_tasks_for_origins(tasks, range(90)) @patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) @patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3) def test_cli_origin_metadata_reindex_dry_run( cli_runner, swh_config, indexer_scheduler, idx_storage, storage ): """Tests the re-indexing when origin_batch_size*task_batch_size is a divisor of nb_origins.""" fill_idx_storage(idx_storage, 90) result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "schedule", "--dry-run", "reindex_origin_metadata", ], catch_exceptions=False, ) # Check the output expected_output = ( "Scheduled 3 tasks (30 origins).\n" "Scheduled 6 tasks (60 origins).\n" "Scheduled 9 tasks (90 origins).\n" "Done.\n" ) assert result.exit_code == 0, result.output assert result.output == expected_output # Check scheduled tasks tasks = indexer_scheduler.search_tasks() assert len(tasks) == 0 @patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) @patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3) def 
test_cli_origin_metadata_reindex_nondivisor( cli_runner, swh_config, indexer_scheduler, idx_storage, storage ): """Tests the re-indexing when neither origin_batch_size or task_batch_size is a divisor of nb_origins.""" fill_idx_storage(idx_storage, 70) result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "schedule", "reindex_origin_metadata", "--batch-size", "20", ], catch_exceptions=False, ) # Check the output expected_output = ( "Scheduled 3 tasks (60 origins).\n" "Scheduled 4 tasks (70 origins).\n" "Done.\n" ) assert result.exit_code == 0, result.output assert result.output == expected_output # Check scheduled tasks tasks = indexer_scheduler.search_tasks() assert len(tasks) == 4 _assert_tasks_for_origins(tasks, range(70)) @patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) @patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3) def test_cli_origin_metadata_reindex_filter_one_mapping( cli_runner, swh_config, indexer_scheduler, idx_storage, storage ): """Tests the re-indexing when origin_batch_size*task_batch_size is a divisor of nb_origins.""" fill_idx_storage(idx_storage, 110) result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "schedule", "reindex_origin_metadata", "--mapping", "mapping1", ], catch_exceptions=False, ) # Check the output expected_output = "Scheduled 2 tasks (11 origins).\nDone.\n" assert result.exit_code == 0, result.output assert result.output == expected_output # Check scheduled tasks tasks = indexer_scheduler.search_tasks() assert len(tasks) == 2 _assert_tasks_for_origins(tasks, [1, 11, 21, 31, 41, 51, 61, 71, 81, 91, 101]) @patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) @patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3) def test_cli_origin_metadata_reindex_filter_two_mappings( cli_runner, swh_config, indexer_scheduler, idx_storage, storage ): """Tests the re-indexing when origin_batch_size*task_batch_size is a divisor of nb_origins.""" fill_idx_storage(idx_storage, 110) result = cli_runner.invoke( 
indexer_cli_group, [ "--config-file", swh_config, "schedule", "reindex_origin_metadata", "--mapping", "mapping1", "--mapping", "mapping2", ], catch_exceptions=False, ) # Check the output expected_output = "Scheduled 3 tasks (22 origins).\nDone.\n" assert result.exit_code == 0, result.output assert result.output == expected_output # Check scheduled tasks tasks = indexer_scheduler.search_tasks() assert len(tasks) == 3 _assert_tasks_for_origins( tasks, [ 1, 11, 21, 31, 41, 51, 61, 71, 81, 91, 101, 2, 12, 22, 32, 42, 52, 62, 72, 82, 92, 102, ], ) @patch("swh.scheduler.cli.utils.TASK_BATCH_SIZE", 3) @patch("swh.scheduler.cli_utils.TASK_BATCH_SIZE", 3) def test_cli_origin_metadata_reindex_filter_one_tool( cli_runner, swh_config, indexer_scheduler, idx_storage, storage ): """Tests the re-indexing when origin_batch_size*task_batch_size is a divisor of nb_origins.""" tool_ids = fill_idx_storage(idx_storage, 110) result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "schedule", "reindex_origin_metadata", "--tool-id", str(tool_ids[0]), ], catch_exceptions=False, ) # Check the output expected_output = ( "Scheduled 3 tasks (30 origins).\n" "Scheduled 6 tasks (55 origins).\n" "Done.\n" ) assert result.exit_code == 0, result.output assert result.output == expected_output # Check scheduled tasks tasks = indexer_scheduler.search_tasks() assert len(tasks) == 6 _assert_tasks_for_origins(tasks, [x * 2 for x in range(55)]) def now(): return datetime.datetime.now(tz=datetime.timezone.utc) def test_cli_journal_client_schedule( cli_runner, swh_config, indexer_scheduler, kafka_prefix: str, kafka_server, consumer: Consumer, ): """Test the 'swh indexer journal-client' cli tool.""" journal_writer = get_journal_writer( "kafka", brokers=[kafka_server], prefix=kafka_prefix, client_id="test producer", value_sanitizer=lambda object_type, value: value, flush_timeout=3, # fail early if something is going wrong ) visit_statuses = [ OriginVisitStatus( origin="file:///dev/zero", visit=1, 
date=now(), status="full", snapshot=None, ), OriginVisitStatus( origin="file:///dev/foobar", visit=2, date=now(), status="full", snapshot=None, ), OriginVisitStatus( origin="file:///tmp/spamegg", visit=3, date=now(), status="full", snapshot=None, ), OriginVisitStatus( origin="file:///dev/0002", visit=6, date=now(), status="full", snapshot=None, ), OriginVisitStatus( # will be filtered out due to its 'partial' status origin="file:///dev/0000", visit=4, date=now(), status="partial", snapshot=None, ), OriginVisitStatus( # will be filtered out due to its 'ongoing' status origin="file:///dev/0001", visit=5, date=now(), status="ongoing", snapshot=None, ), ] journal_writer.write_additions("origin_visit_status", visit_statuses) visit_statuses_full = [vs for vs in visit_statuses if vs.status == "full"] result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "journal-client", "--broker", kafka_server, "--prefix", kafka_prefix, "--group-id", "test-consumer", "--stop-after-objects", len(visit_statuses), "--origin-metadata-task-type", "index-origin-metadata", ], catch_exceptions=False, ) # Check the output expected_output = "Done.\n" assert result.exit_code == 0, result.output assert result.output == expected_output # Check scheduled tasks tasks = indexer_scheduler.search_tasks(task_type="index-origin-metadata") # This can be split into multiple tasks but no more than the origin-visit-statuses # written in the journal assert len(tasks) <= len(visit_statuses_full) actual_origins = [] for task in tasks: actual_task = dict(task) assert actual_task["type"] == "index-origin-metadata" scheduled_origins = actual_task["arguments"]["args"][0] actual_origins.extend(scheduled_origins) assert set(actual_origins) == {vs.origin for vs in visit_statuses_full} def test_cli_journal_client_without_brokers( cli_runner, swh_config, kafka_prefix: str, kafka_server, consumer: Consumer ): """Without brokers configuration, the cli fails.""" with pytest.raises(ValueError, match="brokers"): 
cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "journal-client", ], catch_exceptions=False, ) @pytest.mark.parametrize("indexer_name", ["origin-intrinsic-metadata", "*"]) def test_cli_journal_client_index( cli_runner, swh_config, kafka_prefix: str, kafka_server, consumer: Consumer, idx_storage, storage, mocker, swh_indexer_config, indexer_name: str, ): """Test the 'swh indexer journal-client' cli tool.""" journal_writer = get_journal_writer( "kafka", brokers=[kafka_server], prefix=kafka_prefix, client_id="test producer", value_sanitizer=lambda object_type, value: value, flush_timeout=3, # fail early if something is going wrong ) visit_statuses = [ OriginVisitStatus( origin="file:///dev/zero", visit=1, date=now(), status="full", snapshot=None, ), OriginVisitStatus( origin="file:///dev/foobar", visit=2, date=now(), status="full", snapshot=None, ), OriginVisitStatus( origin="file:///tmp/spamegg", visit=3, date=now(), status="full", snapshot=None, ), OriginVisitStatus( origin="file:///dev/0002", visit=6, date=now(), status="full", snapshot=None, ), OriginVisitStatus( # will be filtered out due to its 'partial' status origin="file:///dev/0000", visit=4, date=now(), status="partial", snapshot=None, ), OriginVisitStatus( # will be filtered out due to its 'ongoing' status origin="file:///dev/0001", visit=5, date=now(), status="ongoing", snapshot=None, ), ] journal_writer.write_additions("origin_visit_status", visit_statuses) visit_statuses_full = [vs for vs in visit_statuses if vs.status == "full"] storage.revision_add([REVISION]) mocker.patch( "swh.indexer.metadata.get_head_swhid", return_value=REVISION.swhid(), ) mocker.patch( "swh.indexer.metadata.DirectoryMetadataIndexer.index", return_value=[ DirectoryIntrinsicMetadataRow( id=DIRECTORY2.id, indexer_configuration_id=1, mappings=["cff"], metadata={"foo": "bar"}, ) ], ) result = cli_runner.invoke( indexer_cli_group, [ "-C", swh_config, "journal-client", indexer_name, "--broker", kafka_server, "--prefix", 
kafka_prefix, "--group-id", "test-consumer", "--stop-after-objects", len(visit_statuses), ], catch_exceptions=False, ) # Check the output expected_output = "Done.\n" assert result.exit_code == 0, result.output assert result.output == expected_output results = idx_storage.origin_intrinsic_metadata_get( [status.origin for status in visit_statuses] ) expected_results = [ OriginIntrinsicMetadataRow( id=status.origin, from_directory=DIRECTORY2.id, tool={"id": 1, **swh_indexer_config["tools"]}, mappings=["cff"], metadata={"foo": "bar"}, ) for status in sorted(visit_statuses_full, key=lambda r: r.origin) ] assert sorted(results, key=lambda r: r.id) == expected_results diff --git a/swh/indexer/tests/test_metadata.py b/swh/indexer/tests/test_metadata.py index 65199fd..c01b8ee 100644 --- a/swh/indexer/tests/test_metadata.py +++ b/swh/indexer/tests/test_metadata.py @@ -1,1334 +1,270 @@ # Copyright (C) 2017-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import json -import logging +import datetime +from unittest.mock import call -from hypothesis import HealthCheck, given, settings, strategies -import pytest +import attr -from swh.indexer.codemeta import CODEMETA_TERMS -from swh.indexer.metadata import ContentMetadataIndexer, DirectoryMetadataIndexer -from swh.indexer.metadata_detector import detect_metadata -from swh.indexer.metadata_dictionary import MAPPINGS -from swh.indexer.metadata_dictionary.maven import MavenMapping -from swh.indexer.metadata_dictionary.npm import NpmMapping -from swh.indexer.metadata_dictionary.ruby import GemspecMapping -from swh.indexer.storage.model import ContentMetadataRow, DirectoryIntrinsicMetadataRow +from swh.indexer.metadata import ( + ContentMetadataIndexer, + DirectoryMetadataIndexer, + ExtrinsicMetadataIndexer, +) +from swh.indexer.storage.model import ( + 
ContentMetadataRow, + DirectoryIntrinsicMetadataRow, + OriginExtrinsicMetadataRow, +) from swh.indexer.tests.utils import DIRECTORY2 -from swh.model.hashutil import hash_to_bytes -from swh.model.model import Directory, DirectoryEntry +from swh.model.model import ( + Directory, + DirectoryEntry, + MetadataAuthority, + MetadataAuthorityType, + MetadataFetcher, + RawExtrinsicMetadata, +) +from swh.model.swhids import ExtendedObjectType, ExtendedSWHID from .utils import ( BASE_TEST_CONFIG, YARN_PARSER_METADATA, fill_obj_storage, fill_storage, - json_document_strategy, - xml_document_strategy, ) TRANSLATOR_TOOL = { "name": "swh-metadata-translator", "version": "0.0.2", "configuration": {"type": "local", "context": "NpmMapping"}, } class ContentMetadataTestIndexer(ContentMetadataIndexer): """Specific Metadata whose configuration is enough to satisfy the indexing tests. """ def parse_config_file(self, *args, **kwargs): assert False, "should not be called; the dir indexer configures it." DIRECTORY_METADATA_CONFIG = { **BASE_TEST_CONFIG, "tools": TRANSLATOR_TOOL, } +REMD = RawExtrinsicMetadata( + target=ExtendedSWHID( + object_type=ExtendedObjectType.ORIGIN, + object_id=b"\x01" * 20, + ), + discovery_date=datetime.datetime.now(tz=datetime.timezone.utc), + authority=MetadataAuthority( + type=MetadataAuthorityType.FORGE, + url="https://example.org/", + ), + fetcher=MetadataFetcher( + name="example-fetcher", + version="1.0.0", + ), + format="application/vnd.github.v3+json", + metadata=b'{"full_name": "test software"}', +) + class TestMetadata: """ Tests metadata_mock_tool tool for Metadata detection """ - def setup_method(self): - self.npm_mapping = MAPPINGS["NpmMapping"]() - self.codemeta_mapping = MAPPINGS["CodemetaMapping"]() - self.maven_mapping = MAPPINGS["MavenMapping"]() - self.pkginfo_mapping = MAPPINGS["PythonPkginfoMapping"]() - self.gemspec_mapping = MAPPINGS["GemspecMapping"]() - self.cff_mapping = MAPPINGS["CffMapping"]() - - def test_compute_metadata_none(self): 
- """ - testing content empty content is empty - should return None - """ - # given - content = b"" - - # None if no metadata was found or an error occurred - declared_metadata = None - # when - result = self.npm_mapping.translate(content) - # then - assert declared_metadata == result - - def test_compute_metadata_cff(self): - """ - testing CITATION.cff translation - """ - # given - content = """# YAML 1.2 ---- -abstract: "Command line program to convert from Citation File \ -Format to various other formats such as BibTeX, EndNote, RIS, \ -schema.org, CodeMeta, and .zenodo.json." -authors: - - - affiliation: "Netherlands eScience Center" - family-names: Klaver - given-names: Tom - - - affiliation: "Humboldt-Universität zu Berlin" - family-names: Druskat - given-names: Stephan - orcid: https://orcid.org/0000-0003-4925-7248 -cff-version: "1.0.3" -date-released: 2019-11-12 -doi: 10.5281/zenodo.1162057 -keywords: - - "citation" - - "bibliography" - - "cff" - - "CITATION.cff" -license: Apache-2.0 -message: "If you use this software, please cite it using these metadata." 
-repository-code: "https://github.com/citation-file-format/cff-converter-python" -title: cffconvert -version: "1.4.0-alpha0" - """.encode( - "utf-8" - ) - - expected = { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "author": [ - { - "type": "Person", - "affiliation": { - "type": "Organization", - "name": "Netherlands eScience Center", - }, - "familyName": "Klaver", - "givenName": "Tom", - }, - { - "id": "https://orcid.org/0000-0003-4925-7248", - "type": "Person", - "affiliation": { - "type": "Organization", - "name": "Humboldt-Universität zu Berlin", - }, - "familyName": "Druskat", - "givenName": "Stephan", - }, - ], - "codeRepository": ( - "https://github.com/citation-file-format/cff-converter-python" - ), - "datePublished": "2019-11-12", - "description": """Command line program to convert from \ -Citation File Format to various other formats such as BibTeX, EndNote, \ -RIS, schema.org, CodeMeta, and .zenodo.json.""", - "identifier": "https://doi.org/10.5281/zenodo.1162057", - "keywords": ["citation", "bibliography", "cff", "CITATION.cff"], - "license": "https://spdx.org/licenses/Apache-2.0", - "version": "1.4.0-alpha0", - } - - # when - result = self.cff_mapping.translate(content) - # then - assert expected == result - - def test_compute_metadata_cff_invalid_yaml(self): - """ - test yaml translation for invalid yaml file - """ - # given - content = """cff-version: 1.0.3 -message: To cite the SigMF specification, please include the following: -authors: - - name: The GNU Radio Foundation, Inc. 
- """.encode( - "utf-8" - ) - - expected = None - - result = self.cff_mapping.translate(content) - # then - assert expected == result - - def test_compute_metadata_cff_empty(self): - """ - test yaml translation for empty yaml file - """ - # given - content = """ - """.encode( - "utf-8" - ) - - expected = None - - result = self.cff_mapping.translate(content) - # then - assert expected == result - - def test_compute_metadata_cff_list(self): - """ - test yaml translation for empty yaml file - """ - # given - content = """ -- Foo -- Bar - """.encode( - "utf-8" - ) - - expected = None - - result = self.cff_mapping.translate(content) - # then - assert expected == result - - def test_compute_metadata_npm(self): - """ - testing only computation of metadata with hard_mapping_npm - """ - # given - content = b""" - { - "name": "test_metadata", - "version": "0.0.2", - "description": "Simple package.json test for indexer", - "repository": { - "type": "git", - "url": "https://github.com/moranegg/metadata_test" - }, - "author": { - "email": "moranegg@example.com", - "name": "Morane G" - } - } - """ - declared_metadata = { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "test_metadata", - "version": "0.0.2", - "description": "Simple package.json test for indexer", - "codeRepository": "git+https://github.com/moranegg/metadata_test", - "author": [ - { - "type": "Person", - "name": "Morane G", - "email": "moranegg@example.com", - } - ], - } - - # when - result = self.npm_mapping.translate(content) - # then - assert declared_metadata == result - - def test_compute_metadata_invalid_description_npm(self): - """ - testing only computation of metadata with hard_mapping_npm - """ - # given - content = b""" - { - "name": "test_metadata", - "version": "0.0.2", - "description": 1234 - } - """ - declared_metadata = { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "test_metadata", - 
"version": "0.0.2", - } - - # when - result = self.npm_mapping.translate(content) - # then - assert declared_metadata == result - - def test_index_content_metadata_npm(self): - """ - testing NPM with package.json - - one sha1 uses a file that can't be translated to metadata and - should return None in the translated metadata - """ - # given - sha1s = [ - hash_to_bytes("26a9f72a7c87cc9205725cfd879f514ff4f3d8d5"), - hash_to_bytes("d4c647f0fc257591cc9ba1722484229780d1c607"), - hash_to_bytes("02fb2c89e14f7fab46701478c83779c7beb7b069"), - ] - # this metadata indexer computes only metadata for package.json - # in npm context with a hard mapping - config = BASE_TEST_CONFIG.copy() - config["tools"] = [TRANSLATOR_TOOL] - metadata_indexer = ContentMetadataTestIndexer(config=config) - fill_obj_storage(metadata_indexer.objstorage) - fill_storage(metadata_indexer.storage) - - # when - metadata_indexer.run(sha1s) - results = list(metadata_indexer.idx_storage.content_metadata_get(sha1s)) - - expected_results = [ - ContentMetadataRow( - id=hash_to_bytes("26a9f72a7c87cc9205725cfd879f514ff4f3d8d5"), - tool=TRANSLATOR_TOOL, - metadata={ - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "codeRepository": "git+https://github.com/moranegg/metadata_test", - "description": "Simple package.json test for indexer", - "name": "test_metadata", - "version": "0.0.1", - }, - ), - ContentMetadataRow( - id=hash_to_bytes("d4c647f0fc257591cc9ba1722484229780d1c607"), - tool=TRANSLATOR_TOOL, - metadata={ - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "issueTracker": "https://github.com/npm/npm/issues", - "author": [ - { - "type": "Person", - "name": "Isaac Z. 
Schlueter", - "email": "i@izs.me", - "url": "http://blog.izs.me", - } - ], - "codeRepository": "git+https://github.com/npm/npm", - "description": "a package manager for JavaScript", - "license": "https://spdx.org/licenses/Artistic-2.0", - "version": "5.0.3", - "name": "npm", - "keywords": [ - "install", - "modules", - "package manager", - "package.json", - ], - "url": "https://docs.npmjs.com/", - }, - ), - ] - - for result in results: - del result.tool["id"] - - # The assertion below returns False sometimes because of nested lists - assert expected_results == results - - def test_npm_bugs_normalization(self): - # valid dictionary - package_json = b"""{ - "name": "foo", - "bugs": { - "url": "https://github.com/owner/project/issues", - "email": "foo@example.com" - } - }""" - result = self.npm_mapping.translate(package_json) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "name": "foo", - "issueTracker": "https://github.com/owner/project/issues", - "type": "SoftwareSourceCode", - } - - # "invalid" dictionary - package_json = b"""{ - "name": "foo", - "bugs": { - "email": "foo@example.com" - } - }""" - result = self.npm_mapping.translate(package_json) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "name": "foo", - "type": "SoftwareSourceCode", - } - - # string - package_json = b"""{ - "name": "foo", - "bugs": "https://github.com/owner/project/issues" - }""" - result = self.npm_mapping.translate(package_json) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "name": "foo", - "issueTracker": "https://github.com/owner/project/issues", - "type": "SoftwareSourceCode", - } - - def test_npm_repository_normalization(self): - # normal - package_json = b"""{ - "name": "foo", - "repository": { - "type" : "git", - "url" : "https://github.com/npm/cli.git" - } - }""" - result = self.npm_mapping.translate(package_json) - assert result == { - "@context": 
"https://doi.org/10.5063/schema/codemeta-2.0", - "name": "foo", - "codeRepository": "git+https://github.com/npm/cli.git", - "type": "SoftwareSourceCode", - } - - # missing url - package_json = b"""{ - "name": "foo", - "repository": { - "type" : "git" - } - }""" - result = self.npm_mapping.translate(package_json) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "name": "foo", - "type": "SoftwareSourceCode", - } - - # github shortcut - package_json = b"""{ - "name": "foo", - "repository": "github:npm/cli" - }""" - result = self.npm_mapping.translate(package_json) - expected_result = { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "name": "foo", - "codeRepository": "git+https://github.com/npm/cli.git", - "type": "SoftwareSourceCode", - } - assert result == expected_result - - # github shortshortcut - package_json = b"""{ - "name": "foo", - "repository": "npm/cli" - }""" - result = self.npm_mapping.translate(package_json) - assert result == expected_result - - # gitlab shortcut - package_json = b"""{ - "name": "foo", - "repository": "gitlab:user/repo" - }""" - result = self.npm_mapping.translate(package_json) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "name": "foo", - "codeRepository": "git+https://gitlab.com/user/repo.git", - "type": "SoftwareSourceCode", - } - - @pytest.mark.parametrize( - "filename", [b"package.json", b"Package.json", b"PACKAGE.json", b"PACKAGE.JSON"] - ) - def test_detect_metadata_package_json(self, filename): - # given - df = [ - { - "sha1_git": b"abc", - "name": b"index.js", - "target": b"abc", - "length": 897, - "status": "visible", - "type": "file", - "perms": 33188, - "dir_id": b"dir_a", - "sha1": b"bcd", - }, - { - "sha1_git": b"aab", - "name": filename, - "target": b"aab", - "length": 712, - "status": "visible", - "type": "file", - "perms": 33188, - "dir_id": b"dir_a", - "sha1": b"cde", - }, - ] - # when - results = detect_metadata(df) - - 
expected_results = {"NpmMapping": [b"cde"]} - # then - assert expected_results == results - - def test_detect_metadata_codemeta_json_uppercase(self): - # given - df = [ - { - "sha1_git": b"abc", - "name": b"index.html", - "target": b"abc", - "length": 897, - "status": "visible", - "type": "file", - "perms": 33188, - "dir_id": b"dir_a", - "sha1": b"bcd", - }, - { - "sha1_git": b"aab", - "name": b"CODEMETA.json", - "target": b"aab", - "length": 712, - "status": "visible", - "type": "file", - "perms": 33188, - "dir_id": b"dir_a", - "sha1": b"bcd", - }, - ] - # when - results = detect_metadata(df) - - expected_results = {"CodemetaMapping": [b"bcd"]} - # then - assert expected_results == results - - def test_compute_metadata_valid_codemeta(self): - raw_content = b"""{ - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "@type": "SoftwareSourceCode", - "identifier": "CodeMeta", - "description": "CodeMeta is a concept vocabulary that can be used to standardize the exchange of software metadata across repositories and organizations.", - "name": "CodeMeta: Minimal metadata schemas for science software and code, in JSON-LD", - "codeRepository": "https://github.com/codemeta/codemeta", - "issueTracker": "https://github.com/codemeta/codemeta/issues", - "license": "https://spdx.org/licenses/Apache-2.0", - "version": "2.0", - "author": [ - { - "@type": "Person", - "givenName": "Carl", - "familyName": "Boettiger", - "email": "cboettig@gmail.com", - "@id": "http://orcid.org/0000-0002-1642-628X" - }, - { - "@type": "Person", - "givenName": "Matthew B.", - "familyName": "Jones", - "email": "jones@nceas.ucsb.edu", - "@id": "http://orcid.org/0000-0003-0077-4738" - } - ], - "maintainer": { - "@type": "Person", - "givenName": "Carl", - "familyName": "Boettiger", - "email": "cboettig@gmail.com", - "@id": "http://orcid.org/0000-0002-1642-628X" - }, - "contIntegration": "https://travis-ci.org/codemeta/codemeta", - "developmentStatus": "active", - "downloadUrl": 
"https://github.com/codemeta/codemeta/archive/2.0.zip", - "funder": { - "@id": "https://doi.org/10.13039/100000001", - "@type": "Organization", - "name": "National Science Foundation" - }, - "funding":"1549758; Codemeta: A Rosetta Stone for Metadata in Scientific Software", - "keywords": [ - "metadata", - "software" - ], - "version":"2.0", - "dateCreated":"2017-06-05", - "datePublished":"2017-06-05", - "programmingLanguage": "JSON-LD" - }""" # noqa - expected_result = { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "identifier": "CodeMeta", - "description": "CodeMeta is a concept vocabulary that can " - "be used to standardize the exchange of software metadata " - "across repositories and organizations.", - "name": "CodeMeta: Minimal metadata schemas for science " - "software and code, in JSON-LD", - "codeRepository": "https://github.com/codemeta/codemeta", - "issueTracker": "https://github.com/codemeta/codemeta/issues", - "license": "https://spdx.org/licenses/Apache-2.0", - "version": "2.0", - "author": [ - { - "type": "Person", - "givenName": "Carl", - "familyName": "Boettiger", - "email": "cboettig@gmail.com", - "id": "http://orcid.org/0000-0002-1642-628X", - }, - { - "type": "Person", - "givenName": "Matthew B.", - "familyName": "Jones", - "email": "jones@nceas.ucsb.edu", - "id": "http://orcid.org/0000-0003-0077-4738", - }, - ], - "maintainer": { - "type": "Person", - "givenName": "Carl", - "familyName": "Boettiger", - "email": "cboettig@gmail.com", - "id": "http://orcid.org/0000-0002-1642-628X", - }, - "contIntegration": "https://travis-ci.org/codemeta/codemeta", - "developmentStatus": "active", - "downloadUrl": "https://github.com/codemeta/codemeta/archive/2.0.zip", - "funder": { - "id": "https://doi.org/10.13039/100000001", - "type": "Organization", - "name": "National Science Foundation", - }, - "funding": "1549758; Codemeta: A Rosetta Stone for Metadata " - "in Scientific Software", - "keywords": 
["metadata", "software"], - "version": "2.0", - "dateCreated": "2017-06-05", - "datePublished": "2017-06-05", - "programmingLanguage": "JSON-LD", - } - result = self.codemeta_mapping.translate(raw_content) - assert result == expected_result - - def test_compute_metadata_codemeta_alternate_context(self): - raw_content = b"""{ - "@context": "https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld", - "@type": "SoftwareSourceCode", - "identifier": "CodeMeta" - }""" # noqa - expected_result = { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "identifier": "CodeMeta", - } - result = self.codemeta_mapping.translate(raw_content) - assert result == expected_result - - def test_compute_metadata_maven(self): - raw_content = b""" - - Maven Default Project - 4.0.0 - com.mycompany.app - my-app - 1.2.3 - - - central - Maven Repository Switchboard - default - http://repo1.maven.org/maven2 - - false - - - - - - Apache License, Version 2.0 - https://www.apache.org/licenses/LICENSE-2.0.txt - repo - A business-friendly OSS license - - - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "Maven Default Project", - "identifier": "com.mycompany.app", - "version": "1.2.3", - "license": "https://www.apache.org/licenses/LICENSE-2.0.txt", - "codeRepository": ( - "http://repo1.maven.org/maven2/com/mycompany/app/my-app" - ), - } - - def test_compute_metadata_maven_empty(self): - raw_content = b""" - - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - } - - def test_compute_metadata_maven_almost_empty(self): - raw_content = b""" - - - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": 
"SoftwareSourceCode", - } - - def test_compute_metadata_maven_invalid_xml(self, caplog): - expected_warning = ( - "swh.indexer.metadata_dictionary.maven.MavenMapping", - logging.WARNING, - "Error parsing XML from foo", - ) - caplog.at_level(logging.WARNING, logger="swh.indexer.metadata_dictionary") - - raw_content = b""" - """ - caplog.clear() - result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) - assert caplog.record_tuples == [expected_warning] - assert result is None - - raw_content = b""" - """ - caplog.clear() - result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) - assert caplog.record_tuples == [expected_warning] - assert result is None - - def test_compute_metadata_maven_unknown_encoding(self, caplog): - expected_warning = ( - "swh.indexer.metadata_dictionary.maven.MavenMapping", - logging.WARNING, - "Error detecting XML encoding from foo", - ) - caplog.at_level(logging.WARNING, logger="swh.indexer.metadata_dictionary") - - raw_content = b""" - - """ - caplog.clear() - result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) - assert caplog.record_tuples == [expected_warning] - assert result is None - - raw_content = b""" - - """ - caplog.clear() - result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) - assert caplog.record_tuples == [expected_warning] - assert result is None - - def test_compute_metadata_maven_invalid_encoding(self, caplog): - expected_warning = [ - # libexpat1 <= 2.2.10-2+deb11u1 - [ - ( - "swh.indexer.metadata_dictionary.maven.MavenMapping", - logging.WARNING, - "Error unidecoding XML from foo", - ) - ], - # libexpat1 >= 2.2.10-2+deb11u2 - [ - ( - "swh.indexer.metadata_dictionary.maven.MavenMapping", - logging.WARNING, - "Error parsing XML from foo", - ) - ], - ] - caplog.at_level(logging.WARNING, logger="swh.indexer.metadata_dictionary") - - raw_content = b""" - - """ - caplog.clear() - result = MAPPINGS["MavenMapping"]("foo").translate(raw_content) - assert caplog.record_tuples in 
expected_warning - assert result is None - - def test_compute_metadata_maven_minimal(self): - raw_content = b""" - - Maven Default Project - 4.0.0 - com.mycompany.app - my-app - 1.2.3 - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "Maven Default Project", - "identifier": "com.mycompany.app", - "version": "1.2.3", - "codeRepository": ( - "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" - ), - } - - def test_compute_metadata_maven_empty_nodes(self): - raw_content = b""" - - Maven Default Project - 4.0.0 - com.mycompany.app - my-app - 1.2.3 - - - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "Maven Default Project", - "identifier": "com.mycompany.app", - "version": "1.2.3", - "codeRepository": ( - "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" - ), - } - - raw_content = b""" - - Maven Default Project - 4.0.0 - com.mycompany.app - my-app - - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "Maven Default Project", - "identifier": "com.mycompany.app", - "codeRepository": ( - "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" - ), - } - - raw_content = b""" - - - 4.0.0 - com.mycompany.app - my-app - 1.2.3 - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "identifier": "com.mycompany.app", - "version": "1.2.3", - "codeRepository": ( - "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" - ), - } - - raw_content = b""" - - Maven Default Project - 4.0.0 - com.mycompany.app - my-app - 1.2.3 - - - """ - 
result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "Maven Default Project", - "identifier": "com.mycompany.app", - "version": "1.2.3", - "codeRepository": ( - "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" - ), - } - - raw_content = b""" - - - 1.2.3 - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "version": "1.2.3", - } - - def test_compute_metadata_maven_invalid_licenses(self): - raw_content = b""" - - Maven Default Project - 4.0.0 - com.mycompany.app - my-app - 1.2.3 - - foo - - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "Maven Default Project", - "identifier": "com.mycompany.app", - "version": "1.2.3", - "codeRepository": ( - "https://repo.maven.apache.org/maven2/com/mycompany/app/my-app" - ), - } - - def test_compute_metadata_maven_multiple(self): - """Tests when there are multiple code repos and licenses.""" - raw_content = b""" - - Maven Default Project - 4.0.0 - com.mycompany.app - my-app - 1.2.3 - - - central - Maven Repository Switchboard - default - http://repo1.maven.org/maven2 - - false - - - - example - Example Maven Repo - default - http://example.org/maven2 - - - - - Apache License, Version 2.0 - https://www.apache.org/licenses/LICENSE-2.0.txt - repo - A business-friendly OSS license - - - MIT license - https://opensource.org/licenses/MIT - - - """ - result = self.maven_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "Maven Default Project", - "identifier": "com.mycompany.app", - "version": "1.2.3", - "license": [ - 
"https://www.apache.org/licenses/LICENSE-2.0.txt", - "https://opensource.org/licenses/MIT", - ], - "codeRepository": [ - "http://repo1.maven.org/maven2/com/mycompany/app/my-app", - "http://example.org/maven2/com/mycompany/app/my-app", - ], - } - - def test_compute_metadata_pkginfo(self): - raw_content = b"""\ -Metadata-Version: 2.1 -Name: swh.core -Version: 0.0.49 -Summary: Software Heritage core utilities -Home-page: https://forge.softwareheritage.org/diffusion/DCORE/ -Author: Software Heritage developers -Author-email: swh-devel@inria.fr -License: UNKNOWN -Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest -Project-URL: Funding, https://www.softwareheritage.org/donate -Project-URL: Source, https://forge.softwareheritage.org/source/swh-core -Description: swh-core - ======== - \x20 - core library for swh's modules: - - config parser - - hash computations - - serialization - - logging mechanism - \x20 -Platform: UNKNOWN -Classifier: Programming Language :: Python :: 3 -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) -Classifier: Operating System :: OS Independent -Classifier: Development Status :: 5 - Production/Stable -Description-Content-Type: text/markdown -Provides-Extra: testing -""" # noqa - result = self.pkginfo_mapping.translate(raw_content) - assert result["description"] == [ - "Software Heritage core utilities", # note the comma here - "swh-core\n" - "========\n" - "\n" - "core library for swh's modules:\n" - "- config parser\n" - "- hash computations\n" - "- serialization\n" - "- logging mechanism\n" - "", - ], result - del result["description"] - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "url": "https://forge.softwareheritage.org/diffusion/DCORE/", - "name": "swh.core", - "author": [ - { - "type": "Person", - "name": "Software Heritage developers", - "email": "swh-devel@inria.fr", - } - ], - 
"version": "0.0.49", - } - - def test_compute_metadata_pkginfo_utf8(self): - raw_content = b"""\ -Metadata-Version: 1.1 -Name: snowpyt -Description-Content-Type: UNKNOWN -Description: foo - Hydrology N\xc2\xb083 -""" # noqa - result = self.pkginfo_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "snowpyt", - "description": "foo\nHydrology N°83", - } - - def test_compute_metadata_pkginfo_keywords(self): - raw_content = b"""\ -Metadata-Version: 2.1 -Name: foo -Keywords: foo bar baz -""" # noqa - result = self.pkginfo_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "foo", - "keywords": ["foo", "bar", "baz"], - } - - def test_compute_metadata_pkginfo_license(self): - raw_content = b"""\ -Metadata-Version: 2.1 -Name: foo -License: MIT -""" # noqa - result = self.pkginfo_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "foo", - "license": "MIT", - } - - def test_gemspec_base(self): - raw_content = b""" -Gem::Specification.new do |s| - s.name = 'example' - s.version = '0.1.0' - s.licenses = ['MIT'] - s.summary = "This is an example!" - s.description = "Much longer explanation of the example!" 
- s.authors = ["Ruby Coder"] - s.email = 'rubycoder@example.com' - s.files = ["lib/example.rb"] - s.homepage = 'https://rubygems.org/gems/example' - s.metadata = { "source_code_uri" => "https://github.com/example/example" } -end""" - result = self.gemspec_mapping.translate(raw_content) - assert set(result.pop("description")) == { - "This is an example!", - "Much longer explanation of the example!", - } - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "author": [{"type": "Person", "name": "Ruby Coder"}], - "name": "example", - "license": "https://spdx.org/licenses/MIT", - "codeRepository": "https://rubygems.org/gems/example", - "email": "rubycoder@example.com", - "version": "0.1.0", - } - - def test_gemspec_two_author_fields(self): - raw_content = b""" -Gem::Specification.new do |s| - s.authors = ["Ruby Coder1"] - s.author = "Ruby Coder2" -end""" - result = self.gemspec_mapping.translate(raw_content) - assert result.pop("author") in ( - [ - {"type": "Person", "name": "Ruby Coder1"}, - {"type": "Person", "name": "Ruby Coder2"}, - ], - [ - {"type": "Person", "name": "Ruby Coder2"}, - {"type": "Person", "name": "Ruby Coder1"}, - ], - ) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - } - - def test_gemspec_invalid_author(self): - raw_content = b""" -Gem::Specification.new do |s| - s.author = ["Ruby Coder"] -end""" - result = self.gemspec_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - } - raw_content = b""" -Gem::Specification.new do |s| - s.author = "Ruby Coder1", -end""" - result = self.gemspec_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - } - raw_content = b""" -Gem::Specification.new do |s| - s.authors = ["Ruby Coder1", ["Ruby Coder2"]] 
-end""" - result = self.gemspec_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "author": [{"type": "Person", "name": "Ruby Coder1"}], - } - - def test_gemspec_alternative_header(self): - raw_content = b""" -require './lib/version' - -Gem::Specification.new { |s| - s.name = 'rb-system-with-aliases' - s.summary = 'execute system commands with aliases' -} -""" - result = self.gemspec_mapping.translate(raw_content) - assert result == { - "@context": "https://doi.org/10.5063/schema/codemeta-2.0", - "type": "SoftwareSourceCode", - "name": "rb-system-with-aliases", - "description": "execute system commands with aliases", - } - - @settings(suppress_health_check=[HealthCheck.too_slow]) - @given(json_document_strategy(keys=list(NpmMapping.mapping))) - def test_npm_adversarial(self, doc): - raw = json.dumps(doc).encode() - self.npm_mapping.translate(raw) - - @settings(suppress_health_check=[HealthCheck.too_slow]) - @given(json_document_strategy(keys=CODEMETA_TERMS)) - def test_codemeta_adversarial(self, doc): - raw = json.dumps(doc).encode() - self.codemeta_mapping.translate(raw) - - @settings(suppress_health_check=[HealthCheck.too_slow]) - @given( - xml_document_strategy( - keys=list(MavenMapping.mapping), - root="project", - xmlns="http://maven.apache.org/POM/4.0.0", - ) - ) - def test_maven_adversarial(self, doc): - self.maven_mapping.translate(doc) - - @settings(suppress_health_check=[HealthCheck.too_slow]) - @given( - strategies.dictionaries( - # keys - strategies.one_of( - strategies.text(), *map(strategies.just, GemspecMapping.mapping) - ), - # values - strategies.recursive( - strategies.characters(), - lambda children: strategies.lists(children, min_size=1), - ), - ) - ) - def test_gemspec_adversarial(self, doc): - parts = [b"Gem::Specification.new do |s|\n"] - for (k, v) in doc.items(): - parts.append(" s.{} = {}\n".format(k, repr(v)).encode()) - parts.append(b"end\n") 
- self.gemspec_mapping.translate(b"".join(parts)) - def test_directory_metadata_indexer(self): metadata_indexer = DirectoryMetadataIndexer(config=DIRECTORY_METADATA_CONFIG) fill_obj_storage(metadata_indexer.objstorage) fill_storage(metadata_indexer.storage) tool = metadata_indexer.idx_storage.indexer_configuration_get( {f"tool_{k}": v for (k, v) in TRANSLATOR_TOOL.items()} ) assert tool is not None dir_ = DIRECTORY2 metadata_indexer.idx_storage.content_metadata_add( [ ContentMetadataRow( id=DIRECTORY2.entries[0].target, indexer_configuration_id=tool["id"], metadata=YARN_PARSER_METADATA, ) ] ) metadata_indexer.run([dir_.id]) results = list( metadata_indexer.idx_storage.directory_intrinsic_metadata_get( [DIRECTORY2.id] ) ) expected_results = [ DirectoryIntrinsicMetadataRow( id=dir_.id, tool=TRANSLATOR_TOOL, metadata=YARN_PARSER_METADATA, mappings=["npm"], ) ] for result in results: del result.tool["id"] - # then assert results == expected_results def test_directory_metadata_indexer_single_root_dir(self): metadata_indexer = DirectoryMetadataIndexer(config=DIRECTORY_METADATA_CONFIG) fill_obj_storage(metadata_indexer.objstorage) fill_storage(metadata_indexer.storage) # Add a parent directory, that is the only directory at the root # of the directory dir_ = DIRECTORY2 new_dir = Directory( entries=( DirectoryEntry( name=b"foobar-1.0.0", type="dir", target=dir_.id, perms=16384, ), ), ) assert new_dir.id is not None metadata_indexer.storage.directory_add([new_dir]) tool = metadata_indexer.idx_storage.indexer_configuration_get( {f"tool_{k}": v for (k, v) in TRANSLATOR_TOOL.items()} ) assert tool is not None metadata_indexer.idx_storage.content_metadata_add( [ ContentMetadataRow( id=DIRECTORY2.entries[0].target, indexer_configuration_id=tool["id"], metadata=YARN_PARSER_METADATA, ) ] ) metadata_indexer.run([new_dir.id]) results = list( metadata_indexer.idx_storage.directory_intrinsic_metadata_get([new_dir.id]) ) expected_results = [ DirectoryIntrinsicMetadataRow( 
id=new_dir.id, tool=TRANSLATOR_TOOL, metadata=YARN_PARSER_METADATA, mappings=["npm"], ) ] for result in results: del result.tool["id"] - # then assert results == expected_results + + def test_extrinsic_metadata_indexer_unknown_format(self, mocker): + """Should be ignored when unknown format""" + metadata_indexer = ExtrinsicMetadataIndexer(config=DIRECTORY_METADATA_CONFIG) + metadata_indexer.storage = mocker.patch.object(metadata_indexer, "storage") + + remd = attr.evolve(REMD, format="unknown format") + + results = metadata_indexer.index(remd.id, data=remd) + + assert metadata_indexer.storage.method_calls == [] + assert results == [] + + def test_extrinsic_metadata_indexer_github(self, mocker): + """Nominal case, calling the mapping and storing the result""" + origin = "https://example.org/jdoe/myrepo" + + metadata_indexer = ExtrinsicMetadataIndexer(config=DIRECTORY_METADATA_CONFIG) + metadata_indexer.catch_exceptions = False + metadata_indexer.storage = mocker.patch.object(metadata_indexer, "storage") + metadata_indexer.storage.origin_get_by_sha1.return_value = [{"url": origin}] + + tool = metadata_indexer.idx_storage.indexer_configuration_get( + {f"tool_{k}": v for (k, v) in TRANSLATOR_TOOL.items()} + ) + assert tool is not None + + assert metadata_indexer.process_journal_objects( + {"raw_extrinsic_metadata": [REMD.to_dict()]} + ) == {"status": "eventful", "origin_extrinsic_metadata:add": 1} + + assert metadata_indexer.storage.method_calls == [ + call.origin_get_by_sha1([b"\x01" * 20]) + ] + + results = list( + metadata_indexer.idx_storage.origin_extrinsic_metadata_get([origin]) + ) + assert results == [ + OriginExtrinsicMetadataRow( + id="https://example.org/jdoe/myrepo", + tool={"id": tool["id"], **TRANSLATOR_TOOL}, + metadata={ + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "type": "https://forgefed.org/ns#Repository", + "name": "test software", + }, + from_remd_id=REMD.id, + mappings=["GitHubMapping"], + ) + ] + + def 
test_extrinsic_metadata_indexer_nonforge_authority(self, mocker): + """Early abort on non-forge authorities""" + metadata_indexer = ExtrinsicMetadataIndexer(config=DIRECTORY_METADATA_CONFIG) + metadata_indexer.storage = mocker.patch.object(metadata_indexer, "storage") + + remd = attr.evolve( + REMD, + authority=attr.evolve(REMD.authority, type=MetadataAuthorityType.REGISTRY), + ) + + results = metadata_indexer.index(remd.id, data=remd) + + assert metadata_indexer.storage.method_calls == [] + assert results == [] + + def test_extrinsic_metadata_indexer_thirdparty_authority(self, mocker): + """Should be ignored when authority URL does not match the origin""" + + origin = "https://different-domain.example.org/jdoe/myrepo" + + metadata_indexer = ExtrinsicMetadataIndexer(config=DIRECTORY_METADATA_CONFIG) + metadata_indexer.catch_exceptions = False + metadata_indexer.storage = mocker.patch.object(metadata_indexer, "storage") + metadata_indexer.storage.origin_get_by_sha1.return_value = [{"url": origin}] + + tool = metadata_indexer.idx_storage.indexer_configuration_get( + {f"tool_{k}": v for (k, v) in TRANSLATOR_TOOL.items()} + ) + assert tool is not None + + results = metadata_indexer.index(REMD.id, data=REMD) + + assert metadata_indexer.storage.method_calls == [ + call.origin_get_by_sha1([b"\x01" * 20]) + ] + assert results == [] diff --git a/swh/indexer/tests/zz_celery/README b/swh/indexer/tests/zz_celery/README new file mode 100644 index 0000000..2a72132 --- /dev/null +++ b/swh/indexer/tests/zz_celery/README @@ -0,0 +1,2 @@ +this directory is named "zz_celery" so pytest runs it last, to prevent +Celery-related fixtures from interfering with other tests diff --git a/swh/indexer/tests/zz_celery/__init__.py b/swh/indexer/tests/zz_celery/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swh/indexer/tests/test_tasks.py b/swh/indexer/tests/zz_celery/test_tasks.py similarity index 100% rename from swh/indexer/tests/test_tasks.py rename to 
swh/indexer/tests/zz_celery/test_tasks.py