diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bce4ef54..1a2dcf9c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,57 +1,57 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v2.4.0 hooks: - id: trailing-whitespace - id: check-json - id: check-yaml - repo: https://gitlab.com/pycqa/flake8 rev: 3.8.3 hooks: - id: flake8 - repo: https://github.com/codespell-project/codespell rev: v1.16.0 hooks: - id: codespell exclude: TODO - args: [-L iff] + args: [-L iff, -L gae] - repo: local hooks: - id: mypy name: mypy entry: mypy args: [swh] pass_filenames: false language: system types: [python] - id: check-bumped-dbversion name: check-bumped-dbversion files: 'sql/upgrades/.*\.sql' entry: grep args: ['insert into dbversion'] language: system - repo: https://github.com/PyCQA/isort rev: 5.5.2 hooks: - id: isort - repo: https://github.com/python/black rev: 19.10b0 hooks: - id: black # unfortunately, we are far from being able to enable this... #- repo: https://github.com/PyCQA/pydocstyle.git # rev: 4.0.0 # hooks: # - id: pydocstyle # name: pydocstyle # description: pydocstyle is a static analysis tool for checking compliance with Python docstring conventions. # entry: pydocstyle --convention=google # language: python # types: [python] diff --git a/PKG-INFO b/PKG-INFO index 04d961ee..1d88a693 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,213 +1,213 @@ Metadata-Version: 2.1 Name: swh.storage -Version: 0.16.0 +Version: 0.17.0 Summary: Software Heritage storage manager Home-page: https://forge.softwareheritage.org/diffusion/DSTO/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-storage Project-URL: Documentation, https://docs.softwareheritage.org/devel/swh-storage/ Description: swh-storage =========== Abstraction layer over the archive, allowing to access all stored source code artifacts as well as their metadata. See the [documentation](https://docs.softwareheritage.org/devel/swh-storage/index.html) for more details. ## Quick start ### Dependencies Python tests for this module include tests that cannot be run without a local Postgresql database, so you need the Postgresql server executable on your machine (no need to have a running Postgresql server). They also expect a cassandra server. #### Debian-like host ``` $ sudo apt install libpq-dev postgresql-11 cassandra ``` #### Non Debian-like host The tests expects the path to `cassandra` to either be unspecified, it is then looked up at `/usr/sbin/cassandra`, either specified through the environment variable `SWH_CASSANDRA_BIN`. Optionally, you can avoid running the cassandra tests. ``` (swh) :~/swh-storage$ tox -- -m 'not cassandra' ``` ### Installation It is strongly recommended to use a virtualenv. In the following, we consider you work in a virtualenv named `swh`. See the [developer setup guide](https://docs.softwareheritage.org/devel/developer-setup.html#developer-setup) for a more details on how to setup a working environment. You can install the package directly from [pypi](https://pypi.org/p/swh.storage): ``` (swh) :~$ pip install swh.storage [...] ``` Or from sources: ``` (swh) :~$ git clone https://forge.softwareheritage.org/source/swh-storage.git [...] (swh) :~$ cd swh-storage (swh) :~/swh-storage$ pip install . [...] 
``` Then you can check it's properly installed: ``` (swh) :~$ swh storage --help Usage: swh storage [OPTIONS] COMMAND [ARGS]... Software Heritage Storage tools. Options: -h, --help Show this message and exit. Commands: rpc-serve Software Heritage Storage RPC server. ``` ## Tests The best way of running Python tests for this module is to use [tox](https://tox.readthedocs.io/). ``` (swh) :~$ pip install tox ``` ### tox From the sources directory, simply use tox: ``` (swh) :~/swh-storage$ tox [...] ========= 315 passed, 6 skipped, 15 warnings in 40.86 seconds ========== _______________________________ summary ________________________________ flake8: commands succeeded py3: commands succeeded congratulations :) ``` ## Development The storage server can be locally started. It requires a configuration file and a running Postgresql database. ### Sample configuration A typical configuration `storage.yml` file is: ``` storage: cls: local db: "dbname=softwareheritage-dev user= password=" objstorage: cls: pathslicing root: /tmp/swh-storage/ slicing: 0:2/2:4/4:6 ``` which means, this uses: - a local storage instance whose db connection is to `softwareheritage-dev` local instance, - the objstorage uses a local objstorage instance whose: - `root` path is /tmp/swh-storage, - slicing scheme is `0:2/2:4/4:6`. This means that the identifier of the content (sha1) which will be stored on disk at first level with the first 2 hex characters, the second level with the next 2 hex characters and the third level with the next 2 hex characters. And finally the complete hash file holding the raw content. For example: 00062f8bd330715c4f819373653d97b3cd34394c will be stored at 00/06/2f/00062f8bd330715c4f819373653d97b3cd34394c Note that the `root` path should exist on disk before starting the server. ### Starting the storage server If the python package has been properly installed (e.g. in a virtual env), you should be able to use the command: ``` (swh) :~/swh-storage$ swh storage rpc-serve storage.yml ``` This runs a local swh-storage api at 5002 port. ``` (swh) :~/swh-storage$ curl http://127.0.0.1:5002 Software Heritage storage server

You have reached the Software Heritage storage server.
See its documentation and API for more information

``` ### And then what? In your upper layer ([loader-git](https://forge.softwareheritage.org/source/swh-loader-git/), [loader-svn](https://forge.softwareheritage.org/source/swh-loader-svn/), etc...), you can define a remote storage with this snippet of yaml configuration. ``` storage: cls: remote url: http://localhost:5002/ ``` You could directly define a local storage with the following snippet: ``` storage: cls: local db: service=swh-dev objstorage: cls: pathslicing root: /home/storage/swh-storage/ slicing: 0:2/2:4/4:6 ``` Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.7 Description-Content-Type: text/markdown Provides-Extra: testing Provides-Extra: schemata Provides-Extra: journal diff --git a/requirements-swh.txt b/requirements-swh.txt index 6d8a7806..bf6b6d59 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1,3 +1,3 @@ -swh.core[db,http] >= 0.3 -swh.model >= 0.6.6 +swh.core[db,http] >= 0.5 +swh.model >= 0.7.2 swh.objstorage >= 0.2.2 diff --git a/sql/upgrades/164.sql b/sql/upgrades/164.sql new file mode 100644 index 00000000..7502af30 --- /dev/null +++ b/sql/upgrades/164.sql @@ -0,0 +1,9 @@ +-- SWH DB schema upgrade +-- from_version: 163 +-- to_version: 164 +-- description: rename raw_extrinsic_metadata.id to raw_extrinsic_metadata.target + +insert into dbversion(version, release, description) + values(164, now(), 'Work In Progress'); + +alter table raw_extrinsic_metadata rename id to target; diff --git a/swh.storage.egg-info/PKG-INFO b/swh.storage.egg-info/PKG-INFO index 04d961ee..1d88a693 100644 --- a/swh.storage.egg-info/PKG-INFO +++ b/swh.storage.egg-info/PKG-INFO @@ -1,213 +1,213 @@ Metadata-Version: 2.1 Name: swh.storage -Version: 0.16.0 +Version: 0.17.0 Summary: Software Heritage storage manager Home-page: https://forge.softwareheritage.org/diffusion/DSTO/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest Project-URL: Funding, https://www.softwareheritage.org/donate Project-URL: Source, https://forge.softwareheritage.org/source/swh-storage Project-URL: Documentation, https://docs.softwareheritage.org/devel/swh-storage/ Description: swh-storage =========== Abstraction layer over the archive, allowing to access all stored source code artifacts as well as their metadata. See the [documentation](https://docs.softwareheritage.org/devel/swh-storage/index.html) for more details. ## Quick start ### Dependencies Python tests for this module include tests that cannot be run without a local Postgresql database, so you need the Postgresql server executable on your machine (no need to have a running Postgresql server). They also expect a cassandra server. #### Debian-like host ``` $ sudo apt install libpq-dev postgresql-11 cassandra ``` #### Non Debian-like host The tests expects the path to `cassandra` to either be unspecified, it is then looked up at `/usr/sbin/cassandra`, either specified through the environment variable `SWH_CASSANDRA_BIN`. Optionally, you can avoid running the cassandra tests. ``` (swh) :~/swh-storage$ tox -- -m 'not cassandra' ``` ### Installation It is strongly recommended to use a virtualenv. In the following, we consider you work in a virtualenv named `swh`. 
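As a complement to the shell-based check above and to the storage configuration snippets shown in this README (the "cls: remote" / "cls: local" examples), here is a minimal Python sketch of how an upper layer might instantiate a storage client. It assumes the package is installed and, for the remote case, that the RPC server described below is running on localhost:5002:

```
from swh.storage import get_storage

# Same parameters as the "cls: remote" YAML snippet shown in this README.
storage = get_storage(cls="remote", url="http://localhost:5002/")

# Any StorageInterface method can then be called, for instance the object
# counters also exposed by the server's /stat/counters endpoint.
print(storage.stat_counters())
```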
See the [developer setup guide](https://docs.softwareheritage.org/devel/developer-setup.html#developer-setup) for a more details on how to setup a working environment. You can install the package directly from [pypi](https://pypi.org/p/swh.storage): ``` (swh) :~$ pip install swh.storage [...] ``` Or from sources: ``` (swh) :~$ git clone https://forge.softwareheritage.org/source/swh-storage.git [...] (swh) :~$ cd swh-storage (swh) :~/swh-storage$ pip install . [...] ``` Then you can check it's properly installed: ``` (swh) :~$ swh storage --help Usage: swh storage [OPTIONS] COMMAND [ARGS]... Software Heritage Storage tools. Options: -h, --help Show this message and exit. Commands: rpc-serve Software Heritage Storage RPC server. ``` ## Tests The best way of running Python tests for this module is to use [tox](https://tox.readthedocs.io/). ``` (swh) :~$ pip install tox ``` ### tox From the sources directory, simply use tox: ``` (swh) :~/swh-storage$ tox [...] ========= 315 passed, 6 skipped, 15 warnings in 40.86 seconds ========== _______________________________ summary ________________________________ flake8: commands succeeded py3: commands succeeded congratulations :) ``` ## Development The storage server can be locally started. It requires a configuration file and a running Postgresql database. ### Sample configuration A typical configuration `storage.yml` file is: ``` storage: cls: local db: "dbname=softwareheritage-dev user= password=" objstorage: cls: pathslicing root: /tmp/swh-storage/ slicing: 0:2/2:4/4:6 ``` which means, this uses: - a local storage instance whose db connection is to `softwareheritage-dev` local instance, - the objstorage uses a local objstorage instance whose: - `root` path is /tmp/swh-storage, - slicing scheme is `0:2/2:4/4:6`. This means that the identifier of the content (sha1) which will be stored on disk at first level with the first 2 hex characters, the second level with the next 2 hex characters and the third level with the next 2 hex characters. And finally the complete hash file holding the raw content. For example: 00062f8bd330715c4f819373653d97b3cd34394c will be stored at 00/06/2f/00062f8bd330715c4f819373653d97b3cd34394c Note that the `root` path should exist on disk before starting the server. ### Starting the storage server If the python package has been properly installed (e.g. in a virtual env), you should be able to use the command: ``` (swh) :~/swh-storage$ swh storage rpc-serve storage.yml ``` This runs a local swh-storage api at 5002 port. ``` (swh) :~/swh-storage$ curl http://127.0.0.1:5002 Software Heritage storage server

You have reached the Software Heritage storage server.
See its documentation and API for more information

``` ### And then what? In your upper layer ([loader-git](https://forge.softwareheritage.org/source/swh-loader-git/), [loader-svn](https://forge.softwareheritage.org/source/swh-loader-svn/), etc...), you can define a remote storage with this snippet of yaml configuration. ``` storage: cls: remote url: http://localhost:5002/ ``` You could directly define a local storage with the following snippet: ``` storage: cls: local db: service=swh-dev objstorage: cls: pathslicing root: /home/storage/swh-storage/ slicing: 0:2/2:4/4:6 ``` Platform: UNKNOWN Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) Classifier: Operating System :: OS Independent Classifier: Development Status :: 5 - Production/Stable Requires-Python: >=3.7 Description-Content-Type: text/markdown Provides-Extra: testing Provides-Extra: schemata Provides-Extra: journal diff --git a/swh.storage.egg-info/SOURCES.txt b/swh.storage.egg-info/SOURCES.txt index 5c228a27..9c7291fd 100644 --- a/swh.storage.egg-info/SOURCES.txt +++ b/swh.storage.egg-info/SOURCES.txt @@ -1,312 +1,313 @@ .gitignore .pre-commit-config.yaml AUTHORS CODE_OF_CONDUCT.md CONTRIBUTORS LICENSE MANIFEST.in Makefile Makefile.local README.md conftest.py mypy.ini pyproject.toml pytest.ini requirements-swh-journal.txt requirements-swh.txt requirements-test.txt requirements.txt setup.cfg setup.py tox.ini ./requirements-swh-journal.txt ./requirements-swh.txt ./requirements-test.txt ./requirements.txt bin/swh-storage-add-dir docs/.gitignore docs/Makefile docs/Makefile.local docs/archive-copies.rst docs/conf.py docs/extrinsic-metadata-specification.rst docs/index.rst docs/sql-storage.rst docs/_static/.placeholder docs/_templates/.placeholder docs/images/.gitignore docs/images/Makefile docs/images/swh-archive-copies.dia sql/.gitignore sql/Makefile sql/TODO sql/clusters.dot sql/bin/db-upgrade sql/bin/dot_add_content sql/doc/json sql/doc/json/.gitignore sql/doc/json/Makefile sql/doc/json/entity.lister_metadata.schema.json sql/doc/json/entity.metadata.schema.json sql/doc/json/entity_history.lister_metadata.schema.json sql/doc/json/entity_history.metadata.schema.json sql/doc/json/fetch_history.result.schema.json sql/doc/json/list_history.result.schema.json sql/doc/json/listable_entity.list_params.schema.json sql/doc/json/origin_visit.metadata.json sql/doc/json/tool.tool_configuration.schema.json sql/json/.gitignore sql/json/Makefile sql/json/entity.lister_metadata.schema.json sql/json/entity.metadata.schema.json sql/json/entity_history.lister_metadata.schema.json sql/json/entity_history.metadata.schema.json sql/json/fetch_history.result.schema.json sql/json/list_history.result.schema.json sql/json/listable_entity.list_params.schema.json sql/json/origin_visit.metadata.json sql/json/tool.tool_configuration.schema.json sql/upgrades/015.sql sql/upgrades/016.sql sql/upgrades/017.sql sql/upgrades/018.sql sql/upgrades/019.sql sql/upgrades/020.sql sql/upgrades/021.sql sql/upgrades/022.sql sql/upgrades/023.sql sql/upgrades/024.sql sql/upgrades/025.sql sql/upgrades/026.sql sql/upgrades/027.sql sql/upgrades/028.sql sql/upgrades/029.sql sql/upgrades/030.sql sql/upgrades/032.sql sql/upgrades/033.sql sql/upgrades/034.sql sql/upgrades/035.sql sql/upgrades/036.sql sql/upgrades/037.sql sql/upgrades/038.sql sql/upgrades/039.sql sql/upgrades/040.sql sql/upgrades/041.sql sql/upgrades/042.sql sql/upgrades/043.sql sql/upgrades/044.sql sql/upgrades/045.sql sql/upgrades/046.sql 
sql/upgrades/047.sql sql/upgrades/048.sql sql/upgrades/049.sql sql/upgrades/050.sql sql/upgrades/051.sql sql/upgrades/052.sql sql/upgrades/053.sql sql/upgrades/054.sql sql/upgrades/055.sql sql/upgrades/056.sql sql/upgrades/057.sql sql/upgrades/058.sql sql/upgrades/059.sql sql/upgrades/060.sql sql/upgrades/061.sql sql/upgrades/062.sql sql/upgrades/063.sql sql/upgrades/064.sql sql/upgrades/065.sql sql/upgrades/066.sql sql/upgrades/067.sql sql/upgrades/068.sql sql/upgrades/069.sql sql/upgrades/070.sql sql/upgrades/071.sql sql/upgrades/072.sql sql/upgrades/073.sql sql/upgrades/074.sql sql/upgrades/075.sql sql/upgrades/076.sql sql/upgrades/077.sql sql/upgrades/078.sql sql/upgrades/079.sql sql/upgrades/080.sql sql/upgrades/081.sql sql/upgrades/082.sql sql/upgrades/083.sql sql/upgrades/084.sql sql/upgrades/085.sql sql/upgrades/086.sql sql/upgrades/087.sql sql/upgrades/088.sql sql/upgrades/089.sql sql/upgrades/090.sql sql/upgrades/091.sql sql/upgrades/092.sql sql/upgrades/093.sql sql/upgrades/094.sql sql/upgrades/095.sql sql/upgrades/096.sql sql/upgrades/097.sql sql/upgrades/098.sql sql/upgrades/099.sql sql/upgrades/100.sql sql/upgrades/101.sql sql/upgrades/102.sql sql/upgrades/103.sql sql/upgrades/104.sql sql/upgrades/105.sql sql/upgrades/106.sql sql/upgrades/107.sql sql/upgrades/108.sql sql/upgrades/109.sql sql/upgrades/110.sql sql/upgrades/111.sql sql/upgrades/112.sql sql/upgrades/113.sql sql/upgrades/114.sql sql/upgrades/115.sql sql/upgrades/116.sql sql/upgrades/117.sql sql/upgrades/118.sql sql/upgrades/119.sql sql/upgrades/120.sql sql/upgrades/121.sql sql/upgrades/122.sql sql/upgrades/123.sql sql/upgrades/124.sql sql/upgrades/125.sql sql/upgrades/126.sql sql/upgrades/127.sql sql/upgrades/128.sql sql/upgrades/129.sql sql/upgrades/130.sql sql/upgrades/131.sql sql/upgrades/132.sql sql/upgrades/133.sql sql/upgrades/134.sql sql/upgrades/135.sql sql/upgrades/136.sql sql/upgrades/137.sql sql/upgrades/138.sql sql/upgrades/139.sql sql/upgrades/140.sql sql/upgrades/141.sql sql/upgrades/142.sql sql/upgrades/143.sql sql/upgrades/144.sql sql/upgrades/145.sql sql/upgrades/146.sql sql/upgrades/147.sql sql/upgrades/148.sql sql/upgrades/149.sql sql/upgrades/150.sql sql/upgrades/151.sql sql/upgrades/152.sql sql/upgrades/153.sql sql/upgrades/154.sql sql/upgrades/155.sql sql/upgrades/156.sql sql/upgrades/157.sql sql/upgrades/158.sql sql/upgrades/159.sql sql/upgrades/160.sql sql/upgrades/161.sql sql/upgrades/162.sql sql/upgrades/163.sql +sql/upgrades/164.sql swh/__init__.py swh.storage.egg-info/PKG-INFO swh.storage.egg-info/SOURCES.txt swh.storage.egg-info/dependency_links.txt swh.storage.egg-info/entry_points.txt swh.storage.egg-info/requires.txt swh.storage.egg-info/top_level.txt swh/storage/__init__.py swh/storage/backfill.py swh/storage/buffer.py swh/storage/cli.py swh/storage/common.py swh/storage/exc.py swh/storage/filter.py swh/storage/fixer.py swh/storage/in_memory.py swh/storage/interface.py swh/storage/metrics.py swh/storage/migrate_extrinsic_metadata.py swh/storage/objstorage.py swh/storage/py.typed swh/storage/pytest_plugin.py swh/storage/replay.py swh/storage/retry.py swh/storage/utils.py swh/storage/validate.py swh/storage/writer.py swh/storage/algos/__init__.py swh/storage/algos/diff.py swh/storage/algos/dir_iterators.py swh/storage/algos/origin.py swh/storage/algos/revisions_walker.py swh/storage/algos/snapshot.py swh/storage/api/__init__.py swh/storage/api/client.py swh/storage/api/serializers.py swh/storage/api/server.py swh/storage/cassandra/__init__.py swh/storage/cassandra/common.py 
swh/storage/cassandra/converters.py swh/storage/cassandra/cql.py swh/storage/cassandra/model.py swh/storage/cassandra/schema.py swh/storage/cassandra/storage.py swh/storage/postgresql/__init__.py swh/storage/postgresql/converters.py swh/storage/postgresql/db.py swh/storage/postgresql/storage.py swh/storage/sql/10-superuser-init.sql swh/storage/sql/15-flavor.sql swh/storage/sql/20-enums.sql swh/storage/sql/30-schema.sql swh/storage/sql/40-funcs.sql swh/storage/sql/60-indexes.sql swh/storage/sql/logical_replication/replication_source.sql swh/storage/tests/__init__.py swh/storage/tests/conftest.py swh/storage/tests/storage_data.py swh/storage/tests/storage_tests.py swh/storage/tests/test_api_client.py swh/storage/tests/test_backfill.py swh/storage/tests/test_buffer.py swh/storage/tests/test_cassandra.py swh/storage/tests/test_cassandra_converters.py swh/storage/tests/test_cli.py swh/storage/tests/test_exception.py swh/storage/tests/test_filter.py swh/storage/tests/test_in_memory.py swh/storage/tests/test_init.py swh/storage/tests/test_kafka_writer.py swh/storage/tests/test_metrics.py swh/storage/tests/test_postgresql.py swh/storage/tests/test_postgresql_converters.py swh/storage/tests/test_pytest_plugin.py swh/storage/tests/test_replay.py swh/storage/tests/test_retry.py swh/storage/tests/test_revision_bw_compat.py swh/storage/tests/test_serializers.py swh/storage/tests/test_server.py swh/storage/tests/test_storage_data.py swh/storage/tests/test_utils.py swh/storage/tests/test_validate.py swh/storage/tests/algos/__init__.py swh/storage/tests/algos/test_diff.py swh/storage/tests/algos/test_dir_iterator.py swh/storage/tests/algos/test_origin.py swh/storage/tests/algos/test_revisions_walker.py swh/storage/tests/algos/test_snapshot.py swh/storage/tests/data/storage.yml swh/storage/tests/migrate_extrinsic_metadata/test_cran.py swh/storage/tests/migrate_extrinsic_metadata/test_debian.py swh/storage/tests/migrate_extrinsic_metadata/test_deposit.py swh/storage/tests/migrate_extrinsic_metadata/test_gnu.py swh/storage/tests/migrate_extrinsic_metadata/test_nixguix.py swh/storage/tests/migrate_extrinsic_metadata/test_npm.py swh/storage/tests/migrate_extrinsic_metadata/test_pypi.py \ No newline at end of file diff --git a/swh.storage.egg-info/requires.txt b/swh.storage.egg-info/requires.txt index c07a030c..9413cb3f 100644 --- a/swh.storage.egg-info/requires.txt +++ b/swh.storage.egg-info/requires.txt @@ -1,30 +1,30 @@ click flask psycopg2 aiohttp tenacity cassandra-driver!=3.21.0,>=3.19.0 deprecated typing-extensions mypy_extensions iso8601 -swh.core[db,http]>=0.3 -swh.model>=0.6.6 +swh.core[db,http]>=0.5 +swh.model>=0.7.2 swh.objstorage>=0.2.2 [journal] swh.journal>=0.4 [schemata] SQLAlchemy [testing] hypothesis>=3.11.0 pytest pytest-mock pytest-postgresql>=2.1.0 sqlalchemy-stubs swh.model[testing]>=0.0.50 pytz pytest-xdist swh.journal>=0.4 diff --git a/swh/storage/algos/snapshot.py b/swh/storage/algos/snapshot.py index 21d5607b..c549c7a7 100644 --- a/swh/storage/algos/snapshot.py +++ b/swh/storage/algos/snapshot.py @@ -1,166 +1,220 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from typing import Iterator, List, Optional, Tuple +from typing import Iterator, List, Optional, Tuple, cast from swh.model.hashutil import hash_to_hex from swh.model.model import ( OriginVisit, OriginVisitStatus, Sha1Git, 
Snapshot, + SnapshotBranch, TargetType, ) from swh.storage.algos.origin import ( iter_origin_visit_statuses, iter_origin_visits, origin_get_latest_visit_status, ) from swh.storage.interface import ListOrder, StorageInterface def snapshot_get_all_branches( storage: StorageInterface, snapshot_id: Sha1Git ) -> Optional[Snapshot]: """Get all the branches for a given snapshot Args: storage (swh.storage.interface.StorageInterface): the storage instance snapshot_id (bytes): the snapshot's identifier Returns: dict: a dict with two keys: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. """ ret = storage.snapshot_get_branches(snapshot_id) if not ret: return None next_branch = ret["next_branch"] while next_branch: data = storage.snapshot_get_branches(snapshot_id, branches_from=next_branch) assert data, f"Snapshot {hash_to_hex(snapshot_id)} ceased to exist" ret["branches"].update(data["branches"]) next_branch = data["next_branch"] return Snapshot(id=ret["id"], branches=ret["branches"]) def snapshot_get_latest( storage: StorageInterface, origin: str, allowed_statuses: Optional[List[str]] = None, branches_count: Optional[int] = None, ) -> Optional[Snapshot]: """Get the latest snapshot for the given origin, optionally only from visits that have one of the given allowed_statuses. The branches of the snapshot are iterated in the lexicographical order of their names. Args: storage: Storage instance origin: the origin's URL allowed_statuses: list of visit statuses considered to find the latest snapshot for the visit. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. branches_count: Optional parameter to retrieve snapshot with all branches (default behavior when None) or not. If set to positive number, the snapshot will be partial with only that number of branches. Raises: ValueError if branches_count is not a positive value Returns: The snapshot object if one is found matching the criteria or None. """ visit_and_status = origin_get_latest_visit_status( storage, origin, allowed_statuses=allowed_statuses, require_snapshot=True, ) if not visit_and_status: return None _, visit_status = visit_and_status snapshot_id = visit_status.snapshot if not snapshot_id: return None if branches_count: # partial snapshot if not isinstance(branches_count, int) or branches_count <= 0: raise ValueError( "Parameter branches_count must be a positive integer. " f"Current value is {branches_count}" ) snapshot = storage.snapshot_get_branches( snapshot_id, branches_count=branches_count ) if snapshot is None: return None return Snapshot(id=snapshot["id"], branches=snapshot["branches"]) else: return snapshot_get_all_branches(storage, snapshot_id) def snapshot_id_get_from_revision( storage: StorageInterface, origin: str, revision_id: bytes ) -> Optional[bytes]: """Retrieve the most recent snapshot id targeting the revision_id for the given origin. *Warning* This is a potentially highly costly operation Returns The snapshot id if found. None otherwise. """ res = visits_and_snapshots_get_from_revision(storage, origin, revision_id) # they are sorted by descending date, so we just need to return the first one, # if any. 
for (visit, status, snapshot) in res: return snapshot.id return None def visits_and_snapshots_get_from_revision( storage: StorageInterface, origin: str, revision_id: bytes ) -> Iterator[Tuple[OriginVisit, OriginVisitStatus, Snapshot]]: """Retrieve all visits, visit statuses, and matching snapshots of the given origin, such that the snapshot targets the revision_id. *Warning* This is a potentially highly costly operation Yields: Tuples of (visit, status, snapshot) """ revision = storage.revision_get([revision_id]) if not revision: return for visit in iter_origin_visits(storage, origin, order=ListOrder.DESC): assert visit.visit is not None for visit_status in iter_origin_visit_statuses( storage, origin, visit.visit, order=ListOrder.DESC ): snapshot_id = visit_status.snapshot if snapshot_id is None: continue snapshot = snapshot_get_all_branches(storage, snapshot_id) if not snapshot: continue for branch_name, branch in snapshot.branches.items(): if ( branch is not None and branch.target_type == TargetType.REVISION and branch.target == revision_id ): # snapshot found yield (visit, visit_status, snapshot) + + +def snapshot_resolve_alias( + storage: StorageInterface, snapshot_id: Sha1Git, alias_name: bytes +) -> Optional[Tuple[List[SnapshotBranch], Optional[SnapshotBranch]]]: + """ + Resolve a snapshot branch alias to its real target. + + Args: + storage: Storage instance + snapshot_id: snapshot identifier + alias_name: name of the branch alias to resolve + + Returns: + A tuple whose first member is the list of branches followed until the alias + resolved to a branch whose target type is not an alias, and whose second + member is the branch actually targeted. + If a dangling branch is encountered while resolving, the second member of + the tuple is None. + If the target type of the second member is still an alias, a cycle was + detected while resolving.
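+
+    Example:
+        An illustrative sketch only; the branch name and the resolved target
+        below are hypothetical, not taken from a real snapshot. Assuming
+        ``b"HEAD"`` is an alias for ``b"refs/heads/master"``, which targets a
+        revision:
+
+            branches, target = snapshot_resolve_alias(storage, snapshot_id, b"HEAD")
+            # branches == [SnapshotBranch(target=b"refs/heads/master",
+            #                             target_type=TargetType.ALIAS)]
+            # target is the branch registered under b"refs/heads/master", e.g.
+            # SnapshotBranch(target=<revision sha1_git>,
+            #                target_type=TargetType.REVISION)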
+ """ + snapshot = storage.snapshot_get_branches( + snapshot_id, branches_from=alias_name, branches_count=1 + ) + if snapshot is None: + return None + + if alias_name not in snapshot["branches"]: + return ([], None) + + branch_info = snapshot["branches"][alias_name] + branches = [branch_info] + + seen_aliases = {alias_name} + + while ( + branch_info is not None + and branch_info.target_type == TargetType.ALIAS + and branch_info.target not in seen_aliases + ): + alias_target = branch_info.target + snapshot = storage.snapshot_get_branches( + snapshot_id, branches_from=alias_target, branches_count=1 + ) + assert snapshot is not None + if alias_target not in snapshot["branches"]: + break + seen_aliases.add(alias_target) + branch_info = snapshot["branches"][alias_target] + branches.append(branch_info) + + return (cast(List[SnapshotBranch], branches[:-1]), branches[-1]) diff --git a/swh/storage/api/server.py b/swh/storage/api/server.py index d72723fb..afce5528 100644 --- a/swh/storage/api/server.py +++ b/swh/storage/api/server.py @@ -1,129 +1,128 @@ -# Copyright (C) 2015-2019 The Software Heritage developers +# Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import logging import os +from typing import Any, Dict, Optional from swh.core import config from swh.core.api import RPCServerApp from swh.core.api import encode_data_server as encode_data from swh.core.api import error_handler from swh.storage import get_storage as get_swhstorage from ..exc import StorageArgumentException from ..interface import StorageInterface from ..metrics import timed from .serializers import DECODERS, ENCODERS def get_storage(): global storage if not storage: storage = get_swhstorage(**app.config["storage"]) return storage class StorageServerApp(RPCServerApp): extra_type_decoders = DECODERS extra_type_encoders = ENCODERS app = StorageServerApp( __name__, backend_class=StorageInterface, backend_factory=get_storage ) storage = None @app.errorhandler(StorageArgumentException) def argument_error_handler(exception): return error_handler(exception, encode_data, status_code=400) @app.errorhandler(Exception) def my_error_handler(exception): return error_handler(exception, encode_data) @app.route("/") @timed def index(): return """ Software Heritage storage server

You have reached the Software Heritage storage server.
See its documentation and API for more information

""" @app.route("/stat/counters", methods=["GET"]) @timed def stat_counters(): return encode_data(get_storage().stat_counters()) @app.route("/stat/refresh", methods=["GET"]) @timed def refresh_stat_counters(): return encode_data(get_storage().refresh_stat_counters()) api_cfg = None -def load_and_check_config(config_file, type="local"): +def load_and_check_config(config_path: Optional[str]) -> Dict[str, Any]: """Check the minimal configuration is set to run the api or raise an error explanation. Args: - config_file (str): Path to the configuration file to load - type (str): configuration type. For 'local' type, more - checks are done. + config_path: Path to the configuration file to load Raises: Error if the setup is not as expected Returns: configuration as a dict """ - if not config_file: + if not config_path: raise EnvironmentError("Configuration file must be defined") - if not os.path.exists(config_file): - raise FileNotFoundError("Configuration file %s does not exist" % (config_file,)) + if not os.path.exists(config_path): + raise FileNotFoundError(f"Configuration file {config_path} does not exist") - cfg = config.read(config_file) + cfg = config.read(config_path) if "storage" not in cfg: - raise KeyError("Missing '%storage' configuration") + raise KeyError("Missing 'storage' configuration") return cfg -def make_app_from_configfile(): +def make_app_from_configfile() -> StorageServerApp: """Run the WSGI app from the webserver, loading the configuration from a configuration file. SWH_CONFIG_FILENAME environment variable defines the configuration path to load. """ global api_cfg if not api_cfg: - config_file = os.environ.get("SWH_CONFIG_FILENAME") - api_cfg = load_and_check_config(config_file) + config_path = os.environ.get("SWH_CONFIG_FILENAME") + api_cfg = load_and_check_config(config_path) app.config.update(api_cfg) handler = logging.StreamHandler() app.logger.addHandler(handler) return app if __name__ == "__main__": print("Deprecated. Use swh-storage") diff --git a/swh/storage/backfill.py b/swh/storage/backfill.py index 3e64c6d5..4e910b20 100644 --- a/swh/storage/backfill.py +++ b/swh/storage/backfill.py @@ -1,549 +1,549 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Storage backfiller. The backfiller goal is to produce back part or all of the objects from a storage to the journal topics Current implementation consists in the JournalBackfiller class. It simply reads the objects from the storage and sends every object identifier back to the journal. 
""" import logging from typing import Any, Callable, Dict from swh.core.db import BaseDb -from swh.journal.writer.kafka import KafkaJournalWriter +from swh.journal.writer import get_journal_writer from swh.model.model import ( BaseModel, Directory, DirectoryEntry, RawExtrinsicMetadata, Release, Revision, Snapshot, SnapshotBranch, TargetType, ) from swh.storage.postgresql.converters import ( db_to_raw_extrinsic_metadata, db_to_release, db_to_revision, ) from swh.storage.replay import object_converter_fn logger = logging.getLogger(__name__) PARTITION_KEY = { "content": "sha1", "skipped_content": "sha1", "directory": "id", "metadata_authority": "type, url", "metadata_fetcher": "name, version", - "raw_extrinsic_metadata": "id", + "raw_extrinsic_metadata": "target", "revision": "revision.id", "release": "release.id", "snapshot": "id", "origin": "id", "origin_visit": "origin_visit.origin", "origin_visit_status": "origin_visit_status.origin", } COLUMNS = { "content": [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "status", "ctime", ], "skipped_content": [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", "reason", ], "directory": ["id", "dir_entries", "file_entries", "rev_entries"], "metadata_authority": ["type", "url", "metadata",], "metadata_fetcher": ["name", "version", "metadata",], "raw_extrinsic_metadata": [ "raw_extrinsic_metadata.type", - "raw_extrinsic_metadata.id", + "raw_extrinsic_metadata.target", "metadata_authority.type", "metadata_authority.url", "metadata_fetcher.name", "metadata_fetcher.version", "discovery_date", "format", "raw_extrinsic_metadata.metadata", "origin", "visit", "snapshot", "release", "revision", "path", "directory", ], "revision": [ ("revision.id", "id"), "date", "date_offset", "date_neg_utc_offset", "committer_date", "committer_date_offset", "committer_date_neg_utc_offset", "type", "directory", "message", "synthetic", "metadata", "extra_headers", ( "array(select parent_id::bytea from revision_history rh " "where rh.id = revision.id order by rh.parent_rank asc)", "parents", ), ("a.id", "author_id"), ("a.name", "author_name"), ("a.email", "author_email"), ("a.fullname", "author_fullname"), ("c.id", "committer_id"), ("c.name", "committer_name"), ("c.email", "committer_email"), ("c.fullname", "committer_fullname"), ], "release": [ ("release.id", "id"), "date", "date_offset", "date_neg_utc_offset", "comment", ("release.name", "name"), "synthetic", "target", "target_type", ("a.id", "author_id"), ("a.name", "author_name"), ("a.email", "author_email"), ("a.fullname", "author_fullname"), ], "snapshot": ["id", "object_id"], "origin": ["url"], "origin_visit": ["visit", "type", ("origin.url", "origin"), "date",], "origin_visit_status": [ "visit", ("origin.url", "origin"), "date", "snapshot", "status", "metadata", ], } JOINS = { "release": ["person a on release.author=a.id"], "revision": [ "person a on revision.author=a.id", "person c on revision.committer=c.id", ], "origin_visit": ["origin on origin_visit.origin=origin.id"], "origin_visit_status": ["origin on origin_visit_status.origin=origin.id"], "raw_extrinsic_metadata": [ "metadata_authority on " "raw_extrinsic_metadata.authority_id=metadata_authority.id", "metadata_fetcher on raw_extrinsic_metadata.fetcher_id=metadata_fetcher.id", ], } def directory_converter(db: BaseDb, directory_d: Dict[str, Any]) -> Directory: """Convert directory from the flat representation to swh model compatible objects. 
""" columns = ["target", "name", "perms"] query_template = """ select %(columns)s from directory_entry_%(type)s where id in %%s """ types = ["file", "dir", "rev"] entries = [] with db.cursor() as cur: for type in types: ids = directory_d.pop("%s_entries" % type) if not ids: continue query = query_template % { "columns": ",".join(columns), "type": type, } cur.execute(query, (tuple(ids),)) for row in cur: entry_d = dict(zip(columns, row)) entry = DirectoryEntry( name=entry_d["name"], type=type, target=entry_d["target"], perms=entry_d["perms"], ) entries.append(entry) return Directory(id=directory_d["id"], entries=tuple(entries),) def raw_extrinsic_metadata_converter( db: BaseDb, metadata: Dict[str, Any] ) -> RawExtrinsicMetadata: """Convert revision from the flat representation to swh model compatible objects. """ return db_to_raw_extrinsic_metadata(metadata) def revision_converter(db: BaseDb, revision_d: Dict[str, Any]) -> Revision: """Convert revision from the flat representation to swh model compatible objects. """ revision = db_to_revision(revision_d) assert revision is not None, revision_d["id"] return revision def release_converter(db: BaseDb, release_d: Dict[str, Any]) -> Release: """Convert release from the flat representation to swh model compatible objects. """ release = db_to_release(release_d) assert release is not None, release_d["id"] return release def snapshot_converter(db: BaseDb, snapshot_d: Dict[str, Any]) -> Snapshot: """Convert snapshot from the flat representation to swh model compatible objects. """ columns = ["name", "target", "target_type"] query = """ select %s from snapshot_branches sbs inner join snapshot_branch sb on sb.object_id=sbs.branch_id where sbs.snapshot_id=%%s """ % ", ".join( columns ) with db.cursor() as cur: cur.execute(query, (snapshot_d["object_id"],)) branches = {} for name, *row in cur: branch_d = dict(zip(columns[1:], row)) if branch_d["target"] or branch_d["target_type"]: branch = None else: branch = SnapshotBranch( target=branch_d["target"], target_type=TargetType(branch_d["target_type"]), ) branches[name] = branch return Snapshot(id=snapshot_d["id"], branches=branches,) CONVERTERS: Dict[str, Callable[[BaseDb, Dict[str, Any]], BaseModel]] = { "directory": directory_converter, "raw_extrinsic_metadata": raw_extrinsic_metadata_converter, "revision": revision_converter, "release": release_converter, "snapshot": snapshot_converter, } def object_to_offset(object_id, numbits): """Compute the index of the range containing object id, when dividing space into 2^numbits. Args: object_id (str): The hex representation of object_id numbits (int): Number of bits in which we divide input space Returns: The index of the range containing object id """ q, r = divmod(numbits, 8) length = q + (r != 0) shift_bits = 8 - r if r else 0 truncated_id = object_id[: length * 2] if len(truncated_id) < length * 2: truncated_id += "0" * (length * 2 - len(truncated_id)) truncated_id_bytes = bytes.fromhex(truncated_id) return int.from_bytes(truncated_id_bytes, byteorder="big") >> shift_bits def byte_ranges(numbits, start_object=None, end_object=None): """Generate start/end pairs of bytes spanning numbits bits and constrained by optional start_object and end_object. 
Args: numbits (int): Number of bits in which we divide input space start_object (str): Hex object id contained in the first range returned end_object (str): Hex object id contained in the last range returned Yields: 2^numbits pairs of bytes """ q, r = divmod(numbits, 8) length = q + (r != 0) shift_bits = 8 - r if r else 0 def to_bytes(i): return int.to_bytes(i << shift_bits, length=length, byteorder="big") start_offset = 0 end_offset = 1 << numbits if start_object is not None: start_offset = object_to_offset(start_object, numbits) if end_object is not None: end_offset = object_to_offset(end_object, numbits) + 1 for start in range(start_offset, end_offset): end = start + 1 if start == 0: yield None, to_bytes(end) elif end == 1 << numbits: yield to_bytes(start), None else: yield to_bytes(start), to_bytes(end) def integer_ranges(start, end, block_size=1000): for start in range(start, end, block_size): if start == 0: yield None, block_size elif start + block_size > end: yield start, end else: yield start, start + block_size RANGE_GENERATORS = { "content": lambda start, end: byte_ranges(24, start, end), "skipped_content": lambda start, end: [(None, None)], "directory": lambda start, end: byte_ranges(24, start, end), "revision": lambda start, end: byte_ranges(24, start, end), "release": lambda start, end: byte_ranges(16, start, end), "snapshot": lambda start, end: byte_ranges(16, start, end), "origin": integer_ranges, "origin_visit": integer_ranges, "origin_visit_status": integer_ranges, } def compute_query(obj_type, start, end): columns = COLUMNS.get(obj_type) join_specs = JOINS.get(obj_type, []) join_clause = "\n".join("left join %s" % clause for clause in join_specs) where = [] where_args = [] if start: where.append("%(keys)s >= %%s") where_args.append(start) if end: where.append("%(keys)s < %%s") where_args.append(end) where_clause = "" if where: where_clause = ("where " + " and ".join(where)) % { "keys": "(%s)" % PARTITION_KEY[obj_type] } column_specs = [] column_aliases = [] for column in columns: if isinstance(column, str): column_specs.append(column) column_aliases.append(column) else: column_specs.append("%s as %s" % column) column_aliases.append(column[1]) query = """ select %(columns)s from %(table)s %(join)s %(where)s """ % { "columns": ",".join(column_specs), "table": obj_type, "join": join_clause, "where": where_clause, } return query, where_args, column_aliases def fetch(db, obj_type, start, end): """Fetch all obj_type's identifiers from db. This opens one connection, stream objects and when done, close the connection. 
Args: db (BaseDb): Db connection object obj_type (str): Object type start (Union[bytes|Tuple]): Range start identifier end (Union[bytes|Tuple]): Range end identifier Raises: ValueError if obj_type is not supported Yields: Objects in the given range """ query, where_args, column_aliases = compute_query(obj_type, start, end) converter = CONVERTERS.get(obj_type) with db.cursor() as cursor: logger.debug("Fetching data for table %s", obj_type) logger.debug("query: %s %s", query, where_args) cursor.execute(query, where_args) for row in cursor: record = dict(zip(column_aliases, row)) if converter: record = converter(db, record) else: record = object_converter_fn[obj_type](record) logger.debug("record: %s" % record) yield record def _format_range_bound(bound): if isinstance(bound, bytes): return bound.hex() else: return str(bound) MANDATORY_KEYS = ["storage", "journal_writer"] class JournalBackfiller: """Class in charge of reading the storage's objects and sends those back to the journal's topics. This is designed to be run periodically. """ def __init__(self, config=None): self.config = config self.check_config(config) def check_config(self, config): missing_keys = [] for key in MANDATORY_KEYS: if not config.get(key): missing_keys.append(key) if missing_keys: raise ValueError( "Configuration error: The following keys must be" " provided: %s" % (",".join(missing_keys),) ) if "cls" not in config["storage"] or config["storage"]["cls"] != "local": raise ValueError( "swh storage backfiller must be configured to use a local" " (PostgreSQL) storage" ) def parse_arguments(self, object_type, start_object, end_object): """Parse arguments Raises: ValueError for unsupported object type ValueError if object ids are not parseable Returns: Parsed start and end object ids """ if object_type not in COLUMNS: raise ValueError( "Object type %s is not supported. " "The only possible values are %s" % (object_type, ", ".join(COLUMNS.keys())) ) if object_type in ["origin", "origin_visit"]: if start_object: start_object = int(start_object) else: start_object = 0 if end_object: end_object = int(end_object) else: end_object = 100 * 1000 * 1000 # hard-coded limit return start_object, end_object def run(self, object_type, start_object, end_object, dry_run=False): """Reads storage's subscribed object types and send them to the journal's reading topic. 
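
        A usage sketch (configuration and bounds are hypothetical; bounds for
        "origin" and "origin_visit" are integers, other object types take hex
        object ids):

            backfiller = JournalBackfiller(config)
            backfiller.run("release", start_object="00" * 20, end_object="ff" * 20, dry_run=True)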
""" start_object, end_object = self.parse_arguments( object_type, start_object, end_object ) db = BaseDb.connect(self.config["storage"]["db"]) - writer = KafkaJournalWriter(**self.config["journal_writer"]) + writer = get_journal_writer(cls="kafka", **self.config["journal_writer"]) for range_start, range_end in RANGE_GENERATORS[object_type]( start_object, end_object ): logger.info( "Processing %s range %s to %s", object_type, _format_range_bound(range_start), _format_range_bound(range_end), ) for obj in fetch(db, object_type, start=range_start, end=range_end,): if dry_run: continue writer.write_addition(object_type=object_type, object_=obj) writer.producer.flush() if __name__ == "__main__": print('Please use the "swh-journal backfiller run" command') diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py index 560de43a..1aa7f43c 100644 --- a/swh/storage/cassandra/cql.py +++ b/swh/storage/cassandra/cql.py @@ -1,973 +1,976 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import dataclasses import datetime import functools import logging import random from typing import ( Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Type, TypeVar, Union, ) from cassandra import CoordinationFailure from cassandra.cluster import EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, ResultSet from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy from cassandra.query import BoundStatement, PreparedStatement, dict_factory from mypy_extensions import NamedArg from tenacity import ( retry, retry_if_exception_type, stop_after_attempt, wait_random_exponential, ) from swh.model.model import ( Content, Person, Sha1Git, SkippedContent, Timestamp, TimestampWithTimezone, ) from swh.storage.interface import ListOrder from .common import TOKEN_BEGIN, TOKEN_END, hash_url, remove_keys from .model import ( MAGIC_NULL_PK, BaseRow, ContentRow, DirectoryEntryRow, DirectoryRow, MetadataAuthorityRow, MetadataFetcherRow, ObjectCountRow, OriginRow, OriginVisitRow, OriginVisitStatusRow, RawExtrinsicMetadataRow, ReleaseRow, RevisionParentRow, RevisionRow, SkippedContentRow, SnapshotBranchRow, SnapshotRow, ) from .schema import CREATE_TABLES_QUERIES, HASH_ALGORITHMS logger = logging.getLogger(__name__) _execution_profiles = { EXEC_PROFILE_DEFAULT: ExecutionProfile( load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy()), row_factory=dict_factory, ), } # Configuration for cassandra-driver's access to servers: # * hit the right server directly when sending a query (TokenAwarePolicy), # * if there's more than one, then pick one at random that's in the same # datacenter as the client (DCAwareRoundRobinPolicy) def create_keyspace( hosts: List[str], keyspace: str, port: int = 9042, *, durable_writes=True ): cluster = Cluster(hosts, port=port, execution_profiles=_execution_profiles) session = cluster.connect() extra_params = "" if not durable_writes: extra_params = "AND durable_writes = false" session.execute( """CREATE KEYSPACE IF NOT EXISTS "%s" WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } %s; """ % (keyspace, extra_params) ) session.execute('USE "%s"' % keyspace) for query in CREATE_TABLES_QUERIES: session.execute(query) TRet = TypeVar("TRet") def _prepared_statement( query: str, ) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]: """Returns a 
decorator usable on methods of CqlRunner, to inject them with a 'statement' argument, that is a prepared statement corresponding to the query. This only works on methods of CqlRunner, as preparing a statement requires a connection to a Cassandra server.""" def decorator(f): @functools.wraps(f) def newf(self, *args, **kwargs) -> TRet: if f.__name__ not in self._prepared_statements: statement: PreparedStatement = self._session.prepare(query) self._prepared_statements[f.__name__] = statement return f( self, *args, **kwargs, statement=self._prepared_statements[f.__name__] ) return newf return decorator TArg = TypeVar("TArg") TSelf = TypeVar("TSelf") def _prepared_insert_statement( row_class: Type[BaseRow], ) -> Callable[ [Callable[[TSelf, TArg, NamedArg(Any, "statement")], TRet]], # noqa Callable[[TSelf, TArg], TRet], ]: """Shorthand for using `_prepared_statement` for `INSERT INTO` statements.""" columns = row_class.cols() return _prepared_statement( "INSERT INTO %s (%s) VALUES (%s)" % (row_class.TABLE, ", ".join(columns), ", ".join("?" for _ in columns),) ) def _prepared_exists_statement( table_name: str, ) -> Callable[ [Callable[[TSelf, TArg, NamedArg(Any, "statement")], TRet]], # noqa Callable[[TSelf, TArg], TRet], ]: """Shorthand for using `_prepared_statement` for queries that only check which ids in a list exist in the table.""" return _prepared_statement(f"SELECT id FROM {table_name} WHERE id IN ?") def _prepared_select_statement( row_class: Type[BaseRow], clauses: str = "", cols: Optional[List[str]] = None, ) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]: if cols is None: cols = row_class.cols() return _prepared_statement( f"SELECT {', '.join(cols)} FROM {row_class.TABLE} {clauses}" ) def _prepared_select_statements( row_class: Type[BaseRow], queries: Dict[Any, str], ) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]: """Like _prepared_statement, but supports multiple statements, passed a dict, and passes a dict of prepared statements to the decorated method""" cols = row_class.cols() statement_start = f"SELECT {', '.join(cols)} FROM {row_class.TABLE} " def decorator(f): @functools.wraps(f) def newf(self, *args, **kwargs) -> TRet: if f.__name__ not in self._prepared_statements: self._prepared_statements[f.__name__] = { key: self._session.prepare(statement_start + query) for (key, query) in queries.items() } return f( self, *args, **kwargs, statements=self._prepared_statements[f.__name__] ) return newf return decorator class CqlRunner: """Class managing prepared statements and building queries to be sent to Cassandra.""" def __init__(self, hosts: List[str], keyspace: str, port: int): self._cluster = Cluster( hosts, port=port, execution_profiles=_execution_profiles ) self._session = self._cluster.connect(keyspace) self._cluster.register_user_type( keyspace, "microtimestamp_with_timezone", TimestampWithTimezone ) self._cluster.register_user_type(keyspace, "microtimestamp", Timestamp) self._cluster.register_user_type(keyspace, "person", Person) # directly a PreparedStatement for methods decorated with # @_prepared_statements (and its wrappers, _prepared_insert_statement, # _prepared_exists_statement, and _prepared_select_statement); # and a dict of PreparedStatements with @_prepared_select_statements self._prepared_statements: Dict[ str, Union[PreparedStatement, Dict[Any, PreparedStatement]] ] = {} ########################## # Common utility functions ########################## MAX_RETRIES = 3 @retry( wait=wait_random_exponential(multiplier=1, max=10), 
stop=stop_after_attempt(MAX_RETRIES), retry=retry_if_exception_type(CoordinationFailure), ) def _execute_with_retries(self, statement, args) -> ResultSet: return self._session.execute(statement, args, timeout=1000.0) @_prepared_statement( "UPDATE object_count SET count = count + ? " "WHERE partition_key = 0 AND object_type = ?" ) def _increment_counter( self, object_type: str, nb: int, *, statement: PreparedStatement ) -> None: self._execute_with_retries(statement, [nb, object_type]) def _add_one(self, statement, obj: BaseRow) -> None: self._increment_counter(obj.TABLE, 1) self._execute_with_retries(statement, dataclasses.astuple(obj)) _T = TypeVar("_T", bound=BaseRow) def _get_random_row(self, row_class: Type[_T], statement) -> Optional[_T]: # noqa """Takes a prepared statement of the form "SELECT * FROM WHERE token() > ? LIMIT 1" and uses it to return a random row""" token = random.randint(TOKEN_BEGIN, TOKEN_END) rows = self._execute_with_retries(statement, [token]) if not rows: # There are no row with a greater token; wrap around to get # the row with the smallest token rows = self._execute_with_retries(statement, [TOKEN_BEGIN]) if rows: return row_class.from_dict(rows.one()) # type: ignore else: return None def _missing(self, statement, ids): rows = self._execute_with_retries(statement, [ids]) found_ids = {row["id"] for row in rows} return [id_ for id_ in ids if id_ not in found_ids] ########################## # 'content' table ########################## def _content_add_finalize(self, statement: BoundStatement) -> None: """Returned currified by content_add_prepare, to be called when the content row should be added to the primary table.""" self._execute_with_retries(statement, None) self._increment_counter("content", 1) @_prepared_insert_statement(ContentRow) def content_add_prepare( self, content: ContentRow, *, statement ) -> Tuple[int, Callable[[], None]]: """Prepares insertion of a Content to the main 'content' table. Returns a token (to be used in secondary tables), and a function to be called to perform the insertion in the main table.""" statement = statement.bind(dataclasses.astuple(content)) # Type used for hashing keys (usually, it will be # cassandra.metadata.Murmur3Token) token_class = self._cluster.metadata.token_map.token_class # Token of the row when it will be inserted. This is equivalent to # "SELECT token({', '.join(ContentRow.PARTITION_KEY)}) FROM content WHERE ..." # after the row is inserted; but we need the token to insert in the # index tables *before* inserting to the main 'content' table token = token_class.from_key(statement.routing_key).value assert TOKEN_BEGIN <= token <= TOKEN_END # Function to be called after the indexes contain their respective # row finalizer = functools.partial(self._content_add_finalize, statement) return (token, finalizer) @_prepared_select_statement( ContentRow, f"WHERE {' AND '.join(map('%s = ?'.__mod__, HASH_ALGORITHMS))}" ) def content_get_from_pk( self, content_hashes: Dict[str, bytes], *, statement ) -> Optional[ContentRow]: rows = list( self._execute_with_retries( statement, [content_hashes[algo] for algo in HASH_ALGORITHMS] ) ) assert len(rows) <= 1 if rows: return ContentRow(**rows[0]) else: return None @_prepared_select_statement( ContentRow, f"WHERE token({', '.join(ContentRow.PARTITION_KEY)}) = ?" 
) def content_get_from_token(self, token, *, statement) -> Iterable[ContentRow]: return map(ContentRow.from_dict, self._execute_with_retries(statement, [token])) @_prepared_select_statement( ContentRow, f"WHERE token({', '.join(ContentRow.PARTITION_KEY)}) > ? LIMIT 1" ) def content_get_random(self, *, statement) -> Optional[ContentRow]: return self._get_random_row(ContentRow, statement) @_prepared_statement( ( "SELECT token({0}) AS tok, {1} FROM content " "WHERE token({0}) >= ? AND token({0}) <= ? LIMIT ?" ).format(", ".join(ContentRow.PARTITION_KEY), ", ".join(ContentRow.cols())) ) def content_get_token_range( self, start: int, end: int, limit: int, *, statement ) -> Iterable[Tuple[int, ContentRow]]: """Returns an iterable of (token, row)""" return ( (row["tok"], ContentRow.from_dict(remove_keys(row, ("tok",)))) for row in self._execute_with_retries(statement, [start, end, limit]) ) ########################## # 'content_by_*' tables ########################## @_prepared_statement( "SELECT sha1_git AS id FROM content_by_sha1_git WHERE sha1_git IN ?" ) def content_missing_by_sha1_git( self, ids: List[bytes], *, statement ) -> List[bytes]: return self._missing(statement, ids) def content_index_add_one(self, algo: str, content: Content, token: int) -> None: """Adds a row mapping content[algo] to the token of the Content in the main 'content' table.""" query = ( f"INSERT INTO content_by_{algo} ({algo}, target_token) " f"VALUES (%s, %s)" ) self._execute_with_retries(query, [content.get_hash(algo), token]) def content_get_tokens_from_single_hash( self, algo: str, hash_: bytes ) -> Iterable[int]: assert algo in HASH_ALGORITHMS query = f"SELECT target_token FROM content_by_{algo} WHERE {algo} = %s" return ( row["target_token"] for row in self._execute_with_retries(query, [hash_]) ) ########################## # 'skipped_content' table ########################## def _skipped_content_add_finalize(self, statement: BoundStatement) -> None: """Returned currified by skipped_content_add_prepare, to be called when the content row should be added to the primary table.""" self._execute_with_retries(statement, None) self._increment_counter("skipped_content", 1) @_prepared_insert_statement(SkippedContentRow) def skipped_content_add_prepare( self, content, *, statement ) -> Tuple[int, Callable[[], None]]: """Prepares insertion of a Content to the main 'skipped_content' table. Returns a token (to be used in secondary tables), and a function to be called to perform the insertion in the main table.""" # Replace NULLs (which are not allowed in the partition key) with # an empty byte string for key in SkippedContentRow.PARTITION_KEY: if getattr(content, key) is None: setattr(content, key, MAGIC_NULL_PK) statement = statement.bind(dataclasses.astuple(content)) # Type used for hashing keys (usually, it will be # cassandra.metadata.Murmur3Token) token_class = self._cluster.metadata.token_map.token_class # Token of the row when it will be inserted. This is equivalent to # "SELECT token({', '.join(SkippedContentRow.PARTITION_KEY)}) # FROM skipped_content WHERE ..." 
# after the row is inserted; but we need the token to insert in the # index tables *before* inserting to the main 'skipped_content' table token = token_class.from_key(statement.routing_key).value assert TOKEN_BEGIN <= token <= TOKEN_END # Function to be called after the indexes contain their respective # row finalizer = functools.partial(self._skipped_content_add_finalize, statement) return (token, finalizer) @_prepared_select_statement( SkippedContentRow, f"WHERE {' AND '.join(map('%s = ?'.__mod__, HASH_ALGORITHMS))}", ) def skipped_content_get_from_pk( self, content_hashes: Dict[str, bytes], *, statement ) -> Optional[SkippedContentRow]: rows = list( self._execute_with_retries( statement, [content_hashes[algo] or MAGIC_NULL_PK for algo in HASH_ALGORITHMS], ) ) assert len(rows) <= 1 if rows: return SkippedContentRow.from_dict(rows[0]) else: return None @_prepared_select_statement( SkippedContentRow, f"WHERE token({', '.join(SkippedContentRow.PARTITION_KEY)}) = ?", ) def skipped_content_get_from_token( self, token, *, statement ) -> Iterable[SkippedContentRow]: return map( SkippedContentRow.from_dict, self._execute_with_retries(statement, [token]) ) ########################## # 'skipped_content_by_*' tables ########################## def skipped_content_index_add_one( self, algo: str, content: SkippedContent, token: int ) -> None: """Adds a row mapping content[algo] to the token of the SkippedContent in the main 'skipped_content' table.""" query = ( f"INSERT INTO skipped_content_by_{algo} ({algo}, target_token) " f"VALUES (%s, %s)" ) self._execute_with_retries( query, [content.get_hash(algo) or MAGIC_NULL_PK, token] ) def skipped_content_get_tokens_from_single_hash( self, algo: str, hash_: bytes ) -> Iterable[int]: assert algo in HASH_ALGORITHMS query = f"SELECT target_token FROM skipped_content_by_{algo} WHERE {algo} = %s" return ( row["target_token"] for row in self._execute_with_retries(query, [hash_]) ) ########################## # 'revision' table ########################## @_prepared_exists_statement("revision") def revision_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement(RevisionRow) def revision_add_one(self, revision: RevisionRow, *, statement) -> None: self._add_one(statement, revision) @_prepared_statement("SELECT id FROM revision WHERE id IN ?") def revision_get_ids(self, revision_ids, *, statement) -> Iterable[int]: return ( row["id"] for row in self._execute_with_retries(statement, [revision_ids]) ) @_prepared_select_statement(RevisionRow, "WHERE id IN ?") def revision_get( self, revision_ids: List[Sha1Git], *, statement ) -> Iterable[RevisionRow]: return map( RevisionRow.from_dict, self._execute_with_retries(statement, [revision_ids]) ) @_prepared_select_statement(RevisionRow, "WHERE token(id) > ? 
LIMIT 1") def revision_get_random(self, *, statement) -> Optional[RevisionRow]: return self._get_random_row(RevisionRow, statement) ########################## # 'revision_parent' table ########################## @_prepared_insert_statement(RevisionParentRow) def revision_parent_add_one( self, revision_parent: RevisionParentRow, *, statement ) -> None: self._add_one(statement, revision_parent) @_prepared_statement("SELECT parent_id FROM revision_parent WHERE id = ?") def revision_parent_get( self, revision_id: Sha1Git, *, statement ) -> Iterable[bytes]: return ( row["parent_id"] for row in self._execute_with_retries(statement, [revision_id]) ) ########################## # 'release' table ########################## @_prepared_exists_statement("release") def release_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement(ReleaseRow) def release_add_one(self, release: ReleaseRow, *, statement) -> None: self._add_one(statement, release) @_prepared_select_statement(ReleaseRow, "WHERE id in ?") def release_get(self, release_ids: List[str], *, statement) -> Iterable[ReleaseRow]: return map( ReleaseRow.from_dict, self._execute_with_retries(statement, [release_ids]) ) @_prepared_select_statement(ReleaseRow, "WHERE token(id) > ? LIMIT 1") def release_get_random(self, *, statement) -> Optional[ReleaseRow]: return self._get_random_row(ReleaseRow, statement) ########################## # 'directory' table ########################## @_prepared_exists_statement("directory") def directory_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement(DirectoryRow) def directory_add_one(self, directory: DirectoryRow, *, statement) -> None: """Called after all calls to directory_entry_add_one, to commit/finalize the directory.""" self._add_one(statement, directory) @_prepared_select_statement(DirectoryRow, "WHERE token(id) > ? LIMIT 1") def directory_get_random(self, *, statement) -> Optional[DirectoryRow]: return self._get_random_row(DirectoryRow, statement) ########################## # 'directory_entry' table ########################## @_prepared_insert_statement(DirectoryEntryRow) def directory_entry_add_one(self, entry: DirectoryEntryRow, *, statement) -> None: self._add_one(statement, entry) @_prepared_select_statement(DirectoryEntryRow, "WHERE directory_id IN ?") def directory_entry_get( self, directory_ids, *, statement ) -> Iterable[DirectoryEntryRow]: return map( DirectoryEntryRow.from_dict, self._execute_with_retries(statement, [directory_ids]), ) ########################## # 'snapshot' table ########################## @_prepared_exists_statement("snapshot") def snapshot_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement(SnapshotRow) def snapshot_add_one(self, snapshot: SnapshotRow, *, statement) -> None: self._add_one(statement, snapshot) @_prepared_select_statement(SnapshotRow, "WHERE token(id) > ? LIMIT 1") def snapshot_get_random(self, *, statement) -> Optional[SnapshotRow]: return self._get_random_row(SnapshotRow, statement) ########################## # 'snapshot_branch' table ########################## @_prepared_insert_statement(SnapshotBranchRow) def snapshot_branch_add_one(self, branch: SnapshotBranchRow, *, statement) -> None: self._add_one(statement, branch) @_prepared_statement( "SELECT ascii_bins_count(target_type) AS counts " "FROM snapshot_branch " "WHERE snapshot_id = ? 
" ) def snapshot_count_branches( self, snapshot_id: Sha1Git, *, statement ) -> Dict[Optional[str], int]: """Returns a dictionary from type names to the number of branches of that type.""" row = self._execute_with_retries(statement, [snapshot_id]).one() (nb_none, counts) = row["counts"] return {None: nb_none, **counts} @_prepared_select_statement( SnapshotBranchRow, "WHERE snapshot_id = ? AND name >= ? LIMIT ?" ) def snapshot_branch_get( self, snapshot_id: Sha1Git, from_: bytes, limit: int, *, statement ) -> Iterable[SnapshotBranchRow]: return map( SnapshotBranchRow.from_dict, self._execute_with_retries(statement, [snapshot_id, from_, limit]), ) ########################## # 'origin' table ########################## @_prepared_insert_statement(OriginRow) def origin_add_one(self, origin: OriginRow, *, statement) -> None: self._add_one(statement, origin) @_prepared_select_statement(OriginRow, "WHERE sha1 = ?") def origin_get_by_sha1(self, sha1: bytes, *, statement) -> Iterable[OriginRow]: return map(OriginRow.from_dict, self._execute_with_retries(statement, [sha1])) def origin_get_by_url(self, url: str) -> Iterable[OriginRow]: return self.origin_get_by_sha1(hash_url(url)) @_prepared_statement( f'SELECT token(sha1) AS tok, {", ".join(OriginRow.cols())} ' f"FROM origin WHERE token(sha1) >= ? LIMIT ?" ) def origin_list( self, start_token: int, limit: int, *, statement ) -> Iterable[Tuple[int, OriginRow]]: """Returns an iterable of (token, origin)""" return ( (row["tok"], OriginRow.from_dict(remove_keys(row, ("tok",)))) for row in self._execute_with_retries(statement, [start_token, limit]) ) @_prepared_select_statement(OriginRow) def origin_iter_all(self, *, statement) -> Iterable[OriginRow]: return map(OriginRow.from_dict, self._execute_with_retries(statement, [])) @_prepared_statement("SELECT next_visit_id FROM origin WHERE sha1 = ?") def _origin_get_next_visit_id(self, origin_sha1: bytes, *, statement) -> int: rows = list(self._execute_with_retries(statement, [origin_sha1])) assert len(rows) == 1 # TODO: error handling return rows[0]["next_visit_id"] @_prepared_statement( "UPDATE origin SET next_visit_id=? WHERE sha1 = ? IF next_visit_id=?" ) def origin_generate_unique_visit_id(self, origin_url: str, *, statement) -> int: origin_sha1 = hash_url(origin_url) next_id = self._origin_get_next_visit_id(origin_sha1) while True: res = list( self._execute_with_retries( statement, [next_id + 1, origin_sha1, next_id] ) ) assert len(res) == 1 if res[0]["[applied]"]: # No data race return next_id else: # Someone else updated it before we did, let's try again next_id = res[0]["next_visit_id"] # TODO: abort after too many attempts return next_id ########################## # 'origin_visit' table ########################## @_prepared_select_statements( OriginVisitRow, { (True, ListOrder.ASC): ( "WHERE origin = ? AND visit > ? ORDER BY visit ASC LIMIT ?" ), (True, ListOrder.DESC): ( "WHERE origin = ? AND visit < ? ORDER BY visit DESC LIMIT ?" ), (False, ListOrder.ASC): "WHERE origin = ? ORDER BY visit ASC LIMIT ?", (False, ListOrder.DESC): "WHERE origin = ? 
ORDER BY visit DESC LIMIT ?", }, ) def origin_visit_get( self, origin_url: str, last_visit: Optional[int], limit: int, order: ListOrder, *, statements, ) -> Iterable[OriginVisitRow]: args: List[Any] = [origin_url] if last_visit is not None: args.append(last_visit) args.append(limit) statement = statements[(last_visit is not None, order)] return map( OriginVisitRow.from_dict, self._execute_with_retries(statement, args) ) @_prepared_insert_statement(OriginVisitRow) def origin_visit_add_one(self, visit: OriginVisitRow, *, statement) -> None: self._add_one(statement, visit) @_prepared_select_statement(OriginVisitRow, "WHERE origin = ? AND visit = ?") def origin_visit_get_one( self, origin_url: str, visit_id: int, *, statement ) -> Optional[OriginVisitRow]: # TODO: error handling rows = list(self._execute_with_retries(statement, [origin_url, visit_id])) if rows: return OriginVisitRow.from_dict(rows[0]) else: return None @_prepared_select_statement(OriginVisitRow, "WHERE origin = ?") def origin_visit_get_all( self, origin_url: str, *, statement ) -> Iterable[OriginVisitRow]: return map( OriginVisitRow.from_dict, self._execute_with_retries(statement, [origin_url]), ) @_prepared_select_statement(OriginVisitRow, "WHERE token(origin) >= ?") def _origin_visit_iter_from( self, min_token: int, *, statement ) -> Iterable[OriginVisitRow]: return map( OriginVisitRow.from_dict, self._execute_with_retries(statement, [min_token]) ) @_prepared_select_statement(OriginVisitRow, "WHERE token(origin) < ?") def _origin_visit_iter_to( self, max_token: int, *, statement ) -> Iterable[OriginVisitRow]: return map( OriginVisitRow.from_dict, self._execute_with_retries(statement, [max_token]) ) def origin_visit_iter(self, start_token: int) -> Iterator[OriginVisitRow]: """Returns all origin visits in order from this token, and wraps around the token space.""" yield from self._origin_visit_iter_from(start_token) yield from self._origin_visit_iter_to(start_token) ########################## # 'origin_visit_status' table ########################## @_prepared_select_statements( OriginVisitStatusRow, { (True, ListOrder.ASC): ( "WHERE origin = ? AND visit = ? AND date >= ? " "ORDER BY visit ASC LIMIT ?" ), (True, ListOrder.DESC): ( "WHERE origin = ? AND visit = ? AND date <= ? " "ORDER BY visit DESC LIMIT ?" ), (False, ListOrder.ASC): ( "WHERE origin = ? AND visit = ? ORDER BY visit ASC LIMIT ?" ), (False, ListOrder.DESC): ( "WHERE origin = ? AND visit = ? ORDER BY visit DESC LIMIT ?" ), }, ) def origin_visit_status_get_range( self, origin: str, visit: int, date_from: Optional[datetime.datetime], limit: int, order: ListOrder, *, statements, ) -> Iterable[OriginVisitStatusRow]: args: List[Any] = [origin, visit] if date_from is not None: args.append(date_from) args.append(limit) statement = statements[(date_from is not None, order)] return map( OriginVisitStatusRow.from_dict, self._execute_with_retries(statement, args) ) @_prepared_insert_statement(OriginVisitStatusRow) def origin_visit_status_add_one( self, visit_update: OriginVisitStatusRow, *, statement ) -> None: self._add_one(statement, visit_update) def origin_visit_status_get_latest( self, origin: str, visit: int, ) -> Optional[OriginVisitStatusRow]: """Given an origin visit id, return its latest origin_visit_status """ return next(self.origin_visit_status_get(origin, visit), None) @_prepared_select_statement( OriginVisitStatusRow, "WHERE origin = ? AND visit = ? 
ORDER BY date DESC" ) def origin_visit_status_get( self, origin: str, visit: int, *, statement, ) -> Iterator[OriginVisitStatusRow]: """Return all origin visit statuses for a given visit """ return map( OriginVisitStatusRow.from_dict, self._execute_with_retries(statement, [origin, visit]), ) ########################## # 'metadata_authority' table ########################## @_prepared_insert_statement(MetadataAuthorityRow) def metadata_authority_add(self, authority: MetadataAuthorityRow, *, statement): self._add_one(statement, authority) @_prepared_select_statement(MetadataAuthorityRow, "WHERE type = ? AND url = ?") def metadata_authority_get( self, type, url, *, statement ) -> Optional[MetadataAuthorityRow]: rows = list(self._execute_with_retries(statement, [type, url])) if rows: return MetadataAuthorityRow.from_dict(rows[0]) else: return None ########################## # 'metadata_fetcher' table ########################## @_prepared_insert_statement(MetadataFetcherRow) def metadata_fetcher_add(self, fetcher, *, statement): self._add_one(statement, fetcher) @_prepared_select_statement(MetadataFetcherRow, "WHERE name = ? AND version = ?") def metadata_fetcher_get( self, name, version, *, statement ) -> Optional[MetadataFetcherRow]: rows = list(self._execute_with_retries(statement, [name, version])) if rows: return MetadataFetcherRow.from_dict(rows[0]) else: return None ######################### # 'raw_extrinsic_metadata' table ######################### @_prepared_insert_statement(RawExtrinsicMetadataRow) def raw_extrinsic_metadata_add(self, raw_extrinsic_metadata, *, statement): self._add_one(statement, raw_extrinsic_metadata) @_prepared_select_statement( RawExtrinsicMetadataRow, - "WHERE id=? AND authority_url=? AND discovery_date>? AND authority_type=?", + "WHERE target=? AND authority_url=? AND discovery_date>? AND authority_type=?", ) def raw_extrinsic_metadata_get_after_date( self, - id: str, + target: str, authority_type: str, authority_url: str, after: datetime.datetime, *, statement, ) -> Iterable[RawExtrinsicMetadataRow]: return map( RawExtrinsicMetadataRow.from_dict, self._execute_with_retries( - statement, [id, authority_url, after, authority_type] + statement, [target, authority_url, after, authority_type] ), ) @_prepared_select_statement( RawExtrinsicMetadataRow, - "WHERE id=? AND authority_type=? AND authority_url=? " + "WHERE target=? AND authority_type=? AND authority_url=? " "AND (discovery_date, fetcher_name, fetcher_version) > (?, ?, ?)", ) def raw_extrinsic_metadata_get_after_date_and_fetcher( self, - id: str, + target: str, authority_type: str, authority_url: str, after_date: datetime.datetime, after_fetcher_name: str, after_fetcher_version: str, *, statement, ) -> Iterable[RawExtrinsicMetadataRow]: return map( RawExtrinsicMetadataRow.from_dict, self._execute_with_retries( statement, [ - id, + target, authority_type, authority_url, after_date, after_fetcher_name, after_fetcher_version, ], ), ) @_prepared_select_statement( - RawExtrinsicMetadataRow, "WHERE id=? AND authority_url=? AND authority_type=?" + RawExtrinsicMetadataRow, + "WHERE target=? AND authority_url=? 
AND authority_type=?", ) def raw_extrinsic_metadata_get( - self, id: str, authority_type: str, authority_url: str, *, statement + self, target: str, authority_type: str, authority_url: str, *, statement ) -> Iterable[RawExtrinsicMetadataRow]: return map( RawExtrinsicMetadataRow.from_dict, - self._execute_with_retries(statement, [id, authority_url, authority_type]), + self._execute_with_retries( + statement, [target, authority_url, authority_type] + ), ) ########################## # Miscellaneous ########################## @_prepared_statement("SELECT uuid() FROM revision LIMIT 1;") def check_read(self, *, statement): self._execute_with_retries(statement, []) @_prepared_select_statement(ObjectCountRow, "WHERE partition_key=0") def stat_counters(self, *, statement) -> Iterable[ObjectCountRow]: return map(ObjectCountRow.from_dict, self._execute_with_retries(statement, [])) diff --git a/swh/storage/cassandra/model.py b/swh/storage/cassandra/model.py index 500b147c..740a8802 100644 --- a/swh/storage/cassandra/model.py +++ b/swh/storage/cassandra/model.py @@ -1,277 +1,277 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Classes representing tables in the Cassandra database. They are very close to classes found in swh.model.model, but most of them are subtly different: * Large objects are split into other classes (eg. RevisionRow has no 'parents' field, because parents are stored in a different table, represented by RevisionParentRow) * They have a "cols" field, which returns the list of column names of the table * They only use types that map directly to Cassandra's schema (ie. no enums) Therefore, this model doesn't reuse swh.model.model, except for types that can be mapped to UDTs (Person and TimestampWithTimezone). """ import dataclasses import datetime from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, TypeVar from swh.model.model import Person, TimestampWithTimezone MAGIC_NULL_PK = b"" """ NULLs (or all-empty blobs) are not allowed in primary keys; instead we use a special value that can't possibly be a valid hash. 
""" T = TypeVar("T", bound="BaseRow") class BaseRow: TABLE: ClassVar[str] PARTITION_KEY: ClassVar[Tuple[str, ...]] CLUSTERING_KEY: ClassVar[Tuple[str, ...]] = () @classmethod def from_dict(cls: Type[T], d: Dict[str, Any]) -> T: return cls(**d) # type: ignore @classmethod def cols(cls) -> List[str]: return [field.name for field in dataclasses.fields(cls)] def to_dict(self) -> Dict[str, Any]: return dataclasses.asdict(self) @dataclasses.dataclass class ContentRow(BaseRow): TABLE = "content" PARTITION_KEY = ("sha1", "sha1_git", "sha256", "blake2s256") sha1: bytes sha1_git: bytes sha256: bytes blake2s256: bytes length: int ctime: datetime.datetime status: str @dataclasses.dataclass class SkippedContentRow(BaseRow): TABLE = "skipped_content" PARTITION_KEY = ("sha1", "sha1_git", "sha256", "blake2s256") sha1: Optional[bytes] sha1_git: Optional[bytes] sha256: Optional[bytes] blake2s256: Optional[bytes] length: Optional[int] ctime: Optional[datetime.datetime] status: str reason: str origin: str @classmethod def from_dict(cls, d: Dict[str, Any]) -> "SkippedContentRow": d = d.copy() for k in ("sha1", "sha1_git", "sha256", "blake2s256"): if d[k] == MAGIC_NULL_PK: d[k] = None return super().from_dict(d) @dataclasses.dataclass class DirectoryRow(BaseRow): TABLE = "directory" PARTITION_KEY = ("id",) id: bytes @dataclasses.dataclass class DirectoryEntryRow(BaseRow): TABLE = "directory_entry" PARTITION_KEY = ("directory_id",) CLUSTERING_KEY = ("name",) directory_id: bytes name: bytes target: bytes perms: int type: str @dataclasses.dataclass class RevisionRow(BaseRow): TABLE = "revision" PARTITION_KEY = ("id",) id: bytes date: Optional[TimestampWithTimezone] committer_date: Optional[TimestampWithTimezone] type: str directory: bytes message: bytes author: Person committer: Person synthetic: bool metadata: str extra_headers: dict @dataclasses.dataclass class RevisionParentRow(BaseRow): TABLE = "revision_parent" PARTITION_KEY = ("id",) CLUSTERING_KEY = ("parent_rank",) id: bytes parent_rank: int parent_id: bytes @dataclasses.dataclass class ReleaseRow(BaseRow): TABLE = "release" PARTITION_KEY = ("id",) id: bytes target_type: str target: bytes date: TimestampWithTimezone name: bytes message: bytes author: Person synthetic: bool @dataclasses.dataclass class SnapshotRow(BaseRow): TABLE = "snapshot" PARTITION_KEY = ("id",) id: bytes @dataclasses.dataclass class SnapshotBranchRow(BaseRow): TABLE = "snapshot_branch" PARTITION_KEY = ("snapshot_id",) CLUSTERING_KEY = ("name",) snapshot_id: bytes name: bytes target_type: Optional[str] target: Optional[bytes] @dataclasses.dataclass class OriginVisitRow(BaseRow): TABLE = "origin_visit" PARTITION_KEY = ("origin",) CLUSTERING_KEY = ("visit",) origin: str visit: int date: datetime.datetime type: str @dataclasses.dataclass class OriginVisitStatusRow(BaseRow): TABLE = "origin_visit_status" PARTITION_KEY = ("origin",) CLUSTERING_KEY = ("visit", "date") origin: str visit: int date: datetime.datetime status: str metadata: str snapshot: bytes @dataclasses.dataclass class OriginRow(BaseRow): TABLE = "origin" PARTITION_KEY = ("sha1",) sha1: bytes url: str next_visit_id: int @dataclasses.dataclass class MetadataAuthorityRow(BaseRow): TABLE = "metadata_authority" PARTITION_KEY = ("url",) CLUSTERING_KEY = ("type",) url: str type: str metadata: str @dataclasses.dataclass class MetadataFetcherRow(BaseRow): TABLE = "metadata_fetcher" PARTITION_KEY = ("name",) CLUSTERING_KEY = ("version",) name: str version: str metadata: str @dataclasses.dataclass class RawExtrinsicMetadataRow(BaseRow): 
TABLE = "raw_extrinsic_metadata" - PARTITION_KEY = ("id",) + PARTITION_KEY = ("target",) CLUSTERING_KEY = ( "authority_type", "authority_url", "discovery_date", "fetcher_name", "fetcher_version", ) type: str - id: str + target: str authority_type: str authority_url: str discovery_date: datetime.datetime fetcher_name: str fetcher_version: str format: str metadata: bytes origin: Optional[str] visit: Optional[int] snapshot: Optional[str] release: Optional[str] revision: Optional[str] path: Optional[bytes] directory: Optional[str] @dataclasses.dataclass class ObjectCountRow(BaseRow): TABLE = "object_count" PARTITION_KEY = ("partition_key",) CLUSTERING_KEY = ("object_type",) partition_key: int object_type: str count: int diff --git a/swh/storage/cassandra/schema.py b/swh/storage/cassandra/schema.py index 3c393a1e..9c099d6c 100644 --- a/swh/storage/cassandra/schema.py +++ b/swh/storage/cassandra/schema.py @@ -1,282 +1,282 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information CREATE_TABLES_QUERIES = """ CREATE OR REPLACE FUNCTION ascii_bins_count_sfunc ( state tuple>, -- (nb_none, map) bin_name ascii ) CALLED ON NULL INPUT RETURNS tuple> LANGUAGE java AS $$ if (bin_name == null) { state.setInt(0, state.getInt(0) + 1); } else { Map counters = state.getMap( 1, String.class, Integer.class); Integer nb = counters.get(bin_name); if (nb == null) { nb = 0; } counters.put(bin_name, nb + 1); state.setMap(1, counters, String.class, Integer.class); } return state; $$ ; CREATE OR REPLACE AGGREGATE ascii_bins_count ( ascii ) SFUNC ascii_bins_count_sfunc STYPE tuple> INITCOND (0, {}) ; CREATE TYPE IF NOT EXISTS microtimestamp ( seconds bigint, microseconds int ); CREATE TYPE IF NOT EXISTS microtimestamp_with_timezone ( timestamp frozen, offset smallint, negative_utc boolean ); CREATE TYPE IF NOT EXISTS person ( fullname blob, name blob, email blob ); CREATE TABLE IF NOT EXISTS content ( sha1 blob, sha1_git blob, sha256 blob, blake2s256 blob, length bigint, ctime timestamp, -- creation time, i.e. time of (first) injection into the storage status ascii, PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256)) ); CREATE TABLE IF NOT EXISTS skipped_content ( sha1 blob, sha1_git blob, sha256 blob, blake2s256 blob, length bigint, ctime timestamp, -- creation time, i.e. time of (first) injection into the storage status ascii, reason text, origin text, PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256)) ); CREATE TABLE IF NOT EXISTS revision ( id blob PRIMARY KEY, date microtimestamp_with_timezone, committer_date microtimestamp_with_timezone, type ascii, directory blob, -- source code "root" directory message blob, author person, committer person, synthetic boolean, -- true iff revision has been created by Software Heritage metadata text, -- extra metadata as JSON(tarball checksums, etc...) extra_headers frozen> > -- extra commit information as (tuple(key, value), ...) 
); CREATE TABLE IF NOT EXISTS revision_parent ( id blob, parent_rank int, -- parent position in merge commits, 0-based parent_id blob, PRIMARY KEY ((id), parent_rank) ); CREATE TABLE IF NOT EXISTS release ( id blob PRIMARY KEY, target_type ascii, target blob, date microtimestamp_with_timezone, name blob, message blob, author person, synthetic boolean, -- true iff release has been created by Software Heritage ); CREATE TABLE IF NOT EXISTS directory ( id blob PRIMARY KEY, ); CREATE TABLE IF NOT EXISTS directory_entry ( directory_id blob, name blob, -- path name, relative to containing dir target blob, perms int, -- unix-like permissions type ascii, -- target type PRIMARY KEY ((directory_id), name) ); CREATE TABLE IF NOT EXISTS snapshot ( id blob PRIMARY KEY, ); -- For a given snapshot_id, branches are sorted by their name, -- allowing easy pagination. CREATE TABLE IF NOT EXISTS snapshot_branch ( snapshot_id blob, name blob, target_type ascii, target blob, PRIMARY KEY ((snapshot_id), name) ); CREATE TABLE IF NOT EXISTS origin_visit ( origin text, visit bigint, date timestamp, type text, PRIMARY KEY ((origin), visit) ); CREATE TABLE IF NOT EXISTS origin_visit_status ( origin text, visit bigint, date timestamp, status ascii, metadata text, snapshot blob, PRIMARY KEY ((origin), visit, date) ); CREATE TABLE IF NOT EXISTS origin ( sha1 blob PRIMARY KEY, url text, next_visit_id int, -- We need integer visit ids for compatibility with the pgsql -- storage, so we're using lightweight transactions with this trick: -- https://stackoverflow.com/a/29391877/539465 ); CREATE TABLE IF NOT EXISTS metadata_authority ( url text, type ascii, metadata text, PRIMARY KEY ((url), type) ); CREATE TABLE IF NOT EXISTS metadata_fetcher ( name ascii, version ascii, metadata text, PRIMARY KEY ((name), version) ); CREATE TABLE IF NOT EXISTS raw_extrinsic_metadata ( type text, - id text, + target text, -- metadata source authority_type text, authority_url text, discovery_date timestamp, fetcher_name ascii, fetcher_version ascii, -- metadata itself format ascii, metadata blob, -- context origin text, visit bigint, snapshot text, release text, revision text, path blob, directory text, - PRIMARY KEY ((id), authority_type, authority_url, discovery_date, - fetcher_name, fetcher_version) + PRIMARY KEY ((target), authority_type, authority_url, discovery_date, + fetcher_name, fetcher_version) ); CREATE TABLE IF NOT EXISTS object_count ( partition_key smallint, -- Constant, must always be 0 object_type ascii, count counter, PRIMARY KEY ((partition_key), object_type) ); """.split( "\n\n\n" ) CONTENT_INDEX_TEMPLATE = """ -- Secondary table, used for looking up "content" from a single hash CREATE TABLE IF NOT EXISTS content_by_{main_algo} ( {main_algo} blob, target_token bigint, -- value of token(pk) on the "primary" table PRIMARY KEY (({main_algo}), target_token) ); CREATE TABLE IF NOT EXISTS skipped_content_by_{main_algo} ( {main_algo} blob, target_token bigint, -- value of token(pk) on the "primary" table PRIMARY KEY (({main_algo}), target_token) ); """ TABLES = ( "skipped_content content revision revision_parent release " "directory directory_entry snapshot snapshot_branch " "origin_visit origin raw_extrinsic_metadata object_count " "origin_visit_status metadata_authority " "metadata_fetcher" ).split() HASH_ALGORITHMS = ["sha1", "sha1_git", "sha256", "blake2s256"] for main_algo in HASH_ALGORITHMS: CREATE_TABLES_QUERIES.extend( CONTENT_INDEX_TEMPLATE.format( main_algo=main_algo, other_algos=", ".join( [algo for algo in 
HASH_ALGORITHMS if algo != main_algo] ), ).split("\n\n") ) TABLES.append("content_by_%s" % main_algo) TABLES.append("skipped_content_by_%s" % main_algo) diff --git a/swh/storage/cassandra/storage.py b/swh/storage/cassandra/storage.py index f9d650e3..65b062b3 100644 --- a/swh/storage/cassandra/storage.py +++ b/swh/storage/cassandra/storage.py @@ -1,1324 +1,1324 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import base64 import datetime import itertools import json import random import re from typing import ( Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union, ) import attr from swh.core.api.serializers import msgpack_dumps, msgpack_loads from swh.model.hashutil import DEFAULT_ALGORITHMS from swh.model.identifiers import SWHID, parse_swhid from swh.model.model import ( Content, Directory, DirectoryEntry, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, OriginVisit, OriginVisitStatus, RawExtrinsicMetadata, Release, Revision, Sha1Git, SkippedContent, Snapshot, SnapshotBranch, TargetType, ) from swh.storage.interface import ( VISIT_STATUSES, ListOrder, PagedResult, PartialBranches, Sha1, ) from swh.storage.objstorage import ObjStorage from swh.storage.utils import map_optional, now from swh.storage.writer import JournalWriter from . import converters from ..exc import HashCollision, StorageArgumentException from .common import TOKEN_BEGIN, TOKEN_END, hash_url, remove_keys from .cql import CqlRunner from .model import ( ContentRow, DirectoryEntryRow, DirectoryRow, MetadataAuthorityRow, MetadataFetcherRow, OriginRow, OriginVisitRow, OriginVisitStatusRow, RawExtrinsicMetadataRow, RevisionParentRow, SkippedContentRow, SnapshotBranchRow, SnapshotRow, ) from .schema import HASH_ALGORITHMS # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 class CassandraStorage: def __init__(self, hosts, keyspace, objstorage, port=9042, journal_writer=None): self._cql_runner: CqlRunner = CqlRunner(hosts, keyspace, port) self.journal_writer: JournalWriter = JournalWriter(journal_writer) self.objstorage: ObjStorage = ObjStorage(objstorage) def check_config(self, *, check_write: bool) -> bool: self._cql_runner.check_read() return True def _content_get_from_hash(self, algo, hash_) -> Iterable: """From the name of a hash algorithm and a value of that hash, looks up the "hash -> token" secondary table (content_by_{algo}) to get tokens. Then, looks up the main table (content) to get all contents with that token, and filters out contents whose hash doesn't match.""" found_tokens = self._cql_runner.content_get_tokens_from_single_hash(algo, hash_) for token in found_tokens: assert isinstance(token, int), found_tokens # Query the main table ('content'). res = self._cql_runner.content_get_from_token(token) for row in res: # re-check the the hash (in case of murmur3 collision) if getattr(row, algo) == hash_: yield row def _content_add(self, contents: List[Content], with_data: bool) -> Dict: # Filter-out content already in the database. contents = [ c for c in contents if not self._cql_runner.content_get_from_pk(c.to_dict()) ] self.journal_writer.content_add(contents) if with_data: # First insert to the objstorage, if the endpoint is # `content_add` (as opposed to `content_add_metadata`). 
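# --- sketch: index tables before the main 'content' row -----------------------
# A condensed, illustrative outline of the per-content write ordering this
# method enforces once the raw data has gone to the objstorage: the
# content_by_* index rows are written first, pointing at the token
# precomputed by content_add_prepare, and only then is the main row inserted
# via the returned finalizer.  `cql_runner`, `content_row` and the function
# name are stand-ins, not part of this module.
def add_one_content(cql_runner, content_row, content, hash_algorithms) -> None:
    (token, finalizer) = cql_runner.content_add_prepare(content_row)
    for algo in hash_algorithms:
        # Readers can only reach the (not yet written) main row through a
        # token lookup that re-checks the hashes, so this order never
        # exposes a half-inserted content.
        cql_runner.content_index_add_one(algo, content, token)
    finalizer()  # finally write the row in the main 'content' table
# --- end sketch ----------------------------------------------------------------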
# TODO: this should probably be done in concurrently to inserting # in index tables (but still before the main table; so an entry is # only added to the main table after everything else was # successfully inserted. summary = self.objstorage.content_add( c for c in contents if c.status != "absent" ) content_add_bytes = summary["content:add:bytes"] content_add = 0 for content in contents: content_add += 1 # Check for sha1 or sha1_git collisions. This test is not atomic # with the insertion, so it won't detect a collision if both # contents are inserted at the same time, but it's good enough. # # The proper way to do it would probably be a BATCH, but this # would be inefficient because of the number of partitions we # need to affect (len(HASH_ALGORITHMS)+1, which is currently 5) for algo in {"sha1", "sha1_git"}: collisions = [] # Get tokens of 'content' rows with the same value for # sha1/sha1_git rows = self._content_get_from_hash(algo, content.get_hash(algo)) for row in rows: if getattr(row, algo) != content.get_hash(algo): # collision of token(partition key), ignore this # row continue for other_algo in HASH_ALGORITHMS: if getattr(row, other_algo) != content.get_hash(other_algo): # This hash didn't match; discard the row. collisions.append( {k: getattr(row, k) for k in HASH_ALGORITHMS} ) if collisions: collisions.append(content.hashes()) raise HashCollision(algo, content.get_hash(algo), collisions) (token, insertion_finalizer) = self._cql_runner.content_add_prepare( ContentRow(**remove_keys(content.to_dict(), ("data",))) ) # Then add to index tables for algo in HASH_ALGORITHMS: self._cql_runner.content_index_add_one(algo, content, token) # Then to the main table insertion_finalizer() summary = { "content:add": content_add, } if with_data: summary["content:add:bytes"] = content_add_bytes return summary def content_add(self, content: List[Content]) -> Dict: contents = [attr.evolve(c, ctime=now()) for c in content] return self._content_add(list(contents), with_data=True) def content_update( self, contents: List[Dict[str, Any]], keys: List[str] = [] ) -> None: raise NotImplementedError( "content_update is not supported by the Cassandra backend" ) def content_add_metadata(self, content: List[Content]) -> Dict: return self._content_add(content, with_data=False) def content_get_data(self, content: Sha1) -> Optional[bytes]: # FIXME: Make this method support slicing the `data` return self.objstorage.content_get(content) def content_get_partition( self, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Content]: if limit is None: raise StorageArgumentException("limit should not be None") # Compute start and end of the range of tokens covered by the # requested partition partition_size = (TOKEN_END - TOKEN_BEGIN) // nb_partitions range_start = TOKEN_BEGIN + partition_id * partition_size range_end = TOKEN_BEGIN + (partition_id + 1) * partition_size # offset the range start according to the `page_token`. 
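# --- sketch: splitting the token space into partitions -------------------------
# A self-contained illustration of the arithmetic used just above: the signed
# 64-bit token space is cut into nb_partitions equal ranges, and a page_token
# simply moves the start of the requested range forward.  The constants mirror
# swh.storage.cassandra.common; the helper name is illustrative.
from typing import Tuple

TOKEN_BEGIN = -(2 ** 63)
TOKEN_END = 2 ** 63 - 1

def partition_range(partition_id: int, nb_partitions: int) -> Tuple[int, int]:
    partition_size = (TOKEN_END - TOKEN_BEGIN) // nb_partitions
    range_start = TOKEN_BEGIN + partition_id * partition_size
    range_end = TOKEN_BEGIN + (partition_id + 1) * partition_size
    return range_start, range_end

# Partition 0 of 4 covers the first quarter of the token space:
start, end = partition_range(0, 4)
assert start == TOKEN_BEGIN
assert end == TOKEN_BEGIN + (TOKEN_END - TOKEN_BEGIN) // 4
# --- end sketch ------------------------------------------------------------------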
if page_token is not None: if not (range_start <= int(page_token) <= range_end): raise StorageArgumentException("Invalid page_token.") range_start = int(page_token) next_page_token: Optional[str] = None rows = self._cql_runner.content_get_token_range( range_start, range_end, limit + 1 ) contents = [] for counter, (tok, row) in enumerate(rows): if row.status == "absent": continue row_d = row.to_dict() if counter >= limit: next_page_token = str(tok) break row_d.pop("ctime") contents.append(Content(**row_d)) assert len(contents) <= limit return PagedResult(results=contents, next_page_token=next_page_token) def content_get(self, contents: List[Sha1]) -> List[Optional[Content]]: contents_by_sha1: Dict[Sha1, Optional[Content]] = {} for sha1 in contents: # Get all (sha1, sha1_git, sha256, blake2s256) whose sha1 # matches the argument, from the index table ('content_by_sha1') for row in self._content_get_from_hash("sha1", sha1): row_d = row.to_dict() row_d.pop("ctime") content = Content(**row_d) contents_by_sha1[content.sha1] = content return [contents_by_sha1.get(sha1) for sha1 in contents] def content_find(self, content: Dict[str, Any]) -> List[Content]: # Find an algorithm that is common to all the requested contents. # It will be used to do an initial filtering efficiently. filter_algos = list(set(content).intersection(HASH_ALGORITHMS)) if not filter_algos: raise StorageArgumentException( "content keys must contain at least one " f"of: {', '.join(sorted(HASH_ALGORITHMS))}" ) common_algo = filter_algos[0] results = [] rows = self._content_get_from_hash(common_algo, content[common_algo]) for row in rows: # Re-check all the hashes, in case of collisions (either of the # hash of the partition key, or the hashes in it) for algo in HASH_ALGORITHMS: if content.get(algo) and getattr(row, algo) != content[algo]: # This hash didn't match; discard the row. break else: # All hashes match, keep this row. row_d = row.to_dict() row_d["ctime"] = row.ctime.replace(tzinfo=datetime.timezone.utc) results.append(Content(**row_d)) return results def content_missing( self, contents: List[Dict[str, Any]], key_hash: str = "sha1" ) -> Iterable[bytes]: if key_hash not in DEFAULT_ALGORITHMS: raise StorageArgumentException( "key_hash should be one of {','.join(DEFAULT_ALGORITHMS)}" ) for content in contents: res = self.content_find(content) if not res: yield content[key_hash] def content_missing_per_sha1(self, contents: List[bytes]) -> Iterable[bytes]: return self.content_missing([{"sha1": c} for c in contents]) def content_missing_per_sha1_git( self, contents: List[Sha1Git] ) -> Iterable[Sha1Git]: return self.content_missing( [{"sha1_git": c for c in contents}], key_hash="sha1_git" ) def content_get_random(self) -> Sha1Git: content = self._cql_runner.content_get_random() assert content, "Could not find any content" return content.sha1_git def _skipped_content_add(self, contents: List[SkippedContent]) -> Dict: # Filter-out content already in the database. 
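# --- sketch: the "fetch limit + 1" pagination pattern ---------------------------
# Several endpoints in this class (content_get_partition above, origin_list,
# origin_visit_get, ...) page through results by asking the backend for one
# extra row: if it comes back, it is not returned to the caller, but its key
# becomes the next_page_token.  A minimal stand-alone version of that logic,
# with generic integer keys instead of Cassandra tokens:
from typing import List, Optional, Sequence, Tuple

def paginate(rows: Sequence[Tuple[int, str]], limit: int) -> Tuple[List[str], Optional[str]]:
    """`rows` is what a backend query with `LIMIT limit + 1` would return,
    as (key, value) pairs already sorted by key."""
    window = list(rows[: limit + 1])
    next_page_token: Optional[str] = None
    if len(window) > limit:
        # The extra row is only used to know where the next page starts.
        next_page_token = str(window[limit][0])
        window = window[:limit]
    return [value for (_key, value) in window], next_page_token

values, token = paginate([(1, "a"), (2, "b"), (3, "c")], limit=2)
assert values == ["a", "b"] and token == "3"
# --- end sketch -------------------------------------------------------------------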
contents = [ c for c in contents if not self._cql_runner.skipped_content_get_from_pk(c.to_dict()) ] self.journal_writer.skipped_content_add(contents) for content in contents: # Compute token of the row in the main table (token, insertion_finalizer) = self._cql_runner.skipped_content_add_prepare( SkippedContentRow.from_dict({"origin": None, **content.to_dict()}) ) # Then add to index tables for algo in HASH_ALGORITHMS: self._cql_runner.skipped_content_index_add_one(algo, content, token) # Then to the main table insertion_finalizer() return {"skipped_content:add": len(contents)} def skipped_content_add(self, content: List[SkippedContent]) -> Dict: contents = [attr.evolve(c, ctime=now()) for c in content] return self._skipped_content_add(contents) def skipped_content_missing( self, contents: List[Dict[str, Any]] ) -> Iterable[Dict[str, Any]]: for content in contents: if not self._cql_runner.skipped_content_get_from_pk(content): yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS} def directory_add(self, directories: List[Directory]) -> Dict: # Filter out directories that are already inserted. missing = self.directory_missing([dir_.id for dir_ in directories]) directories = [dir_ for dir_ in directories if dir_.id in missing] self.journal_writer.directory_add(directories) for directory in directories: # Add directory entries to the 'directory_entry' table for entry in directory.entries: self._cql_runner.directory_entry_add_one( DirectoryEntryRow(directory_id=directory.id, **entry.to_dict()) ) # Add the directory *after* adding all the entries, so someone # calling snapshot_get_branch in the meantime won't end up # with half the entries. self._cql_runner.directory_add_one(DirectoryRow(id=directory.id)) return {"directory:add": len(directories)} def directory_missing(self, directories: List[Sha1Git]) -> Iterable[Sha1Git]: return self._cql_runner.directory_missing(directories) def _join_dentry_to_content(self, dentry: DirectoryEntry) -> Dict[str, Any]: contents: Union[List[Content], List[SkippedContentRow]] keys = ( "status", "sha1", "sha1_git", "sha256", "length", ) ret = dict.fromkeys(keys) ret.update(dentry.to_dict()) if ret["type"] == "file": contents = self.content_find({"sha1_git": ret["target"]}) if not contents: tokens = list( self._cql_runner.skipped_content_get_tokens_from_single_hash( "sha1_git", ret["target"] ) ) if tokens: contents = list( self._cql_runner.skipped_content_get_from_token(tokens[0]) ) if contents: content = contents[0] for key in keys: ret[key] = getattr(content, key) return ret def _directory_ls( self, directory_id: Sha1Git, recursive: bool, prefix: bytes = b"" ) -> Iterable[Dict[str, Any]]: if self.directory_missing([directory_id]): return rows = list(self._cql_runner.directory_entry_get([directory_id])) for row in rows: entry_d = row.to_dict() # Build and yield the directory entry dict del entry_d["directory_id"] entry = DirectoryEntry.from_dict(entry_d) ret = self._join_dentry_to_content(entry) ret["name"] = prefix + ret["name"] ret["dir_id"] = directory_id yield ret if recursive and ret["type"] == "dir": yield from self._directory_ls( ret["target"], True, prefix + ret["name"] + b"/" ) def directory_entry_get_by_path( self, directory: Sha1Git, paths: List[bytes] ) -> Optional[Dict[str, Any]]: return self._directory_entry_get_by_path(directory, paths, b"") def _directory_entry_get_by_path( self, directory: Sha1Git, paths: List[bytes], prefix: bytes ) -> Optional[Dict[str, Any]]: if not paths: return None contents = list(self.directory_ls(directory)) if not 
contents: return None def _get_entry(entries, name): """Finds the entry with the requested name, prepends the prefix (to get its full path), and returns it. If no entry has that name, returns None.""" for entry in entries: if entry["name"] == name: entry = entry.copy() entry["name"] = prefix + entry["name"] return entry first_item = _get_entry(contents, paths[0]) if len(paths) == 1: return first_item if not first_item or first_item["type"] != "dir": return None return self._directory_entry_get_by_path( first_item["target"], paths[1:], prefix + paths[0] + b"/" ) def directory_ls( self, directory: Sha1Git, recursive: bool = False ) -> Iterable[Dict[str, Any]]: yield from self._directory_ls(directory, recursive) def directory_get_random(self) -> Sha1Git: directory = self._cql_runner.directory_get_random() assert directory, "Could not find any directory" return directory.id def revision_add(self, revisions: List[Revision]) -> Dict: # Filter-out revisions already in the database missing = self.revision_missing([rev.id for rev in revisions]) revisions = [rev for rev in revisions if rev.id in missing] self.journal_writer.revision_add(revisions) for revision in revisions: revobject = converters.revision_to_db(revision) if revobject: # Add parents first for (rank, parent) in enumerate(revision.parents): self._cql_runner.revision_parent_add_one( RevisionParentRow( id=revobject.id, parent_rank=rank, parent_id=parent ) ) # Then write the main revision row. # Writing this after all parents were written ensures that # read endpoints don't return a partial view while writing # the parents self._cql_runner.revision_add_one(revobject) return {"revision:add": len(revisions)} def revision_missing(self, revisions: List[Sha1Git]) -> Iterable[Sha1Git]: return self._cql_runner.revision_missing(revisions) def revision_get(self, revision_ids: List[Sha1Git]) -> List[Optional[Revision]]: rows = self._cql_runner.revision_get(revision_ids) revisions: Dict[Sha1Git, Revision] = {} for row in rows: # TODO: use a single query to get all parents? # (it might have lower latency, but requires more code and more # bandwidth, because revision id would be part of each returned # row) parents = tuple(self._cql_runner.revision_parent_get(row.id)) # parent_rank is the clustering key, so results are already # sorted by rank. rev = converters.revision_from_db(row, parents=parents) revisions[rev.id] = rev return [revisions.get(rev_id) for rev_id in revision_ids] def _get_parent_revs( self, rev_ids: Iterable[Sha1Git], seen: Set[Sha1Git], limit: Optional[int], short: bool, ) -> Union[ Iterable[Dict[str, Any]], Iterable[Tuple[Sha1Git, Tuple[Sha1Git, ...]]], ]: if limit and len(seen) >= limit: return rev_ids = [id_ for id_ in rev_ids if id_ not in seen] if not rev_ids: return seen |= set(rev_ids) # We need this query, even if short=True, to return consistent # results (ie. not return only a subset of a revision's parents # if it is being written) if short: ids = self._cql_runner.revision_get_ids(rev_ids) for id_ in ids: # TODO: use a single query to get all parents? # (it might have less latency, but requires less code and more # bandwidth (because revision id would be part of each returned # row) parents = tuple(self._cql_runner.revision_parent_get(id_)) # parent_rank is the clustering key, so results are already # sorted by rank. yield (id_, parents) yield from self._get_parent_revs(parents, seen, limit, short) else: rows = self._cql_runner.revision_get(rev_ids) for row in rows: # TODO: use a single query to get all parents? 
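# --- sketch: walking the revision graph with a `seen` set -----------------------
# A stand-alone illustration of the traversal pattern used by _get_parent_revs:
# starting from a set of revision ids, repeatedly fetch parents, skip ids that
# were already emitted, and stop once `limit` ids have been seen.  The graph is
# a plain dict here instead of the 'revision_parent' table.
from typing import Dict, Iterable, Iterator, Optional, Set, Tuple

def walk_parents(
    graph: Dict[bytes, Tuple[bytes, ...]],
    rev_ids: Iterable[bytes],
    seen: Set[bytes],
    limit: Optional[int],
) -> Iterator[bytes]:
    if limit and len(seen) >= limit:
        return
    rev_ids = [id_ for id_ in rev_ids if id_ not in seen]
    if not rev_ids:
        return
    seen |= set(rev_ids)
    for id_ in rev_ids:
        yield id_
        yield from walk_parents(graph, graph.get(id_, ()), seen, limit)

graph = {b"c": (b"b",), b"b": (b"a",), b"a": ()}
assert list(walk_parents(graph, [b"c"], set(), limit=None)) == [b"c", b"b", b"a"]
# --- end sketch --------------------------------------------------------------------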
# (it might have less latency, but requires less code and more # bandwidth (because revision id would be part of each returned # row) parents = tuple(self._cql_runner.revision_parent_get(row.id)) # parent_rank is the clustering key, so results are already # sorted by rank. rev = converters.revision_from_db(row, parents=parents) yield rev.to_dict() yield from self._get_parent_revs(parents, seen, limit, short) def revision_log( self, revisions: List[Sha1Git], limit: Optional[int] = None ) -> Iterable[Optional[Dict[str, Any]]]: seen: Set[Sha1Git] = set() yield from self._get_parent_revs(revisions, seen, limit, False) def revision_shortlog( self, revisions: List[Sha1Git], limit: Optional[int] = None ) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]: seen: Set[Sha1Git] = set() yield from self._get_parent_revs(revisions, seen, limit, True) def revision_get_random(self) -> Sha1Git: revision = self._cql_runner.revision_get_random() assert revision, "Could not find any revision" return revision.id def release_add(self, releases: List[Release]) -> Dict: to_add = [] for rel in releases: if rel not in to_add: to_add.append(rel) missing = set(self.release_missing([rel.id for rel in to_add])) to_add = [rel for rel in to_add if rel.id in missing] self.journal_writer.release_add(to_add) for release in to_add: if release: self._cql_runner.release_add_one(converters.release_to_db(release)) return {"release:add": len(to_add)} def release_missing(self, releases: List[Sha1Git]) -> Iterable[Sha1Git]: return self._cql_runner.release_missing(releases) def release_get(self, releases: List[Sha1Git]) -> List[Optional[Release]]: rows = self._cql_runner.release_get(releases) rels: Dict[Sha1Git, Release] = {} for row in rows: release = converters.release_from_db(row) rels[row.id] = release return [rels.get(rel_id) for rel_id in releases] def release_get_random(self) -> Sha1Git: release = self._cql_runner.release_get_random() assert release, "Could not find any release" return release.id def snapshot_add(self, snapshots: List[Snapshot]) -> Dict: missing = self._cql_runner.snapshot_missing([snp.id for snp in snapshots]) snapshots = [snp for snp in snapshots if snp.id in missing] for snapshot in snapshots: self.journal_writer.snapshot_add([snapshot]) # Add branches for (branch_name, branch) in snapshot.branches.items(): if branch is None: target_type: Optional[str] = None target: Optional[bytes] = None else: target_type = branch.target_type.value target = branch.target self._cql_runner.snapshot_branch_add_one( SnapshotBranchRow( snapshot_id=snapshot.id, name=branch_name, target_type=target_type, target=target, ) ) # Add the snapshot *after* adding all the branches, so someone # calling snapshot_get_branch in the meantime won't end up # with half the branches. self._cql_runner.snapshot_add_one(SnapshotRow(id=snapshot.id)) return {"snapshot:add": len(snapshots)} def snapshot_missing(self, snapshots: List[Sha1Git]) -> Iterable[Sha1Git]: return self._cql_runner.snapshot_missing(snapshots) def snapshot_get(self, snapshot_id: Sha1Git) -> Optional[Dict[str, Any]]: d = self.snapshot_get_branches(snapshot_id) if d is None: return None return { "id": d["id"], "branches": { name: branch.to_dict() if branch else None for (name, branch) in d["branches"].items() }, "next_branch": d["next_branch"], } def snapshot_count_branches( self, snapshot_id: Sha1Git ) -> Optional[Dict[Optional[str], int]]: if self._cql_runner.snapshot_missing([snapshot_id]): # Makes sure we don't fetch branches for a snapshot that is # being added. 
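# --- sketch: what snapshot_count_branches returns -------------------------------
# An illustrative pure-Python equivalent of the server-side ascii_bins_count
# aggregate defined in schema.py: it keeps a separate counter for dangling
# branches (NULL target_type) and a per-type histogram, which
# snapshot_count_branches then merges into a single dict keyed by None and the
# target type names.
from collections import Counter
from typing import Dict, Iterable, Optional

def count_branch_types(target_types: Iterable[Optional[str]]) -> Dict[Optional[str], int]:
    nb_none = 0
    counts: Counter = Counter()
    for target_type in target_types:
        if target_type is None:
            nb_none += 1
        else:
            counts[target_type] += 1
    return {None: nb_none, **counts}

assert count_branch_types(["revision", "revision", None, "release"]) == {
    None: 1,
    "revision": 2,
    "release": 1,
}
# --- end sketch -------------------------------------------------------------------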
return None return self._cql_runner.snapshot_count_branches(snapshot_id) def snapshot_get_branches( self, snapshot_id: Sha1Git, branches_from: bytes = b"", branches_count: int = 1000, target_types: Optional[List[str]] = None, ) -> Optional[PartialBranches]: if self._cql_runner.snapshot_missing([snapshot_id]): # Makes sure we don't fetch branches for a snapshot that is # being added. return None branches: List = [] while len(branches) < branches_count + 1: new_branches = list( self._cql_runner.snapshot_branch_get( snapshot_id, branches_from, branches_count + 1 ) ) if not new_branches: break branches_from = new_branches[-1].name new_branches_filtered = new_branches # Filter by target_type if target_types: new_branches_filtered = [ branch for branch in new_branches_filtered if branch.target is not None and branch.target_type in target_types ] branches.extend(new_branches_filtered) if len(new_branches) < branches_count + 1: break if len(branches) > branches_count: last_branch = branches.pop(-1).name else: last_branch = None return PartialBranches( id=snapshot_id, branches={ branch.name: None if branch.target is None else SnapshotBranch( target=branch.target, target_type=TargetType(branch.target_type) ) for branch in branches }, next_branch=last_branch, ) def snapshot_get_random(self) -> Sha1Git: snapshot = self._cql_runner.snapshot_get_random() assert snapshot, "Could not find any snapshot" return snapshot.id def object_find_by_sha1_git(self, ids: List[Sha1Git]) -> Dict[Sha1Git, List[Dict]]: results: Dict[Sha1Git, List[Dict]] = {id_: [] for id_ in ids} missing_ids = set(ids) # Mind the order, revision is the most likely one for a given ID, # so we check revisions first. queries: List[Tuple[str, Callable[[List[Sha1Git]], List[Sha1Git]]]] = [ ("revision", self._cql_runner.revision_missing), ("release", self._cql_runner.release_missing), ("content", self._cql_runner.content_missing_by_sha1_git), ("directory", self._cql_runner.directory_missing), ] for (object_type, query_fn) in queries: found_ids = missing_ids - set(query_fn(list(missing_ids))) for sha1_git in found_ids: results[sha1_git].append( {"sha1_git": sha1_git, "type": object_type,} ) missing_ids.remove(sha1_git) if not missing_ids: # We found everything, skipping the next queries. 
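# --- sketch: typing sha1_git ids via *_missing queries ---------------------------
# A stand-alone version of the lookup strategy used here: each object type only
# exposes a "which of these ids are missing?" query, so the type of an id is
# found by subtracting the missing set from the requested set, most likely type
# first.  The callables below are stand-ins for the _cql_runner methods.
from typing import Callable, Dict, Iterable, List, Set, Tuple

def find_types(
    ids: List[bytes],
    queries: List[Tuple[str, Callable[[List[bytes]], Iterable[bytes]]]],
) -> Dict[bytes, List[str]]:
    results: Dict[bytes, List[str]] = {id_: [] for id_ in ids}
    missing_ids: Set[bytes] = set(ids)
    for (object_type, missing_fn) in queries:
        found_ids = missing_ids - set(missing_fn(list(missing_ids)))
        for id_ in found_ids:
            results[id_].append(object_type)
            missing_ids.remove(id_)
        if not missing_ids:
            break
    return results

revisions = {b"\x01"}
releases = {b"\x02"}
queries = [
    ("revision", lambda candidates: [i for i in candidates if i not in revisions]),
    ("release", lambda candidates: [i for i in candidates if i not in releases]),
]
assert find_types([b"\x01", b"\x02", b"\x03"], queries) == {
    b"\x01": ["revision"],
    b"\x02": ["release"],
    b"\x03": [],
}
# --- end sketch --------------------------------------------------------------------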
break return results def origin_get(self, origins: List[str]) -> Iterable[Optional[Origin]]: return [self.origin_get_one(origin) for origin in origins] def origin_get_one(self, origin_url: str) -> Optional[Origin]: """Given an origin url, return the origin if it exists, None otherwise """ rows = list(self._cql_runner.origin_get_by_url(origin_url)) if rows: assert len(rows) == 1 return Origin(url=rows[0].url) else: return None def origin_get_by_sha1(self, sha1s: List[bytes]) -> List[Optional[Dict[str, Any]]]: results = [] for sha1 in sha1s: rows = list(self._cql_runner.origin_get_by_sha1(sha1)) origin = {"url": rows[0].url} if rows else None results.append(origin) return results def origin_list( self, page_token: Optional[str] = None, limit: int = 100 ) -> PagedResult[Origin]: # Compute what token to begin the listing from start_token = TOKEN_BEGIN if page_token: start_token = int(page_token) if not (TOKEN_BEGIN <= start_token <= TOKEN_END): raise StorageArgumentException("Invalid page_token.") next_page_token = None origins = [] # Take one more origin so we can reuse it as the next page token if any for (tok, row) in self._cql_runner.origin_list(start_token, limit + 1): origins.append(Origin(url=row.url)) # keep reference of the last id for pagination purposes last_id = tok if len(origins) > limit: # last origin id is the next page token next_page_token = str(last_id) # excluding that origin from the result to respect the limit size origins = origins[:limit] assert len(origins) <= limit return PagedResult(results=origins, next_page_token=next_page_token) def origin_search( self, url_pattern: str, page_token: Optional[str] = None, limit: int = 50, regexp: bool = False, with_visit: bool = False, ) -> PagedResult[Origin]: # TODO: remove this endpoint, swh-search should be used instead. 
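# --- sketch: offset-based paging in origin_search --------------------------------
# Unlike the token- and date-based cursors used elsewhere in this class,
# origin_search keeps an integer offset in its page_token: filter everything,
# slice [offset : offset + limit + 1], and if an extra element shows up the next
# token is simply offset + limit.  A minimal stand-alone version:
from typing import List, Optional, Sequence, Tuple

def search_page(
    matches: Sequence[str], page_token: Optional[str], limit: int
) -> Tuple[List[str], Optional[str]]:
    offset = int(page_token) if page_token else 0
    window = list(matches[offset : offset + limit + 1])
    next_page_token: Optional[str] = None
    if len(window) > limit:
        next_page_token = str(offset + limit)
        window = window[:limit]
    return window, next_page_token

urls = ["https://example.org/a", "https://example.org/b", "https://example.org/c"]
assert search_page(urls, None, limit=2) == (urls[:2], "2")
assert search_page(urls, "2", limit=2) == ([urls[2]], None)
# --- end sketch ---------------------------------------------------------------------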
next_page_token = None offset = int(page_token) if page_token else 0 origin_rows = [row for row in self._cql_runner.origin_iter_all()] if regexp: pat = re.compile(url_pattern) origin_rows = [row for row in origin_rows if pat.search(row.url)] else: origin_rows = [row for row in origin_rows if url_pattern in row.url] if with_visit: origin_rows = [row for row in origin_rows if row.next_visit_id > 1] origins = [Origin(url=row.url) for row in origin_rows] origins = origins[offset : offset + limit + 1] if len(origins) > limit: # next offset next_page_token = str(offset + limit) # excluding that origin from the result to respect the limit size origins = origins[:limit] assert len(origins) <= limit return PagedResult(results=origins, next_page_token=next_page_token) def origin_count( self, url_pattern: str, regexp: bool = False, with_visit: bool = False ) -> int: raise NotImplementedError( "The Cassandra backend does not implement origin_count" ) def origin_add(self, origins: List[Origin]) -> Dict[str, int]: to_add = [ori for ori in origins if self.origin_get_one(ori.url) is None] # keep only one occurrence of each given origin while keeping the list # sorted as originally given to_add = sorted(set(to_add), key=to_add.index) self.journal_writer.origin_add(to_add) for origin in to_add: self._cql_runner.origin_add_one( OriginRow(sha1=hash_url(origin.url), url=origin.url, next_visit_id=1) ) return {"origin:add": len(to_add)} def origin_visit_add(self, visits: List[OriginVisit]) -> Iterable[OriginVisit]: for visit in visits: origin = self.origin_get_one(visit.origin) if not origin: # Cannot add a visit without an origin raise StorageArgumentException("Unknown origin %s", visit.origin) all_visits = [] nb_visits = 0 for visit in visits: nb_visits += 1 if not visit.visit: visit_id = self._cql_runner.origin_generate_unique_visit_id( visit.origin ) visit = attr.evolve(visit, visit=visit_id) self.journal_writer.origin_visit_add([visit]) self._cql_runner.origin_visit_add_one(OriginVisitRow(**visit.to_dict())) assert visit.visit is not None all_visits.append(visit) self._origin_visit_status_add( OriginVisitStatus( origin=visit.origin, visit=visit.visit, date=visit.date, status="created", snapshot=None, ) ) return all_visits def _origin_visit_status_add(self, visit_status: OriginVisitStatus) -> None: """Add an origin visit status""" self.journal_writer.origin_visit_status_add([visit_status]) self._cql_runner.origin_visit_status_add_one( converters.visit_status_to_row(visit_status) ) def origin_visit_status_add(self, visit_statuses: List[OriginVisitStatus]) -> None: # First round to check existence (fail early if any is ko) for visit_status in visit_statuses: origin_url = self.origin_get_one(visit_status.origin) if not origin_url: raise StorageArgumentException(f"Unknown origin {visit_status.origin}") for visit_status in visit_statuses: self._origin_visit_status_add(visit_status) def _origin_visit_apply_status( self, visit: Dict[str, Any], visit_status: OriginVisitStatusRow ) -> Dict[str, Any]: """Retrieve the latest visit status information for the origin visit. Then merge it with the visit and return it. """ return { # default to the values in visit **visit, # override with the last update **visit_status.to_dict(), # visit['origin'] is the URL (via a join), while # visit_status['origin'] is only an id. 
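# --- sketch: merging a visit with its latest status -------------------------------
# The dict built below starts from the visit, overlays the latest status row,
# then pins "origin" and "date" back to the visit's values (per the comment
# above, the status row's origin is only an id, and its date is the status
# date, not the visit creation date).  The same merge on plain dicts, with
# purely illustrative values:
visit = {"origin": "https://example.org/repo", "visit": 3,
         "date": "2020-09-01", "type": "git"}
status = {"origin": "sha1-of-url", "visit": 3, "date": "2020-09-02",
          "status": "full", "snapshot": b"\x12" * 20}
merged = {**visit, **status, "origin": visit["origin"], "date": visit["date"]}
assert merged["status"] == "full"           # taken from the latest status
assert merged["origin"] == visit["origin"]  # but the URL is kept
assert merged["date"] == visit["date"]      # and so is the visit creation date
# --- end sketch ---------------------------------------------------------------------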
"origin": visit["origin"], # but keep the date of the creation of the origin visit "date": visit["date"], } def _origin_visit_get_latest_status(self, visit: OriginVisit) -> OriginVisitStatus: """Retrieve the latest visit status information for the origin visit object. """ assert visit.visit row = self._cql_runner.origin_visit_status_get_latest(visit.origin, visit.visit) assert row is not None visit_status = converters.row_to_visit_status(row) return attr.evolve(visit_status, origin=visit.origin) @staticmethod def _format_origin_visit_row(visit): return { **visit.to_dict(), "origin": visit.origin, "date": visit.date.replace(tzinfo=datetime.timezone.utc), } def origin_visit_get( self, origin: str, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, ) -> PagedResult[OriginVisit]: if not isinstance(order, ListOrder): raise StorageArgumentException("order must be a ListOrder value") if page_token and not isinstance(page_token, str): raise StorageArgumentException("page_token must be a string.") next_page_token = None visit_from = None if page_token is None else int(page_token) visits: List[OriginVisit] = [] extra_limit = limit + 1 rows = self._cql_runner.origin_visit_get(origin, visit_from, extra_limit, order) for row in rows: visits.append(converters.row_to_visit(row)) assert len(visits) <= extra_limit if len(visits) == extra_limit: visits = visits[:limit] next_page_token = str(visits[-1].visit) return PagedResult(results=visits, next_page_token=next_page_token) def origin_visit_status_get( self, origin: str, visit: int, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, ) -> PagedResult[OriginVisitStatus]: next_page_token = None date_from = None if page_token is not None: date_from = datetime.datetime.fromisoformat(page_token) # Take one more visit status so we can reuse it as the next page token if any rows = self._cql_runner.origin_visit_status_get_range( origin, visit, date_from, limit + 1, order ) visit_statuses = [converters.row_to_visit_status(row) for row in rows] if len(visit_statuses) > limit: # last visit status date is the next page token next_page_token = str(visit_statuses[-1].date) # excluding that visit status from the result to respect the limit size visit_statuses = visit_statuses[:limit] return PagedResult(results=visit_statuses, next_page_token=next_page_token) def origin_visit_find_by_date( self, origin: str, visit_date: datetime.datetime ) -> Optional[OriginVisit]: # Iterator over all the visits of the origin # This should be ok for now, as there aren't too many visits # per origin. 
rows = list(self._cql_runner.origin_visit_get_all(origin)) def key(visit): dt = visit.date.replace(tzinfo=datetime.timezone.utc) - visit_date return (abs(dt), -visit.visit) if rows: return converters.row_to_visit(min(rows, key=key)) return None def origin_visit_get_by(self, origin: str, visit: int) -> Optional[OriginVisit]: row = self._cql_runner.origin_visit_get_one(origin, visit) if row: return converters.row_to_visit(row) return None def origin_visit_get_latest( self, origin: str, type: Optional[str] = None, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[OriginVisit]: if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES): raise StorageArgumentException( f"Unknown allowed statuses {','.join(allowed_statuses)}, only " f"{','.join(VISIT_STATUSES)} authorized" ) # TODO: Do not fetch all visits rows = self._cql_runner.origin_visit_get_all(origin) latest_visit = None for row in rows: visit = self._format_origin_visit_row(row) for status_row in self._cql_runner.origin_visit_status_get( origin, visit["visit"] ): updated_visit = self._origin_visit_apply_status(visit, status_row) if type is not None and updated_visit["type"] != type: continue if allowed_statuses and updated_visit["status"] not in allowed_statuses: continue if require_snapshot and updated_visit["snapshot"] is None: continue # updated_visit is a candidate if latest_visit is not None: if updated_visit["date"] < latest_visit["date"]: continue if updated_visit["visit"] < latest_visit["visit"]: continue latest_visit = updated_visit if latest_visit is None: return None return OriginVisit( origin=latest_visit["origin"], visit=latest_visit["visit"], date=latest_visit["date"], type=latest_visit["type"], ) def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[OriginVisitStatus]: if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES): raise StorageArgumentException( f"Unknown allowed statuses {','.join(allowed_statuses)}, only " f"{','.join(VISIT_STATUSES)} authorized" ) rows = list(self._cql_runner.origin_visit_status_get(origin_url, visit)) # filtering is done python side as we cannot do it server side if allowed_statuses: rows = [row for row in rows if row.status in allowed_statuses] if require_snapshot: rows = [row for row in rows if row.snapshot is not None] if not rows: return None return converters.row_to_visit_status(rows[0]) def origin_visit_status_get_random( self, type: str ) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]: back_in_the_day = now() - datetime.timedelta(weeks=12) # 3 months back # Random position to start iteration at start_token = random.randint(TOKEN_BEGIN, TOKEN_END) # Iterator over all visits, ordered by token(origins) then visit_id rows = self._cql_runner.origin_visit_iter(start_token) for row in rows: visit = converters.row_to_visit(row) visit_status = self._origin_visit_get_latest_status(visit) if visit.date > back_in_the_day and visit_status.status == "full": return visit, visit_status return None def stat_counters(self): rows = self._cql_runner.stat_counters() keys = ( "content", "directory", "origin", "origin_visit", "release", "revision", "skipped_content", "snapshot", ) stats = {key: 0 for key in keys} stats.update({row.object_type: row.count for row in rows}) return stats def refresh_stat_counters(self): pass def raw_extrinsic_metadata_add(self, metadata: List[RawExtrinsicMetadata]) -> None: 
self.journal_writer.raw_extrinsic_metadata_add(metadata) for metadata_entry in metadata: if not self._cql_runner.metadata_authority_get( metadata_entry.authority.type.value, metadata_entry.authority.url ): raise StorageArgumentException( f"Unknown authority {metadata_entry.authority}" ) if not self._cql_runner.metadata_fetcher_get( metadata_entry.fetcher.name, metadata_entry.fetcher.version ): raise StorageArgumentException( f"Unknown fetcher {metadata_entry.fetcher}" ) try: row = RawExtrinsicMetadataRow( type=metadata_entry.type.value, - id=str(metadata_entry.id), + target=str(metadata_entry.target), authority_type=metadata_entry.authority.type.value, authority_url=metadata_entry.authority.url, discovery_date=metadata_entry.discovery_date, fetcher_name=metadata_entry.fetcher.name, fetcher_version=metadata_entry.fetcher.version, format=metadata_entry.format, metadata=metadata_entry.metadata, origin=metadata_entry.origin, visit=metadata_entry.visit, snapshot=map_optional(str, metadata_entry.snapshot), release=map_optional(str, metadata_entry.release), revision=map_optional(str, metadata_entry.revision), path=metadata_entry.path, directory=map_optional(str, metadata_entry.directory), ) self._cql_runner.raw_extrinsic_metadata_add(row) except TypeError as e: raise StorageArgumentException(*e.args) def raw_extrinsic_metadata_get( self, type: MetadataTargetType, - id: Union[str, SWHID], + target: Union[str, SWHID], authority: MetadataAuthority, after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, ) -> PagedResult[RawExtrinsicMetadata]: if type == MetadataTargetType.ORIGIN: - if isinstance(id, SWHID): + if isinstance(target, SWHID): raise StorageArgumentException( f"raw_extrinsic_metadata_get called with type='origin', " - f"but provided id is an SWHID: {id!r}" + f"but provided target is a SWHID: {target!r}" ) else: - if not isinstance(id, SWHID): + if not isinstance(target, SWHID): raise StorageArgumentException( f"raw_extrinsic_metadata_get called with type!='origin', " - f"but provided id is not an SWHID: {id!r}" + f"but provided target is not a SWHID: {target!r}" ) if page_token is not None: (after_date, after_fetcher_name, after_fetcher_url) = msgpack_loads( base64.b64decode(page_token) ) if after and after_date < after: raise StorageArgumentException( "page_token is inconsistent with the value of 'after'." 
) entries = self._cql_runner.raw_extrinsic_metadata_get_after_date_and_fetcher( # noqa - str(id), + str(target), authority.type.value, authority.url, after_date, after_fetcher_name, after_fetcher_url, ) elif after is not None: entries = self._cql_runner.raw_extrinsic_metadata_get_after_date( - str(id), authority.type.value, authority.url, after + str(target), authority.type.value, authority.url, after ) else: entries = self._cql_runner.raw_extrinsic_metadata_get( - str(id), authority.type.value, authority.url + str(target), authority.type.value, authority.url ) if limit: entries = itertools.islice(entries, 0, limit + 1) results = [] for entry in entries: discovery_date = entry.discovery_date.replace(tzinfo=datetime.timezone.utc) - assert str(id) == entry.id + assert str(target) == entry.target result = RawExtrinsicMetadata( type=MetadataTargetType(entry.type), - id=id, + target=target, authority=MetadataAuthority( type=MetadataAuthorityType(entry.authority_type), url=entry.authority_url, ), fetcher=MetadataFetcher( name=entry.fetcher_name, version=entry.fetcher_version, ), discovery_date=discovery_date, format=entry.format, metadata=entry.metadata, origin=entry.origin, visit=entry.visit, snapshot=map_optional(parse_swhid, entry.snapshot), release=map_optional(parse_swhid, entry.release), revision=map_optional(parse_swhid, entry.revision), path=entry.path, directory=map_optional(parse_swhid, entry.directory), ) results.append(result) if len(results) > limit: results.pop() assert len(results) == limit last_result = results[-1] next_page_token: Optional[str] = base64.b64encode( msgpack_dumps( ( last_result.discovery_date, last_result.fetcher.name, last_result.fetcher.version, ) ) ).decode() else: next_page_token = None return PagedResult(next_page_token=next_page_token, results=results,) def metadata_fetcher_add(self, fetchers: List[MetadataFetcher]) -> None: self.journal_writer.metadata_fetcher_add(fetchers) for fetcher in fetchers: self._cql_runner.metadata_fetcher_add( MetadataFetcherRow( name=fetcher.name, version=fetcher.version, metadata=json.dumps(map_optional(dict, fetcher.metadata)), ) ) def metadata_fetcher_get( self, name: str, version: str ) -> Optional[MetadataFetcher]: fetcher = self._cql_runner.metadata_fetcher_get(name, version) if fetcher: return MetadataFetcher( name=fetcher.name, version=fetcher.version, metadata=json.loads(fetcher.metadata), ) else: return None def metadata_authority_add(self, authorities: List[MetadataAuthority]) -> None: self.journal_writer.metadata_authority_add(authorities) for authority in authorities: self._cql_runner.metadata_authority_add( MetadataAuthorityRow( url=authority.url, type=authority.type.value, metadata=json.dumps(map_optional(dict, authority.metadata)), ) ) def metadata_authority_get( self, type: MetadataAuthorityType, url: str ) -> Optional[MetadataAuthority]: authority = self._cql_runner.metadata_authority_get(type.value, url) if authority: return MetadataAuthority( type=MetadataAuthorityType(authority.type), url=authority.url, metadata=json.loads(authority.metadata), ) else: return None def clear_buffers(self, object_types: Sequence[str] = ()) -> None: """Do nothing """ return None def flush(self, object_types: Sequence[str] = ()) -> Dict[str, int]: return {} diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py index 51373eb8..f1589246 100644 --- a/swh/storage/in_memory.py +++ b/swh/storage/in_memory.py @@ -1,626 +1,628 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level 
directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import datetime import functools import random from typing import ( Any, Dict, Generic, Iterable, Iterator, List, Optional, Tuple, Type, TypeVar, Union, ) from swh.model.model import Content, Sha1Git, SkippedContent from swh.storage.cassandra import CassandraStorage from swh.storage.cassandra.model import ( BaseRow, ContentRow, DirectoryEntryRow, DirectoryRow, MetadataAuthorityRow, MetadataFetcherRow, ObjectCountRow, OriginRow, OriginVisitRow, OriginVisitStatusRow, RawExtrinsicMetadataRow, ReleaseRow, RevisionParentRow, RevisionRow, SkippedContentRow, SnapshotBranchRow, SnapshotRow, ) from swh.storage.interface import ListOrder from swh.storage.objstorage import ObjStorage from .common import origin_url_to_sha1 from .writer import JournalWriter TRow = TypeVar("TRow", bound=BaseRow) class Table(Generic[TRow]): def __init__(self, row_class: Type[TRow]): self.row_class = row_class self.primary_key_cols = row_class.PARTITION_KEY + row_class.CLUSTERING_KEY # Map from tokens to clustering keys to rows # These are not actually partitions (or rather, there is one partition # for each token) and they aren't sorted. # But it is good enough if we don't care about performance; # and makes the code a lot simpler. self.data: Dict[int, Dict[Tuple, TRow]] = defaultdict(dict) def __repr__(self): return f"<__module__.Table[{self.row_class.__name__}] object>" def partition_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple: """Returns the partition key of a row (ie. the cells which get hashed into the token.""" if isinstance(row, dict): row_d = row else: row_d = row.to_dict() return tuple(row_d[col] for col in self.row_class.PARTITION_KEY) def clustering_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple: """Returns the clustering key of a row (ie. the cells which are used for sorting rows within a partition.""" if isinstance(row, dict): row_d = row else: row_d = row.to_dict() return tuple(row_d[col] for col in self.row_class.CLUSTERING_KEY) def primary_key(self, row): return self.partition_key(row) + self.clustering_key(row) def primary_key_from_dict(self, d: Dict[str, Any]) -> Tuple: """Returns the primary key (ie. concatenation of partition key and clustering key) of the given dictionary interpreted as a row.""" return tuple(d[col] for col in self.primary_key_cols) def token(self, key: Tuple): """Returns the token of a row (ie. 
the hash of its partition key).""" return hash(key) def get_partition(self, token: int) -> Dict[Tuple, TRow]: """Returns the partition that contains this token.""" return self.data[token] def insert(self, row: TRow): partition = self.data[self.token(self.partition_key(row))] partition[self.clustering_key(row)] = row def split_primary_key(self, key: Tuple) -> Tuple[Tuple, Tuple]: """Returns (partition_key, clustering_key) from a partition key""" assert len(key) == len(self.primary_key_cols) partition_key = key[0 : len(self.row_class.PARTITION_KEY)] clustering_key = key[len(self.row_class.PARTITION_KEY) :] return (partition_key, clustering_key) def get_from_partition_key(self, partition_key: Tuple) -> Iterable[TRow]: """Returns at most one row, from its partition key.""" token = self.token(partition_key) for row in self.get_from_token(token): if self.partition_key(row) == partition_key: yield row def get_from_primary_key(self, primary_key: Tuple) -> Optional[TRow]: """Returns at most one row, from its primary key.""" (partition_key, clustering_key) = self.split_primary_key(primary_key) token = self.token(partition_key) partition = self.get_partition(token) return partition.get(clustering_key) def get_from_token(self, token: int) -> Iterable[TRow]: """Returns all rows whose token (ie. non-cryptographic hash of the partition key) is the one passed as argument.""" return (v for (k, v) in sorted(self.get_partition(token).items())) def iter_all(self) -> Iterator[Tuple[Tuple, TRow]]: return ( (self.primary_key(row), row) for (token, partition) in self.data.items() for (clustering_key, row) in partition.items() ) def get_random(self) -> Optional[TRow]: return random.choice([row for (pk, row) in self.iter_all()]) class InMemoryCqlRunner: def __init__(self): self._contents = Table(ContentRow) self._content_indexes = defaultdict(lambda: defaultdict(set)) self._skipped_contents = Table(ContentRow) self._skipped_content_indexes = defaultdict(lambda: defaultdict(set)) self._directories = Table(DirectoryRow) self._directory_entries = Table(DirectoryEntryRow) self._revisions = Table(RevisionRow) self._revision_parents = Table(RevisionParentRow) self._releases = Table(ReleaseRow) self._snapshots = Table(SnapshotRow) self._snapshot_branches = Table(SnapshotBranchRow) self._origins = Table(OriginRow) self._origin_visits = Table(OriginVisitRow) self._origin_visit_statuses = Table(OriginVisitStatusRow) self._metadata_authorities = Table(MetadataAuthorityRow) self._metadata_fetchers = Table(MetadataFetcherRow) self._raw_extrinsic_metadata = Table(RawExtrinsicMetadataRow) self._stat_counters = defaultdict(int) def increment_counter(self, object_type: str, nb: int): self._stat_counters[object_type] += nb def stat_counters(self) -> Iterable[ObjectCountRow]: for (object_type, count) in self._stat_counters.items(): yield ObjectCountRow(partition_key=0, object_type=object_type, count=count) ########################## # 'content' table ########################## def _content_add_finalize(self, content: ContentRow) -> None: self._contents.insert(content) self.increment_counter("content", 1) def content_add_prepare(self, content: ContentRow): finalizer = functools.partial(self._content_add_finalize, content) return (self._contents.token(self._contents.partition_key(content)), finalizer) def content_get_from_pk( self, content_hashes: Dict[str, bytes] ) -> Optional[ContentRow]: primary_key = self._contents.primary_key_from_dict(content_hashes) return self._contents.get_from_primary_key(primary_key) def 
content_get_from_token(self, token: int) -> Iterable[ContentRow]: return self._contents.get_from_token(token) def content_get_random(self) -> Optional[ContentRow]: return self._contents.get_random() def content_get_token_range( self, start: int, end: int, limit: int, ) -> Iterable[Tuple[int, ContentRow]]: matches = [ (token, row) for (token, partition) in self._contents.data.items() for (clustering_key, row) in partition.items() if start <= token <= end ] matches.sort() return matches[0:limit] ########################## # 'content_by_*' tables ########################## def content_missing_by_sha1_git(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if id_ not in self._content_indexes["sha1_git"]: missing.append(id_) return missing def content_index_add_one(self, algo: str, content: Content, token: int) -> None: self._content_indexes[algo][content.get_hash(algo)].add(token) def content_get_tokens_from_single_hash( self, algo: str, hash_: bytes ) -> Iterable[int]: return self._content_indexes[algo][hash_] ########################## # 'skipped_content' table ########################## def _skipped_content_add_finalize(self, content: SkippedContentRow) -> None: self._skipped_contents.insert(content) self.increment_counter("skipped_content", 1) def skipped_content_add_prepare(self, content: SkippedContentRow): finalizer = functools.partial(self._skipped_content_add_finalize, content) return ( self._skipped_contents.token(self._contents.partition_key(content)), finalizer, ) def skipped_content_get_from_pk( self, content_hashes: Dict[str, bytes] ) -> Optional[SkippedContentRow]: primary_key = self._skipped_contents.primary_key_from_dict(content_hashes) return self._skipped_contents.get_from_primary_key(primary_key) def skipped_content_get_from_token(self, token: int) -> Iterable[SkippedContentRow]: return self._skipped_contents.get_from_token(token) ########################## # 'skipped_content_by_*' tables ########################## def skipped_content_index_add_one( self, algo: str, content: SkippedContent, token: int ) -> None: self._skipped_content_indexes[algo][content.get_hash(algo)].add(token) def skipped_content_get_tokens_from_single_hash( self, algo: str, hash_: bytes ) -> Iterable[int]: return self._skipped_content_indexes[algo][hash_] ########################## # 'directory' table ########################## def directory_missing(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if self._directories.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def directory_add_one(self, directory: DirectoryRow) -> None: self._directories.insert(directory) self.increment_counter("directory", 1) def directory_get_random(self) -> Optional[DirectoryRow]: return self._directories.get_random() ########################## # 'directory_entry' table ########################## def directory_entry_add_one(self, entry: DirectoryEntryRow) -> None: self._directory_entries.insert(entry) def directory_entry_get( self, directory_ids: List[Sha1Git] ) -> Iterable[DirectoryEntryRow]: for id_ in directory_ids: yield from self._directory_entries.get_from_partition_key((id_,)) ########################## # 'revision' table ########################## def revision_missing(self, ids: List[bytes]) -> Iterable[bytes]: missing = [] for id_ in ids: if self._revisions.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def revision_add_one(self, revision: RevisionRow) -> None: self._revisions.insert(revision) self.increment_counter("revision", 1) 
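The `Table` helper above emulates Cassandra's partition/clustering layout with nested dicts: the hashed partition key selects a bucket and the clustering key addresses a row inside it. A minimal sketch of how a row round-trips through it, assuming the modules are importable exactly as laid out in this diff (`Table` is an internal test/in-memory helper, not a public API):

```
from swh.storage.cassandra.model import OriginRow
from swh.storage.common import origin_url_to_sha1
from swh.storage.in_memory import Table

table = Table(OriginRow)
url = "https://example.org/repo.git"
table.insert(OriginRow(sha1=origin_url_to_sha1(url), url=url, next_visit_id=1))

# Rows come back through the hashed partition key, as in the CQL runner above.
rows = list(table.get_from_partition_key((origin_url_to_sha1(url),)))
assert rows and rows[0].url == url
```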
def revision_get_ids(self, revision_ids) -> Iterable[int]: for id_ in revision_ids: if self._revisions.get_from_primary_key((id_,)) is not None: yield id_ def revision_get(self, revision_ids: List[Sha1Git]) -> Iterable[RevisionRow]: for id_ in revision_ids: row = self._revisions.get_from_primary_key((id_,)) if row: yield row def revision_get_random(self) -> Optional[RevisionRow]: return self._revisions.get_random() ########################## # 'revision_parent' table ########################## def revision_parent_add_one(self, revision_parent: RevisionParentRow) -> None: self._revision_parents.insert(revision_parent) def revision_parent_get(self, revision_id: Sha1Git) -> Iterable[bytes]: for parent in self._revision_parents.get_from_partition_key((revision_id,)): yield parent.parent_id ########################## # 'release' table ########################## def release_missing(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if self._releases.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def release_add_one(self, release: ReleaseRow) -> None: self._releases.insert(release) self.increment_counter("release", 1) def release_get(self, release_ids: List[str]) -> Iterable[ReleaseRow]: for id_ in release_ids: row = self._releases.get_from_primary_key((id_,)) if row: yield row def release_get_random(self) -> Optional[ReleaseRow]: return self._releases.get_random() ########################## # 'snapshot' table ########################## def snapshot_missing(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if self._snapshots.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def snapshot_add_one(self, snapshot: SnapshotRow) -> None: self._snapshots.insert(snapshot) self.increment_counter("snapshot", 1) def snapshot_get_random(self) -> Optional[SnapshotRow]: return self._snapshots.get_random() ########################## # 'snapshot_branch' table ########################## def snapshot_branch_add_one(self, branch: SnapshotBranchRow) -> None: self._snapshot_branches.insert(branch) def snapshot_count_branches(self, snapshot_id: Sha1Git) -> Dict[Optional[str], int]: """Returns a dictionary from type names to the number of branches of that type.""" counts: Dict[Optional[str], int] = defaultdict(int) for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)): if branch.target_type is None: target_type = None else: target_type = branch.target_type counts[target_type] += 1 return counts def snapshot_branch_get( self, snapshot_id: Sha1Git, from_: bytes, limit: int ) -> Iterable[SnapshotBranchRow]: count = 0 for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)): if branch.name >= from_: count += 1 yield branch if count >= limit: break ########################## # 'origin' table ########################## def origin_add_one(self, origin: OriginRow) -> None: self._origins.insert(origin) self.increment_counter("origin", 1) def origin_get_by_sha1(self, sha1: bytes) -> Iterable[OriginRow]: return self._origins.get_from_partition_key((sha1,)) def origin_get_by_url(self, url: str) -> Iterable[OriginRow]: return self.origin_get_by_sha1(origin_url_to_sha1(url)) def origin_list( self, start_token: int, limit: int ) -> Iterable[Tuple[int, OriginRow]]: """Returns an iterable of (token, origin)""" matches = [ (token, row) for (token, partition) in self._origins.data.items() for (clustering_key, row) in partition.items() if token >= start_token ] matches.sort() return matches[0:limit] def 
origin_iter_all(self) -> Iterable[OriginRow]: return ( row for (token, partition) in self._origins.data.items() for (clustering_key, row) in partition.items() ) def origin_generate_unique_visit_id(self, origin_url: str) -> int: origin = list(self.origin_get_by_url(origin_url))[0] visit_id = origin.next_visit_id origin.next_visit_id += 1 return visit_id ########################## # 'origin_visit' table ########################## def origin_visit_get( self, origin_url: str, last_visit: Optional[int], limit: int, order: ListOrder, ) -> Iterable[OriginVisitRow]: visits = list(self._origin_visits.get_from_partition_key((origin_url,))) if last_visit is not None: if order == ListOrder.ASC: visits = [v for v in visits if v.visit > last_visit] else: visits = [v for v in visits if v.visit < last_visit] visits.sort(key=lambda v: v.visit, reverse=order == ListOrder.DESC) visits = visits[0:limit] return visits def origin_visit_add_one(self, visit: OriginVisitRow) -> None: self._origin_visits.insert(visit) self.increment_counter("origin_visit", 1) def origin_visit_get_one( self, origin_url: str, visit_id: int ) -> Optional[OriginVisitRow]: return self._origin_visits.get_from_primary_key((origin_url, visit_id)) def origin_visit_get_all(self, origin_url: str) -> Iterable[OriginVisitRow]: return self._origin_visits.get_from_partition_key((origin_url,)) def origin_visit_iter(self, start_token: int) -> Iterator[OriginVisitRow]: """Returns all origin visits in order from this token, and wraps around the token space.""" return ( row for (token, partition) in self._origin_visits.data.items() for (clustering_key, row) in partition.items() ) ########################## # 'origin_visit_status' table ########################## def origin_visit_status_get_range( self, origin: str, visit: int, date_from: Optional[datetime.datetime], limit: int, order: ListOrder, ) -> Iterable[OriginVisitStatusRow]: statuses = list(self.origin_visit_status_get(origin, visit)) if date_from is not None: if order == ListOrder.ASC: statuses = [s for s in statuses if s.date >= date_from] else: statuses = [s for s in statuses if s.date <= date_from] statuses.sort(key=lambda s: s.date, reverse=order == ListOrder.DESC) return statuses[0:limit] def origin_visit_status_add_one(self, visit_update: OriginVisitStatusRow) -> None: self._origin_visit_statuses.insert(visit_update) self.increment_counter("origin_visit_status", 1) def origin_visit_status_get_latest( self, origin: str, visit: int, ) -> Optional[OriginVisitStatusRow]: """Given an origin visit id, return its latest origin_visit_status """ return next(self.origin_visit_status_get(origin, visit), None) def origin_visit_status_get( self, origin: str, visit: int, ) -> Iterator[OriginVisitStatusRow]: """Return all origin visit statuses for a given visit """ statuses = [ s for s in self._origin_visit_statuses.get_from_partition_key((origin,)) if s.visit == visit ] statuses.sort(key=lambda s: s.date, reverse=True) return iter(statuses) ########################## # 'metadata_authority' table ########################## def metadata_authority_add(self, authority: MetadataAuthorityRow): self._metadata_authorities.insert(authority) self.increment_counter("metadata_authority", 1) def metadata_authority_get(self, type, url) -> Optional[MetadataAuthorityRow]: return self._metadata_authorities.get_from_primary_key((url, type)) ########################## # 'metadata_fetcher' table ########################## def metadata_fetcher_add(self, fetcher: MetadataFetcherRow): self._metadata_fetchers.insert(fetcher) 
self.increment_counter("metadata_fetcher", 1) def metadata_fetcher_get(self, name, version) -> Optional[MetadataAuthorityRow]: return self._metadata_fetchers.get_from_primary_key((name, version)) ######################### # 'raw_extrinsic_metadata' table ######################### def raw_extrinsic_metadata_add(self, raw_extrinsic_metadata): self._raw_extrinsic_metadata.insert(raw_extrinsic_metadata) self.increment_counter("raw_extrinsic_metadata", 1) def raw_extrinsic_metadata_get_after_date( self, - id: str, + target: str, authority_type: str, authority_url: str, after: datetime.datetime, ) -> Iterable[RawExtrinsicMetadataRow]: - metadata = self.raw_extrinsic_metadata_get(id, authority_type, authority_url) + metadata = self.raw_extrinsic_metadata_get( + target, authority_type, authority_url + ) return (m for m in metadata if m.discovery_date > after) def raw_extrinsic_metadata_get_after_date_and_fetcher( self, - id: str, + target: str, authority_type: str, authority_url: str, after_date: datetime.datetime, after_fetcher_name: str, after_fetcher_version: str, ) -> Iterable[RawExtrinsicMetadataRow]: - metadata = self._raw_extrinsic_metadata.get_from_partition_key((id,)) + metadata = self._raw_extrinsic_metadata.get_from_partition_key((target,)) after_tuple = (after_date, after_fetcher_name, after_fetcher_version) return ( m for m in metadata if m.authority_type == authority_type and m.authority_url == authority_url and (m.discovery_date, m.fetcher_name, m.fetcher_version) > after_tuple ) def raw_extrinsic_metadata_get( - self, id: str, authority_type: str, authority_url: str + self, target: str, authority_type: str, authority_url: str ) -> Iterable[RawExtrinsicMetadataRow]: - metadata = self._raw_extrinsic_metadata.get_from_partition_key((id,)) + metadata = self._raw_extrinsic_metadata.get_from_partition_key((target,)) return ( m for m in metadata if m.authority_type == authority_type and m.authority_url == authority_url ) class InMemoryStorage(CassandraStorage): _cql_runner: InMemoryCqlRunner # type: ignore def __init__(self, journal_writer=None): self.reset() self.journal_writer = JournalWriter(journal_writer) def reset(self): self._cql_runner = InMemoryCqlRunner() self.objstorage = ObjStorage({"cls": "memory"}) def check_config(self, *, check_write: bool) -> bool: return True diff --git a/swh/storage/interface.py b/swh/storage/interface.py index a59a0773..aeff436b 100644 --- a/swh/storage/interface.py +++ b/swh/storage/interface.py @@ -1,1201 +1,1201 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from enum import Enum from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, TypeVar, Union from typing_extensions import Protocol, TypedDict, runtime_checkable from swh.core.api import remote_api_endpoint from swh.core.api.classes import PagedResult as CorePagedResult from swh.model.identifiers import SWHID from swh.model.model import ( Content, Directory, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, OriginVisit, OriginVisitStatus, RawExtrinsicMetadata, Release, Revision, Sha1, Sha1Git, SkippedContent, Snapshot, SnapshotBranch, ) class ListOrder(Enum): """Specifies the order for paginated endpoints returning sorted results.""" ASC = "asc" DESC = "desc" class PartialBranches(TypedDict): """Type of the dictionary 
returned by snapshot_get_branches""" id: Sha1Git """Identifier of the snapshot""" branches: Dict[bytes, Optional[SnapshotBranch]] """A dict of branches contained in the snapshot whose keys are the branches' names""" next_branch: Optional[bytes] """The name of the first branch not returned or :const:`None` if the snapshot has less than the request number of branches.""" TResult = TypeVar("TResult") PagedResult = CorePagedResult[TResult, str] # TODO: Make it an enum (too much impact) VISIT_STATUSES = ["created", "ongoing", "full", "partial"] def deprecated(f): f.deprecated_endpoint = True return f @runtime_checkable class StorageInterface(Protocol): @remote_api_endpoint("check_config") def check_config(self, *, check_write: bool) -> bool: """Check that the storage is configured and ready to go.""" ... @remote_api_endpoint("content/add") def content_add(self, content: List[Content]) -> Dict: """Add content blobs to the storage Args: contents (iterable): iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - data (bytes): the actual content - length (int): content length - one key for each checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden Raises: The following exceptions can occur: - HashCollision in case of collision - Any other exceptions raise by the db In case of errors, some of the content may have been stored in the DB and in the objstorage. Since additions to both idempotent, that should not be a problem. Returns: Summary dict with the following keys and associated values: content:add: New contents added content:add:bytes: Sum of the contents' length data """ ... @remote_api_endpoint("content/update") def content_update( self, contents: List[Dict[str, Any]], keys: List[str] = [] ) -> None: """Update content blobs to the storage. Does nothing for unknown contents or skipped ones. Args: content: iterable of dictionaries representing individual pieces of content to update. Each dictionary has the following keys: - data (bytes): the actual content - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent keys (list): List of keys (str) whose values needs an update, e.g., new hash column """ ... @remote_api_endpoint("content/add_metadata") def content_add_metadata(self, content: List[Content]) -> Dict: """Add content metadata to the storage (like `content_add`, but without inserting to the objstorage). Args: content (iterable): iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent - reason (str): if status = absent, the reason why - origin (int): if status = absent, the origin we saw the content in - ctime (datetime): time of insertion in the archive Returns: Summary dict with the following key and associated values: content:add: New contents added skipped_content:add: New skipped contents (no data) added """ ... @remote_api_endpoint("content/data") def content_get_data(self, content: Sha1) -> Optional[bytes]: """Given a content identifier, returns its associated data if any. 
Args: content: sha1 identifier Returns: raw content data (bytes) """ ... @remote_api_endpoint("content/partition") def content_get_partition( self, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, ) -> PagedResult[Content]: """Splits contents into nb_partitions, and returns one of these based on partition_id (which must be in [0, nb_partitions-1]) There is no guarantee on how the partitioning is done, or the result order. Args: partition_id: index of the partition to fetch nb_partitions: total number of partitions to split into page_token: opaque token used for pagination. limit: Limit result (default to 1000) Returns: PagedResult of Content model objects within the partition. If next_page_token is None, there is no longer data to retrieve. """ ... @remote_api_endpoint("content/metadata") def content_get(self, contents: List[Sha1]) -> List[Optional[Content]]: """Retrieve content metadata in bulk Args: content: List of content identifiers Returns: List of contents model objects when they exist, None otherwise. """ ... @remote_api_endpoint("content/missing") def content_missing( self, contents: List[Dict[str, Any]], key_hash: str = "sha1" ) -> Iterable[bytes]: """List content missing from storage Args: content: iterable of dictionaries whose keys are either 'length' or an item of :data:`swh.model.hashutil.ALGORITHMS`; mapped to the corresponding checksum (or length). key_hash: name of the column to use as hash id result (default: 'sha1') Raises: StorageArgumentException when key_hash is unknown. TODO: an exception when we get a hash collision. Returns: iterable of missing content ids (as per the `key_hash` column) """ ... @remote_api_endpoint("content/missing/sha1") def content_missing_per_sha1(self, contents: List[bytes]) -> Iterable[bytes]: """List content missing from storage based only on sha1. Args: contents: List of sha1 to check for absence. Raises: TODO: an exception when we get a hash collision. Returns: Iterable of missing content ids (sha1) """ ... @remote_api_endpoint("content/missing/sha1_git") def content_missing_per_sha1_git( self, contents: List[Sha1Git] ) -> Iterable[Sha1Git]: """List content missing from storage based only on sha1_git. Args: contents (List): An iterable of content id (sha1_git) Yields: missing contents sha1_git """ ... @remote_api_endpoint("content/present") def content_find(self, content: Dict[str, Any]) -> List[Content]: """Find a content hash in db. Args: content: a dictionary representing one content hash, mapping checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to checksum values Raises: ValueError: in case the key of the dictionary is not sha1, sha1_git nor sha256. Returns: an iterable of Content objects matching the search criteria if the content exist. Empty iterable otherwise. """ ... @remote_api_endpoint("content/get_random") def content_get_random(self) -> Sha1Git: """Finds a random content id. Returns: a sha1_git """ ... @remote_api_endpoint("content/skipped/add") def skipped_content_add(self, content: List[SkippedContent]) -> Dict: """Add contents to the skipped_content list, which contains (partial) information about content missing from the archive. Args: contents (iterable): iterable of dictionaries representing individual pieces of content to add. 
Each dictionary has the following keys: - length (Optional[int]): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.ALGORITHMS`, mapped to the corresponding checksum; each is optional - status (str): must be "absent" - reason (str): the reason why the content is absent - origin (int): if status = absent, the origin we saw the content in Raises: The following exceptions can occur: - HashCollision in case of collision - Any other exceptions raise by the backend In case of errors, some content may have been stored in the DB and in the objstorage. Since additions to both idempotent, that should not be a problem. Returns: Summary dict with the following key and associated values: skipped_content:add: New skipped contents (no data) added """ ... @remote_api_endpoint("content/skipped/missing") def skipped_content_missing( self, contents: List[Dict[str, Any]] ) -> Iterable[Dict[str, Any]]: """List skipped contents missing from storage. Args: contents: iterable of dictionaries containing the data for each checksum algorithm. Returns: Iterable of missing skipped contents as dict """ ... @remote_api_endpoint("directory/add") def directory_add(self, directories: List[Directory]) -> Dict: """Add directories to the storage Args: directories (iterable): iterable of dictionaries representing the individual directories to add. Each dict has the following keys: - id (sha1_git): the id of the directory to add - entries (list): list of dicts for each entry in the directory. Each dict has the following keys: - name (bytes) - type (one of 'file', 'dir', 'rev'): type of the directory entry (file, directory, revision) - target (sha1_git): id of the object pointed at by the directory entry - perms (int): entry permissions Returns: Summary dict of keys with associated count as values: directory:add: Number of directories actually added """ ... @remote_api_endpoint("directory/missing") def directory_missing(self, directories: List[Sha1Git]) -> Iterable[Sha1Git]: """List directories missing from storage. Args: directories: list of directory ids Yields: missing directory ids """ ... @remote_api_endpoint("directory/ls") def directory_ls( self, directory: Sha1Git, recursive: bool = False ) -> Iterable[Dict[str, Any]]: """List entries for one directory. If `recursive=True`, names in the path of a dir/file not at the root are concatenated with a slash (`/`). Args: directory: the directory to list entries from. recursive: if flag on, this list recursively from this directory. Yields: directory entries for such directory. """ ... @remote_api_endpoint("directory/path") def directory_entry_get_by_path( self, directory: Sha1Git, paths: List[bytes] ) -> Optional[Dict[str, Any]]: """Get the directory entry (either file or dir) from directory with path. Args: directory: directory id paths: path to lookup from the top level directory. From left (top) to right (bottom). Returns: The corresponding directory entry as dict if found, None otherwise. """ ... @remote_api_endpoint("directory/get_random") def directory_get_random(self) -> Sha1Git: """Finds a random directory id. Returns: a sha1_git """ ... @remote_api_endpoint("revision/add") def revision_add(self, revisions: List[Revision]) -> Dict: """Add revisions to the storage Args: revisions (List[dict]): iterable of dictionaries representing the individual revisions to add. 
Each dict has the following keys: - **id** (:class:`sha1_git`): id of the revision to add - **date** (:class:`dict`): date the revision was written - **committer_date** (:class:`dict`): date the revision got added to the origin - **type** (one of 'git', 'tar'): type of the revision added - **directory** (:class:`sha1_git`): the directory the revision points at - **message** (:class:`bytes`): the message associated with the revision - **author** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email - **committer** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email - **metadata** (:class:`jsonb`): extra information as dictionary - **synthetic** (:class:`bool`): revision's nature (tarball, directory creates synthetic revision`) - **parents** (:class:`list[sha1_git]`): the parents of this revision date dictionaries have the form defined in :mod:`swh.model`. Returns: Summary dict of keys with associated count as values revision:add: New objects actually stored in db """ ... @remote_api_endpoint("revision/missing") def revision_missing(self, revisions: List[Sha1Git]) -> Iterable[Sha1Git]: """List revisions missing from storage Args: revisions: revision ids Yields: missing revision ids """ ... @remote_api_endpoint("revision") def revision_get(self, revision_ids: List[Sha1Git]) -> List[Optional[Revision]]: """Get revisions from storage Args: revisions: revision ids Returns: list of revision object (if the revision exists or None otherwise) """ ... @remote_api_endpoint("revision/log") def revision_log( self, revisions: List[Sha1Git], limit: Optional[int] = None ) -> Iterable[Optional[Dict[str, Any]]]: """Fetch revision entry from the given root revisions. Args: revisions: array of root revisions to lookup limit: limitation on the output result. Default to None. Yields: revision entries log from the given root root revisions """ ... @remote_api_endpoint("revision/shortlog") def revision_shortlog( self, revisions: List[Sha1Git], limit: Optional[int] = None ) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]: """Fetch the shortlog for the given revisions Args: revisions: list of root revisions to lookup limit: depth limitation for the output Yields: a list of (id, parents) tuples """ ... @remote_api_endpoint("revision/get_random") def revision_get_random(self) -> Sha1Git: """Finds a random revision id. Returns: a sha1_git """ ... @remote_api_endpoint("release/add") def release_add(self, releases: List[Release]) -> Dict: """Add releases to the storage Args: releases (List[dict]): iterable of dictionaries representing the individual releases to add. Each dict has the following keys: - **id** (:class:`sha1_git`): id of the release to add - **revision** (:class:`sha1_git`): id of the revision the release points to - **date** (:class:`dict`): the date the release was made - **name** (:class:`bytes`): the name of the release - **comment** (:class:`bytes`): the comment associated with the release - **author** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email the date dictionary has the form defined in :mod:`swh.model`. Returns: Summary dict of keys with associated count as values release:add: New objects contents actually stored in db """ ... @remote_api_endpoint("release/missing") def release_missing(self, releases: List[Sha1Git]) -> Iterable[Sha1Git]: """List missing release ids from storage Args: releases: release ids Yields: a list of missing release ids """ ... 
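The `*_missing` endpoints described above exist so loaders can deduplicate before writing. A hedged sketch of that pattern against any `StorageInterface` implementation; `revisions` stands for a list of `swh.model` `Revision` objects a loader has already built:

```
from typing import List

from swh.model.model import Revision
from swh.storage.interface import StorageInterface


def add_new_revisions(storage: StorageInterface, revisions: List[Revision]) -> int:
    """Write only the revisions the archive does not already hold."""
    missing_ids = set(storage.revision_missing([rev.id for rev in revisions]))
    to_add = [rev for rev in revisions if rev.id in missing_ids]
    # revision_add returns a summary dict such as {"revision:add": <count>}
    summary = storage.revision_add(to_add)
    return summary.get("revision:add", 0)
```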
@remote_api_endpoint("release") def release_get(self, releases: List[Sha1Git]) -> List[Optional[Release]]: """Given a list of sha1, return the releases's information Args: releases: list of sha1s Returns: List of releases matching the identifiers or None if the release does not exist. """ ... @remote_api_endpoint("release/get_random") def release_get_random(self) -> Sha1Git: """Finds a random release id. Returns: a sha1_git """ ... @remote_api_endpoint("snapshot/add") def snapshot_add(self, snapshots: List[Snapshot]) -> Dict: """Add snapshots to the storage. Args: snapshot ([dict]): the snapshots to add, containing the following keys: - **id** (:class:`bytes`): id of the snapshot - **branches** (:class:`dict`): branches the snapshot contains, mapping the branch name (:class:`bytes`) to the branch target, itself a :class:`dict` (or ``None`` if the branch points to an unknown object) - **target_type** (:class:`str`): one of ``content``, ``directory``, ``revision``, ``release``, ``snapshot``, ``alias`` - **target** (:class:`bytes`): identifier of the target (currently a ``sha1_git`` for all object kinds, or the name of the target branch for aliases) Raises: ValueError: if the origin or visit id does not exist. Returns: Summary dict of keys with associated count as values snapshot:add: Count of object actually stored in db """ ... @remote_api_endpoint("snapshot/missing") def snapshot_missing(self, snapshots: List[Sha1Git]) -> Iterable[Sha1Git]: """List snapshots missing from storage Args: snapshots: snapshot ids Yields: missing snapshot ids """ ... @remote_api_endpoint("snapshot") def snapshot_get(self, snapshot_id: Sha1Git) -> Optional[Dict[str, Any]]: """Get the content, possibly partial, of a snapshot with the given id The branches of the snapshot are iterated in the lexicographical order of their names. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. In order to browse the whole set of branches, the method :meth:`snapshot_get_branches` should be used instead. Args: snapshot_id: snapshot identifier Returns: dict: a dict with three keys: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than 1000 branches. """ ... @remote_api_endpoint("snapshot/count_branches") def snapshot_count_branches( self, snapshot_id: Sha1Git ) -> Optional[Dict[Optional[str], int]]: """Count the number of branches in the snapshot with the given id Args: snapshot_id: snapshot identifier Returns: A dict whose keys are the target types of branches and values their corresponding amount """ ... @remote_api_endpoint("snapshot/get_branches") def snapshot_get_branches( self, snapshot_id: Sha1Git, branches_from: bytes = b"", branches_count: int = 1000, target_types: Optional[List[str]] = None, ) -> Optional[PartialBranches]: """Get the content, possibly partial, of a snapshot with the given id The branches of the snapshot are iterated in the lexicographical order of their names. 
Args: snapshot_id: identifier of the snapshot branches_from: optional parameter used to skip branches whose name is lesser than it before returning them branches_count: optional parameter used to restrain the amount of returned branches target_types: optional parameter used to filter the target types of branch to return (possible values that can be contained in that list are `'content', 'directory', 'revision', 'release', 'snapshot', 'alias'`) Returns: dict: None if the snapshot does not exist; a dict with three keys otherwise: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than `branches_count` branches after `branches_from` included. """ ... @remote_api_endpoint("snapshot/get_random") def snapshot_get_random(self) -> Sha1Git: """Finds a random snapshot id. Returns: a sha1_git """ ... @remote_api_endpoint("origin/visit/add") def origin_visit_add(self, visits: List[OriginVisit]) -> Iterable[OriginVisit]: """Add visits to storage. If the visits have no id, they will be created and assigned one. The resulted visits are visits with their visit id set. Args: visits: List of OriginVisit objects to add Raises: StorageArgumentException if some origin visit reference unknown origins Returns: List[OriginVisit] stored """ ... @remote_api_endpoint("origin/visit_status/add") def origin_visit_status_add(self, visit_statuses: List[OriginVisitStatus],) -> None: """Add origin visit statuses. If there is already a status for the same origin and visit id at the same date, the new one will be either dropped or will replace the existing one (it is unspecified which one of these two behaviors happens). Args: visit_statuses: origin visit statuses to add Raises: StorageArgumentException if the origin of the visit status is unknown """ ... @remote_api_endpoint("origin/visit/get") def origin_visit_get( self, origin: str, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, ) -> PagedResult[OriginVisit]: """Retrieve page of OriginVisit information. Args: origin: The visited origin page_token: opaque string used to get the next results of a search order: Order on visit id fields to list origin visits (default to asc) limit: Number of visits to return Raises: StorageArgumentException if the order is wrong or the page_token type is mistyped. Returns: Page of OriginVisit data model objects. if next_page_token is None, there is no longer data to retrieve. """ ... @remote_api_endpoint("origin/visit/find_by_date") def origin_visit_find_by_date( self, origin: str, visit_date: datetime.datetime ) -> Optional[OriginVisit]: """Retrieves the origin visit whose date is closest to the provided timestamp. In case of a tie, the visit with largest id is selected. Args: origin: origin (URL) visit_date: expected visit date Returns: A visit if found, None otherwise """ ... @remote_api_endpoint("origin/visit/getby") def origin_visit_get_by(self, origin: str, visit: int) -> Optional[OriginVisit]: """Retrieve origin visit's information. Args: origin: origin (URL) visit: visit id Returns: The information on that particular OriginVisit or None if it does not exist """ ... 
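`origin_visit_get` is paginated through the opaque `page_token`. A small sketch of draining every visit of one origin, assuming `storage` is any `StorageInterface` implementation:

```
from swh.storage.interface import ListOrder, StorageInterface


def iter_all_visits(storage: StorageInterface, origin_url: str):
    """Yield every OriginVisit of an origin, following next_page_token."""
    page_token = None
    while True:
        page = storage.origin_visit_get(
            origin_url, page_token=page_token, order=ListOrder.ASC, limit=100
        )
        yield from page.results
        if page.next_page_token is None:
            break
        page_token = page.next_page_token
```

`swh.storage.algos.origin` ships similar ready-made helpers (`iter_origin_visits`, `iter_origin_visit_statuses`), which the migration script further below relies on.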
@remote_api_endpoint("origin/visit/get_latest") def origin_visit_get_latest( self, origin: str, type: Optional[str] = None, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[OriginVisit]: """Get the latest origin visit for the given origin, optionally looking only for those with one of the given allowed_statuses or for those with a snapshot. Args: origin: origin URL type: Optional visit type to filter on (e.g git, tar, dsc, svn, hg, npm, pypi, ...) allowed_statuses: list of visit statuses considered to find the latest visit. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. require_snapshot: If True, only a visit with a snapshot will be returned. Raises: StorageArgumentException if values for the allowed_statuses parameters are unknown Returns: OriginVisit matching the criteria if found, None otherwise. Note that as OriginVisit no longer held reference on the visit status or snapshot, you may want to use origin_visit_status_get_latest for those information. """ ... @remote_api_endpoint("origin/visit_status/get") def origin_visit_status_get( self, origin: str, visit: int, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, ) -> PagedResult[OriginVisitStatus]: """Retrieve page of OriginVisitStatus information. Args: origin: The visited origin visit: The visit identifier page_token: opaque string used to get the next results of a search order: Order on visit status objects to list (default to asc) limit: Number of visit statuses to return Returns: Page of OriginVisitStatus data model objects. if next_page_token is None, there is no longer data to retrieve. """ ... @remote_api_endpoint("origin/visit_status/get_latest") def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[OriginVisitStatus]: """Get the latest origin visit status for the given origin visit, optionally looking only for those with one of the given allowed_statuses or with a snapshot. Args: origin: origin URL allowed_statuses: list of visit statuses considered to find the latest visit. Possible values are {created, ongoing, partial, full}. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. require_snapshot: If True, only a visit with a snapshot will be returned. Raises: StorageArgumentException if values for the allowed_statuses parameters are unknown Returns: The OriginVisitStatus matching the criteria """ ... @remote_api_endpoint("origin/visit_status/get_random") def origin_visit_status_get_random( self, type: str ) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]: """Randomly select one successful origin visit with made in the last 3 months. Returns: One random tuple of (OriginVisit, OriginVisitStatus) matching the selection criteria """ ... @remote_api_endpoint("object/find_by_sha1_git") def object_find_by_sha1_git(self, ids: List[Sha1Git]) -> Dict[Sha1Git, List[Dict]]: """Return the objects found with the given ids. Args: ids: a generator of sha1_gits Returns: A dict from id to the list of objects found for that id. Each object found is itself a dict with keys: - sha1_git: the input id - type: the type of object found """ ... @remote_api_endpoint("origin/get") def origin_get(self, origins: List[str]) -> Iterable[Optional[Origin]]: """Return origins. 
Args: origin: a list of urls to find Returns: the list of associated existing origin model objects. The unknown origins will be returned as None at the same index as the input. """ ... @remote_api_endpoint("origin/get_sha1") def origin_get_by_sha1(self, sha1s: List[bytes]) -> List[Optional[Dict[str, Any]]]: """Return origins, identified by the sha1 of their URLs. Args: sha1s: a list of sha1s Returns: List of origins dict whose sha1 of their url match, None otherwise. """ ... @remote_api_endpoint("origin/list") def origin_list( self, page_token: Optional[str] = None, limit: int = 100 ) -> PagedResult[Origin]: """Returns the list of origins Args: page_token: opaque token used for pagination. limit: the maximum number of results to return Returns: Page of Origin data model objects. if next_page_token is None, there is no longer data to retrieve. """ ... @remote_api_endpoint("origin/search") def origin_search( self, url_pattern: str, page_token: Optional[str] = None, limit: int = 50, regexp: bool = False, with_visit: bool = False, ) -> PagedResult[Origin]: """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern: the string pattern to search for in origin urls page_token: opaque token used for pagination limit: the maximum number of found origins to return regexp: if True, consider the provided pattern as a regular expression and return origins whose urls match it with_visit: if True, filter out origins with no visit Yields: PagedResult of Origin """ ... @deprecated @remote_api_endpoint("origin/count") def origin_count( self, url_pattern: str, regexp: bool = False, with_visit: bool = False ) -> int: """Count origins whose urls contain a provided string pattern or match a provided regular expression. The pattern search in origin urls is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls regexp (bool): if True, consider the provided pattern as a regular expression and return origins whose urls match it with_visit (bool): if True, filter out origins with no visit Returns: int: The number of origins matching the search criterion. """ ... @remote_api_endpoint("origin/add_multi") def origin_add(self, origins: List[Origin]) -> Dict[str, int]: """Add origins to the storage Args: origins: list of dictionaries representing the individual origins, with the following keys: - type: the origin type ('git', 'svn', 'deb', ...) - url (bytes): the url the origin points to Returns: Summary dict of keys with associated count as values origin:add: Count of object actually stored in db """ ... def stat_counters(self): """compute statistics about the number of tuples in various tables Returns: dict: a dictionary mapping textual labels (e.g., content) to integer values (e.g., the number of tuples in table content) """ ... def refresh_stat_counters(self): """Recomputes the statistics for `stat_counters`.""" ... @remote_api_endpoint("raw_extrinsic_metadata/add") def raw_extrinsic_metadata_add(self, metadata: List[RawExtrinsicMetadata],) -> None: """Add extrinsic metadata on objects (contents, directories, ...). The authority and fetcher must be known to the storage before using this endpoint. If there is already metadata for the same object, authority, fetcher, and at the same date; the new one will be either dropped or will replace the existing one (it is unspecified which one of these two behaviors happens). 
Args: metadata: iterable of RawExtrinsicMetadata objects to be inserted. """ ... @remote_api_endpoint("raw_extrinsic_metadata/get") def raw_extrinsic_metadata_get( self, type: MetadataTargetType, - id: Union[str, SWHID], + target: Union[str, SWHID], authority: MetadataAuthority, after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, ) -> PagedResult[RawExtrinsicMetadata]: """Retrieve list of all raw_extrinsic_metadata entries for the id Args: type: one of the values of swh.model.model.MetadataTargetType - id: an URL if type is 'origin', else a core SWHID + target: an URL if type is 'origin', else a core SWHID authority: a dict containing keys `type` and `url`. after: minimum discovery_date for a result to be returned page_token: opaque token, used to get the next page of results limit: maximum number of results to be returned Returns: PagedResult of RawExtrinsicMetadata """ ... @remote_api_endpoint("metadata_fetcher/add") def metadata_fetcher_add(self, fetchers: List[MetadataFetcher],) -> None: """Add new metadata fetchers to the storage. Their `name` and `version` together are unique identifiers of this fetcher; and `metadata` is an arbitrary dict of JSONable data with information about this fetcher, which must not be `None` (but may be empty). Args: fetchers: iterable of MetadataFetcher to be inserted """ ... @remote_api_endpoint("metadata_fetcher/get") def metadata_fetcher_get( self, name: str, version: str ) -> Optional[MetadataFetcher]: """Retrieve information about a fetcher Args: name: the name of the fetcher version: version of the fetcher Returns: a MetadataFetcher object (with a non-None metadata field) if it is known, else None. """ ... @remote_api_endpoint("metadata_authority/add") def metadata_authority_add(self, authorities: List[MetadataAuthority]) -> None: """Add new metadata authorities to the storage. Their `type` and `url` together are unique identifiers of this authority; and `metadata` is an arbitrary dict of JSONable data with information about this authority, which must not be `None` (but may be empty). Args: authorities: iterable of MetadataAuthority to be inserted """ ... @remote_api_endpoint("metadata_authority/get") def metadata_authority_get( self, type: MetadataAuthorityType, url: str ) -> Optional[MetadataAuthority]: """Retrieve information about an authority Args: type: one of "deposit_client", "forge", or "registry" url: unique URI identifying the authority Returns: a MetadataAuthority object (with a non-None metadata field) if it is known, else None. """ ... @remote_api_endpoint("clear/buffer") def clear_buffers(self, object_types: Sequence[str] = ()) -> None: """For backend storages (pg, storage, in-memory), this is a noop operation. For proxy storages (especially filter, buffer), this is an operation which cleans internal state. """ @remote_api_endpoint("flush") def flush(self, object_types: Sequence[str] = ()) -> Dict[str, int]: """For backend storages (pg, storage, in-memory), this is expected to be a noop operation. For proxy storages (especially buffer), this is expected to trigger actual writes to the backend. """ ... 
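# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the patch): how a client might drive
# the visit-status endpoints documented in the interface above. The server URL
# and origin URL below are hypothetical; the method names, arguments and the
# PagedResult pagination contract follow the docstrings above.
from swh.storage import get_storage

storage = get_storage("remote", url="http://127.0.0.1:5002")  # hypothetical server
origin_url = "https://example.org/some/repo"  # hypothetical, already-visited origin

# Latest status of visit 1, only if that visit produced a snapshot:
latest = storage.origin_visit_status_get_latest(
    origin_url=origin_url, visit=1, require_snapshot=True
)

# Walk every status of that visit, one page at a time, until next_page_token
# is None (as documented for origin_visit_status_get):
page_token = None
while True:
    page = storage.origin_visit_status_get(
        origin=origin_url, visit=1, page_token=page_token
    )
    for status in page.results:
        print(status.date, status.status, status.snapshot)
    page_token = page.next_page_token
    if page_token is None:
        break
# ---------------------------------------------------------------------------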
diff --git a/swh/storage/migrate_extrinsic_metadata.py b/swh/storage/migrate_extrinsic_metadata.py index 56637b77..277b8103 100644 --- a/swh/storage/migrate_extrinsic_metadata.py +++ b/swh/storage/migrate_extrinsic_metadata.py @@ -1,1113 +1,1184 @@ #!/usr/bin/env python3 # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """This is an executable script to migrate extrinsic revision metadata from the revision table to the new extrinsic metadata storage. This is designed to be as conservative as possible, following this principle: for each revision the script reads (in "handle_row"), it will read some of the fields, write them directly to the metadata storage, and remove them. Then it checks all the remaining fields are in a hardcoded list of fields that are known not to require migration. This means that every field that isn't migrated was explicitly reviewed while writing this script. Additionally, this script contains many assertions to prevent false positives in its heuristics. """ import datetime import hashlib import json import os import re import sys import time from typing import Any, Dict, Optional +from urllib.error import HTTPError from urllib.parse import unquote, urlparse +from urllib.request import urlopen import iso8601 import psycopg2 from swh.core.db import BaseDb from swh.model.hashutil import hash_to_hex from swh.model.identifiers import SWHID, parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, RawExtrinsicMetadata, Sha1Git, ) from swh.storage import get_storage from swh.storage.algos.origin import iter_origin_visit_statuses, iter_origin_visits from swh.storage.algos.snapshot import snapshot_get_all_branches # XML namespaces and fields for metadata coming from the deposit: CODEMETA_NS = "https://doi.org/10.5063/SCHEMA/CODEMETA-2.0" ATOM_NS = "http://www.w3.org/2005/Atom" ATOM_KEYS = ["id", "author", "external_identifier", "title"] # columns of the revision table (of the storage DB) -REVISION_COLS = ["id", "date", "committer_date", "type", "message", "metadata"] +REVISION_COLS = [ + "id", + "directory", + "date", + "committer_date", + "type", + "message", + "metadata", +] # columns of the tables of the deposit DB DEPOSIT_COLS = [ "deposit.id", "deposit.external_id", "deposit.swhid_context", "deposit.status", "deposit_request.metadata", "deposit_request.date", "deposit_client.provider_url", "deposit_collection.name", "auth_user.username", ] # Formats we write to the extrinsic metadata storage OLD_DEPOSIT_FORMAT = ( "sword-v2-atom-codemeta-v2-in-json-with-expanded-namespaces" # before february 2018 ) NEW_DEPOSIT_FORMAT = "sword-v2-atom-codemeta-v2-in-json" # after february 2018 GNU_FORMAT = "gnu-tree-json" NIXGUIX_FORMAT = "nixguix-sources-json" NPM_FORMAT = "replicate-npm-package-json" ORIGINAL_ARTIFACT_FORMAT = "original-artifacts-json" PYPI_FORMAT = "pypi-project-json" # Information about this script, for traceability FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) # Authorities that we got the metadata from AUTHORITIES = { "npmjs": MetadataAuthority( type=MetadataAuthorityType.FORGE, url="https://npmjs.com/", metadata={} ), "pypi": MetadataAuthority( type=MetadataAuthorityType.FORGE, url="https://pypi.org/", metadata={} ), "gnu": MetadataAuthority( 
type=MetadataAuthorityType.FORGE, url="https://ftp.gnu.org/", metadata={} ), "swh": MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ), # for original_artifact (which are checksums computed by SWH) } # Regular expression for the format of revision messages written by the # deposit loader deposit_revision_message_re = re.compile( b"(?P[a-z-]*): " b"Deposit (?P[0-9]+) in collection (?P[a-z-]+).*" ) # not reliable, because PyPI allows arbitrary names def pypi_project_from_filename(filename): + original_filename = filename if filename.endswith(".egg"): return None elif filename == "mongomotor-0.13.0.n.tar.gz": return "mongomotor" elif re.match(r"datahaven-rev[0-9]+\.tar\.gz", filename): return "datahaven" elif re.match(r"Dtls-[0-9]\.[0-9]\.[0-9]\.sdist_with_openssl\..*", filename): return "Dtls" elif re.match(r"(gae)?pytz-20[0-9][0-9][a-z]\.(tar\.gz|zip)", filename): return filename.split("-", 1)[0] elif filename.startswith(("powny-", "obedient.powny-",)): return filename.split("-")[0] elif filename.startswith("devpi-theme-16-"): return "devpi-theme-16" elif re.match("[^-]+-[0-9]+.tar.gz", filename): return filename.split("-")[0] elif filename == "ohai-1!0.tar.gz": return "ohai" elif filename == "collective.topicitemsevent-0.1dvl.tar.gz": return "collective.topicitemsevent" elif filename.startswith( ("SpiNNStorageHandlers-1!", "sPyNNakerExternalDevicesPlugin-1!") ): return filename.split("-")[0] elif filename.startswith("limnoria-201"): return "limnoria" elif filename.startswith("pytz-20"): return "pytz" elif filename.startswith("youtube_dl_server-alpha."): return "youtube_dl_server" elif filename == "json-extensions-b76bc7d.tar.gz": return "json-extensions" elif filename == "LitReview-0.6989ev.tar.gz": # typo of "dev" return "LitReview" elif filename.startswith("django_options-r"): return "django_options" elif filename == "Greater than, equal, or less Library-0.1.tar.gz": return "Greater-than-equal-or-less-Library" elif filename.startswith("upstart--main-"): return "upstart" + elif filename == "duckduckpy0.1.tar.gz": + return "duckduckpy" + elif filename == "QUI for MPlayer snapshot_9-14-2011.zip": + return "QUI-for-MPlayer" + elif filename == "Eddy's Memory Game-1.0.zip": + return "Eddy-s-Memory-Game" + elif filename == "jekyll2nikola-0-0-1.tar.gz": + return "jekyll2nikola" + elif filename.startswith("ore.workflowed"): + return "ore.workflowed" + elif re.match("instancemanager-[0-9]*", filename): + return "instancemanager" + elif filename == "OrzMC_W&L-1.0.0.tar.gz": + return "OrzMC-W-L" filename = filename.replace(" ", "-") match = re.match( r"^(?P[a-z_.-]+)" # project name r"\.(tar\.gz|tar\.bz2|tgz|zip)$", # extension filename, re.I, ) if match: return match.group("project_name") # First try with a rather strict format, but that allows accidentally # matching the version as part of the package name match = re.match( r"^(?P[a-z0-9_.]+?([-_][a-z][a-z0-9.]+?)*?)" # project name r"-v?" r"([0-9]+!)?" # epoch r"[0-9_.]+([a-z]+[0-9]+)?" # "main" version r"([.-]?(alpha|beta|dev|post|pre|rc)(\.?[0-9]+)?)*" # development status r"([.-]?20[012][0-9]{5,9})?" # date r"([.-]g?[0-9a-f]+)?" # git commit r"([-+]py(thon)?(3k|[23](\.?[0-9]{1,2})?))?" 
# python version r"\.(tar\.gz|tar\.bz2|tgz|zip)$", # extension filename, re.I, ) if match: return match.group("project_name") # If that doesn't work, give up on trying to parse version suffixes, # and just find the first version-like occurrence in the file name match = re.match( r"^(?P[a-z0-9_.-]+?)" # project name r"[-_.]v?" r"([0-9]+!)?" # epoch r"(" # "main" version r"[0-9_]+\.[0-9_.]+([a-z]+[0-9]+)?" # classic version number r"|20[012][0-9]{5,9}" # date as integer r"|20[012][0-9]-[01][0-9]-[0-3][0-9]" # date as ISO 8601 r")" # end of "main" version r"[a-z]?(dev|pre)?" # direct version suffix r"([._-].*)?" # extra suffixes r"\.(tar\.gz|tar\.bz2|tgz|zip)$", # extension filename, re.I, ) if match: return match.group("project_name") # If that still doesn't work, give one last chance if there's only one # dash or underscore in the name match = re.match( r"^(?P[^_-]+)" # project name r"[_-][^_-]+" # version r"\.(tar\.gz|tar\.bz2|tgz|zip)$", # extension filename, ) - assert match, filename + assert match, original_filename return match.group("project_name") +def pypi_origin_from_project_name(project_name: str) -> str: + return f"https://pypi.org/project/{project_name}/" + + +def pypi_origin_from_filename(storage, rev_id: bytes, filename: str) -> Optional[str]: + project_name = pypi_project_from_filename(filename) + origin = pypi_origin_from_project_name(project_name) + # But unfortunately, the filename is user-provided, and doesn't + # necessarily match the package name on pypi. Therefore, we need + # to check it. + if _check_revision_in_origin(storage, origin, rev_id): + return origin + + # if the origin we guessed does not exist, query the PyPI API with the + # project name we guessed. If only the capitalisation and dash/underscores + # are wrong (by far the most common case), PyPI kindly corrects them. + try: + resp = urlopen(f"https://pypi.org/pypi/{project_name}/json/") + except HTTPError as e: + assert e.code == 404 + # nope; PyPI couldn't correct the wrong project name + return None + assert resp.code == 200, resp.code + project_name = json.load(resp)["info"]["name"] + origin = pypi_origin_from_project_name(project_name) + + if _check_revision_in_origin(storage, origin, rev_id): + return origin + else: + # The origin exists, but the revision does not belong in it. + # This happens sometimes, as the filename we guessed the origin + # from is user-provided. + return None + + def cran_package_from_url(filename): match = re.match( r"^https://cran\.r-project\.org/src/contrib/" r"(?P[a-zA-Z0-9.]+)_[0-9.-]+(\.tar\.gz)?$", filename, ) assert match, filename return match.group("package_name") def npm_package_from_source_url(package_source_url): match = re.match( "^https://registry.npmjs.org/(?P.*)/-/[^/]+.tgz$", package_source_url, ) assert match, package_source_url return unquote(match.group("package_name")) def remove_atom_codemeta_metadata_with_xmlns(metadata): """Removes all known Atom and Codemeta metadata fields from the dict, assuming this is a dict generated by xmltodict without expanding namespaces. """ keys_to_remove = ATOM_KEYS + ["@xmlns", "@xmlns:codemeta"] for key in list(metadata): if key.startswith("codemeta:") or key in keys_to_remove: del metadata[key] def remove_atom_codemeta_metadata_without_xmlns(metadata): """Removes all known Atom and Codemeta metadata fields from the dict, assuming this is a dict generated by xmltodict with expanded namespaces. 
""" for key in list(metadata): if key.startswith(("{%s}" % ATOM_NS, "{%s}" % CODEMETA_NS)): del metadata[key] def _check_revision_in_origin(storage, origin, revision_id): seen_snapshots = set() # no need to visit them again seen_revisions = set() for visit in iter_origin_visits(storage, origin): for status in iter_origin_visit_statuses(storage, origin, visit.visit): if status.snapshot is None: continue if status.snapshot in seen_snapshots: continue seen_snapshots.add(status.snapshot) snapshot = snapshot_get_all_branches(storage, status.snapshot) for (branch_name, branch) in snapshot.branches.items(): if branch is None: continue # If it's the revision passed as argument, then it is indeed in the # origin if branch.target == revision_id: return True # Else, let's make sure the branch doesn't have any other revision # Get the revision at the top of the branch. if branch.target in seen_revisions: continue seen_revisions.add(branch.target) revision = storage.revision_get([branch.target])[0] if revision is None: # https://forge.softwareheritage.org/T997 continue # Check it doesn't have parents (else we would have to # recurse) assert revision.parents == (), "revision with parents" return False def debian_origins_from_row(row, storage): """Guesses a Debian origin from a row. May return an empty list if it cannot reliably guess it, but all results are guaranteed to be correct.""" filenames = [entry["filename"] for entry in row["metadata"]["original_artifact"]] package_names = {filename.split("_")[0] for filename in filenames} assert len(package_names) == 1, package_names (package_name,) = package_names candidate_origins = [ f"deb://Debian/packages/{package_name}", f"deb://Debian-Security/packages/{package_name}", f"http://snapshot.debian.org/package/{package_name}/", ] return [ origin for origin in candidate_origins if _check_revision_in_origin(storage, origin, row["id"]) ] # Cache of origins that are known to exist _origins = set() def assert_origin_exists(storage, origin): assert check_origin_exists(storage, origin), origin def check_origin_exists(storage, origin): return ( ( hashlib.sha1(origin.encode()).digest() in _origins # very fast or storage.origin_get([origin])[0] is not None # slow, but up to date ), origin, ) def load_metadata( storage, revision_id, + directory_id, discovery_date: datetime.datetime, metadata: Dict[str, Any], format: str, authority: MetadataAuthority, origin: Optional[str], dry_run: bool, ): """Does the actual loading to swh-storage.""" + directory_swhid = SWHID( + object_type="directory", object_id=hash_to_hex(directory_id) + ) revision_swhid = SWHID(object_type="revision", object_id=hash_to_hex(revision_id)) obj = RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=revision_swhid, + type=MetadataTargetType.DIRECTORY, + target=directory_swhid, discovery_date=discovery_date, authority=authority, fetcher=FETCHER, format=format, metadata=json.dumps(metadata).encode(), origin=origin, + revision=revision_swhid, ) if not dry_run: storage.raw_extrinsic_metadata_add([obj]) def handle_deposit_row( row, discovery_date: Optional[datetime.datetime], origin, storage, deposit_cur, dry_run: bool, ): """Loads metadata from the deposit database (which is more reliable as the metadata on the revision object, as some versions of the deposit loader were a bit lossy; and they used very different format for the field in the revision table). 
""" parsed_message = deposit_revision_message_re.match(row["message"]) assert parsed_message is not None, row["message"] deposit_id = int(parsed_message.group("deposit_id")) collection = parsed_message.group("collection").decode() client_name = parsed_message.group("client").decode() deposit_cur.execute( f"SELECT {', '.join(DEPOSIT_COLS)} FROM deposit " f"INNER JOIN deposit_collection " f" ON (deposit.collection_id=deposit_collection.id) " f"INNER JOIN deposit_client ON (deposit.client_id=deposit_client.user_ptr_id) " f"INNER JOIN auth_user ON (deposit.client_id=auth_user.id) " f"INNER JOIN deposit_request ON (deposit.id=deposit_request.deposit_id) " f"WHERE deposit.id = %s", (deposit_id,), ) provider_urls = set() swhids = set() metadata_entries = [] dates = set() external_identifiers = set() for deposit_request_row in deposit_cur: deposit_request = dict(zip(DEPOSIT_COLS, deposit_request_row)) # Sanity checks to make sure we selected the right deposit assert deposit_request["deposit.id"] == deposit_id assert deposit_request["deposit_collection.name"] == collection, deposit_request if client_name != "": # Sometimes it's missing from the commit message assert deposit_request["auth_user.username"] == client_name # Date of the deposit request (either the initial request, of subsequent ones) date = deposit_request["deposit_request.date"] dates.add(date) assert deposit_request["deposit.swhid_context"], deposit_request external_identifiers.add(deposit_request["deposit.external_id"]) swhids.add(deposit_request["deposit.swhid_context"]) # Client of the deposit provider_urls.add(deposit_request["deposit_client.provider_url"]) metadata = deposit_request["deposit_request.metadata"] if metadata is not None: json.dumps(metadata).encode() # check it's valid if "@xmlns" in metadata: assert metadata["@xmlns"] == ATOM_NS assert metadata["@xmlns:codemeta"] in (CODEMETA_NS, [CODEMETA_NS]) format = NEW_DEPOSIT_FORMAT - else: - assert "{http://www.w3.org/2005/Atom}id" in metadata + elif "{http://www.w3.org/2005/Atom}id" in metadata: assert ( "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}author" in metadata or "{http://www.w3.org/2005/Atom}author" in metadata ) format = OLD_DEPOSIT_FORMAT + else: + # new format introduced in + # https://forge.softwareheritage.org/D4065 + # it's the same as the first case, but with the @xmlns + # declarations stripped + # Most of them should have the "id", but some revisions, + # like 4d3890004fade1f4ec3bf7004a4af0c490605128, are missing + # this field + assert "id" in metadata or "title" in metadata + assert "codemeta:author" in metadata + format = NEW_DEPOSIT_FORMAT metadata_entries.append((date, format, metadata)) if discovery_date is None: discovery_date = max(dates) # Sanity checks to make sure deposit requests are consistent with each other assert len(metadata_entries) >= 1, deposit_id assert len(provider_urls) == 1, f"expected 1 provider url, got {provider_urls}" (provider_url,) = provider_urls assert len(swhids) == 1 (swhid,) = swhids assert ( len(external_identifiers) == 1 ), f"expected 1 external identifier, got {external_identifiers}" (external_identifier,) = external_identifiers # computed the origin from the external_identifier if we don't have one if origin is None: origin = f"{provider_url.strip('/')}/{external_identifier}" # explicit list of mistakes that happened in the past, but shouldn't # happen again: if origin == "https://hal.archives-ouvertes.fr/hal-01588781": # deposit id 75 origin = "https://inria.halpreprod.archives-ouvertes.fr/hal-01588781" elif origin 
== "https://hal.archives-ouvertes.fr/hal-01588782": # deposit id 76 origin = "https://inria.halpreprod.archives-ouvertes.fr/hal-01588782" elif origin == "https://hal.archives-ouvertes.fr/hal-01592430": # deposit id 143 origin = "https://hal-preprod.archives-ouvertes.fr/hal-01592430" elif origin == "https://hal.archives-ouvertes.fr/hal-01588927": origin = "https://inria.halpreprod.archives-ouvertes.fr/hal-01588927" elif origin == "https://hal.archives-ouvertes.fr/hal-01593875": # deposit id 175 origin = "https://hal-preprod.archives-ouvertes.fr/hal-01593875" elif deposit_id == 160: assert origin == "https://www.softwareheritage.org/je-suis-gpl", origin origin = "https://forge.softwareheritage.org/source/jesuisgpl/" elif origin == "https://hal.archives-ouvertes.fr/hal-01588942": # deposit id 90 origin = "https://inria.halpreprod.archives-ouvertes.fr/hal-01588942" elif origin == "https://hal.archives-ouvertes.fr/hal-01592499": # deposit id 162 origin = "https://hal-preprod.archives-ouvertes.fr/hal-01592499" elif origin == "https://hal.archives-ouvertes.fr/hal-01588935": # deposit id 89 origin = "https://hal-preprod.archives-ouvertes.fr/hal-01588935" assert_origin_exists(storage, origin) # check the origin we computed matches the one in the deposit db swhid_origin = parse_swhid(swhid).metadata["origin"] if origin is not None: # explicit list of mistakes that happened in the past, but shouldn't # happen again: exceptions = [ ( # deposit id 229 "https://hal.archives-ouvertes.fr/hal-01243573", "https://hal-test.archives-ouvertes.fr/hal-01243573", ), ( # deposit id 199 "https://hal.archives-ouvertes.fr/hal-01243065", "https://hal-test.archives-ouvertes.fr/hal-01243065", ), ( # deposit id 164 "https://hal.archives-ouvertes.fr/hal-01593855", "https://hal-preprod.archives-ouvertes.fr/hal-01593855", ), ] if (origin, swhid_origin) not in exceptions: assert origin == swhid_origin, ( f"the origin we guessed from the deposit db or revision ({origin}) " f"doesn't match the one in the deposit db's SWHID ({swhid})" ) authority = MetadataAuthority( type=MetadataAuthorityType.DEPOSIT_CLIENT, url=provider_url, metadata={}, ) for (date, format, metadata) in metadata_entries: load_metadata( storage, row["id"], + row["directory"], date, metadata, format, authority=authority, origin=origin, dry_run=dry_run, ) return (origin, discovery_date) def handle_row(row: Dict[str, Any], storage, deposit_cur, dry_run: bool): type_ = row["type"] # default date in case we can't find a better one discovery_date = row["date"] or row["committer_date"] metadata = row["metadata"] if metadata is None: return if type_ == "dsc": origin = None # it will be defined later, using debian_origins_from_row # TODO: the debian loader writes the changelog date as the revision's # author date and committer date. 
Instead, we should use the visit's date if "extrinsic" in metadata: extrinsic_files = metadata["extrinsic"]["raw"]["files"] for artifact_entry in metadata["original_artifact"]: extrinsic_file = extrinsic_files[artifact_entry["filename"]] for key in ("sha256",): assert artifact_entry["checksums"][key] == extrinsic_file[key] artifact_entry["url"] = extrinsic_file["uri"] del metadata["extrinsic"] elif type_ == "tar": provider = metadata.get("extrinsic", {}).get("provider") if provider is not None: # This is the format all the package loaders currently write, and # it is the easiest, thanks to the 'provider' and 'when' fields, # which have all the information we need to tell them easily # and generate accurate metadata discovery_date = iso8601.parse_date(metadata["extrinsic"]["when"]) # New versions of the loaders write the provider; use it. if provider.startswith("https://replicate.npmjs.com/"): # npm loader format 1 parsed_url = urlparse(provider) assert re.match("^/[^/]+/?$", parsed_url.path), parsed_url package_name = unquote(parsed_url.path.strip("/")) origin = "https://www.npmjs.com/package/" + package_name assert_origin_exists(storage, origin) load_metadata( storage, row["id"], + row["directory"], discovery_date, metadata["extrinsic"]["raw"], NPM_FORMAT, authority=AUTHORITIES["npmjs"], origin=origin, dry_run=dry_run, ) del metadata["extrinsic"] elif provider.startswith("https://pypi.org/"): # pypi loader format 1 match = re.match( "https://pypi.org/pypi/(?P.*)/json", provider ) assert match, f"unexpected provider URL format: {provider}" project_name = match.group("project_name") origin = f"https://pypi.org/project/{project_name}/" assert_origin_exists(storage, origin) load_metadata( storage, row["id"], + row["directory"], discovery_date, metadata["extrinsic"]["raw"], PYPI_FORMAT, authority=AUTHORITIES["pypi"], origin=origin, dry_run=dry_run, ) del metadata["extrinsic"] elif provider.startswith("https://cran.r-project.org/"): # cran loader provider = metadata["extrinsic"]["provider"] if provider.startswith("https://cran.r-project.org/package="): origin = metadata["extrinsic"]["provider"] else: package_name = cran_package_from_url(provider) origin = f"https://cran.r-project.org/package={package_name}" # TODO https://forge.softwareheritage.org/T2536 assert origin is not None if ( hashlib.sha1(origin.encode()).digest() not in _origins and storage.origin_get([origin])[0] is None ): print("MISSING CRAN ORIGIN", hash_to_hex(row["id"]), origin) return raw_extrinsic_metadata = metadata["extrinsic"]["raw"] # this is actually intrinsic, ignore it if "version" in raw_extrinsic_metadata: del raw_extrinsic_metadata["version"] # Copy the URL to the original_artifacts metadata assert len(metadata["original_artifact"]) == 1 if "url" in metadata["original_artifact"][0]: assert ( metadata["original_artifact"][0]["url"] == raw_extrinsic_metadata["url"] ), row else: metadata["original_artifact"][0]["url"] = raw_extrinsic_metadata[ "url" ] del raw_extrinsic_metadata["url"] assert ( raw_extrinsic_metadata == {} ), f"Unexpected metadata keys: {list(raw_extrinsic_metadata)}" del metadata["extrinsic"] - elif provider.startswith("https://nix-community.github.io/nixpkgs-swh/"): + elif ( + provider.startswith("https://nix-community.github.io/nixpkgs-swh/") + or provider == "https://guix.gnu.org/sources.json" + ): # nixguix loader origin = provider assert_origin_exists(storage, origin) authority = MetadataAuthority( type=MetadataAuthorityType.FORGE, url=provider, metadata={}, ) assert row["date"] is None # the 
nixguix loader does not write dates load_metadata( storage, row["id"], + row["directory"], discovery_date, metadata["extrinsic"]["raw"], NIXGUIX_FORMAT, authority=authority, origin=origin, dry_run=dry_run, ) del metadata["extrinsic"] elif provider.startswith("https://ftp.gnu.org/"): # archive loader format 1 origin = provider assert_origin_exists(storage, origin) assert len(metadata["original_artifact"]) == 1 metadata["original_artifact"][0]["url"] = metadata["extrinsic"]["raw"][ "url" ] # Remove duplicate keys of original_artifacts for key in ("url", "time", "length", "version", "filename"): del metadata["extrinsic"]["raw"][key] assert metadata["extrinsic"]["raw"] == {} del metadata["extrinsic"] elif provider.startswith("https://deposit.softwareheritage.org/"): origin = metadata["extrinsic"]["raw"]["origin"]["url"] assert_origin_exists(storage, origin) if "@xmlns" in metadata: assert metadata["@xmlns"] == ATOM_NS assert metadata["@xmlns:codemeta"] in (CODEMETA_NS, [CODEMETA_NS]) assert "intrinsic" not in metadata assert "extra_headers" not in metadata # deposit loader format 1 # in this case, the metadata seems to be both directly in metadata # and in metadata["extrinsic"]["raw"]["metadata"] (origin, discovery_date) = handle_deposit_row( row, discovery_date, origin, storage, deposit_cur, dry_run ) remove_atom_codemeta_metadata_with_xmlns(metadata) if "client" in metadata: del metadata["client"] del metadata["extrinsic"] else: # deposit loader format 2 actual_metadata = metadata["extrinsic"]["raw"]["origin_metadata"][ "metadata" ] + if isinstance(actual_metadata, str): + # new format introduced in + # https://forge.softwareheritage.org/D4105 + actual_metadata = json.loads(actual_metadata) if "@xmlns" in actual_metadata: assert actual_metadata["@xmlns"] == ATOM_NS assert actual_metadata["@xmlns:codemeta"] in ( CODEMETA_NS, [CODEMETA_NS], ) - else: - assert "{http://www.w3.org/2005/Atom}id" in actual_metadata + elif "{http://www.w3.org/2005/Atom}id" in actual_metadata: assert ( "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}author" in actual_metadata ) + else: + # new format introduced in + # https://forge.softwareheritage.org/D4065 + # it's the same as the first case, but with the @xmlns + # declarations stripped + # Most of them should have the "id", but some revisions, + # like 4d3890004fade1f4ec3bf7004a4af0c490605128, are missing + # this field + assert "id" in actual_metadata or "title" in actual_metadata + assert "codemeta:author" in actual_metadata (origin, discovery_date) = handle_deposit_row( row, discovery_date, origin, storage, deposit_cur, dry_run ) del metadata["extrinsic"] else: assert False, f"unknown provider {provider}" # Older versions don't write the provider; use heuristics instead. 
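        # The heuristic branches below recognise, in order:
        #  - npm loader format 2: a "package_source" dict whose url points to
        #    registry.npmjs.org;
        #  - deposit loader formats 3 and 4: Atom/CodeMeta keys stored directly
        #    in the revision metadata, with or without expanded XML namespaces;
        #  - pypi loader formats 2 and 3: an "original_artifact" hosted on
        #    files.pythonhosted.org;
        #  - archive loader format 2: the literal b"synthetic revision message";
        #  - deposits carrying nothing but "original_artifact", detected from
        #    the revision message.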
elif ( metadata.get("package_source", {}) .get("url", "") .startswith("https://registry.npmjs.org/") ): # npm loader format 2 package_source_url = metadata["package_source"]["url"] package_name = npm_package_from_source_url(package_source_url) origin = "https://www.npmjs.com/package/" + package_name assert_origin_exists(storage, origin) load_metadata( storage, row["id"], + row["directory"], discovery_date, metadata["package"], NPM_FORMAT, authority=AUTHORITIES["npmjs"], origin=origin, dry_run=dry_run, ) del metadata["package"] assert "original_artifact" not in metadata # rebuild an "original_artifact"-like metadata dict from what we # can salvage of "package_source" package_source_metadata = metadata["package_source"] keep_keys = {"blake2s256", "filename", "sha1", "sha256", "url"} discard_keys = { "date", # is equal to the revision date "name", # was loaded above "version", # same } assert ( set(package_source_metadata) == keep_keys | discard_keys ), package_source_metadata # will be loaded below metadata["original_artifact"] = [ { "filename": package_source_metadata["filename"], "checksums": { "sha1": package_source_metadata["sha1"], "sha256": package_source_metadata["sha256"], "blake2s256": package_source_metadata["blake2s256"], }, "url": package_source_metadata["url"], } ] del metadata["package_source"] elif "@xmlns" in metadata: assert metadata["@xmlns:codemeta"] in (CODEMETA_NS, [CODEMETA_NS]) assert "intrinsic" not in metadata assert "extra_headers" not in metadata # deposit loader format 3 if row["message"] == b"swh: Deposit 159 in collection swh": # There is no deposit 159 in the deposit DB, for some reason assert ( hash_to_hex(row["id"]) == "8e9cee14a6ad39bca4347077b87fb5bbd8953bb1" ) return elif row["message"] == b"hal: Deposit 342 in collection hal": # They have status 'failed' and no swhid return origin = None # TODO discovery_date = None # TODO (origin, discovery_date) = handle_deposit_row( row, discovery_date, origin, storage, deposit_cur, dry_run ) remove_atom_codemeta_metadata_with_xmlns(metadata) if "client" in metadata: del metadata["client"] # found in the deposit db if "committer" in metadata: del metadata["committer"] # found on the revision object elif "{http://www.w3.org/2005/Atom}id" in metadata: assert ( "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}author" in metadata or "{http://www.w3.org/2005/Atom}author" in metadata ) assert "intrinsic" not in metadata assert "extra_headers" not in metadata # deposit loader format 4 origin = None discovery_date = None # TODO (origin, discovery_date) = handle_deposit_row( row, discovery_date, origin, storage, deposit_cur, dry_run ) remove_atom_codemeta_metadata_without_xmlns(metadata) elif hash_to_hex(row["id"]) == "a86747d201ab8f8657d145df4376676d5e47cf9f": # deposit 91, is missing "{http://www.w3.org/2005/Atom}id" for some # reason, and has an invalid oririn return elif ( isinstance(metadata.get("original_artifact"), dict) and metadata["original_artifact"]["url"].startswith( "https://files.pythonhosted.org/" ) ) or ( isinstance(metadata.get("original_artifact"), list) and len(metadata.get("original_artifact")) == 1 and metadata["original_artifact"][0] .get("url", "") .startswith("https://files.pythonhosted.org/") ): if isinstance(metadata.get("original_artifact"), dict): metadata["original_artifact"] = [metadata["original_artifact"]] assert len(metadata["original_artifact"]) == 1 - project_name = pypi_project_from_filename( - metadata["original_artifact"][0]["filename"] + origin = pypi_origin_from_filename( + storage, 
row["id"], metadata["original_artifact"][0]["filename"] ) - origin = f"https://pypi.org/project/{project_name}/" - # But unfortunately, the filename is user-provided, and doesn't - # necessarily match the package name on pypi. Therefore, we need - # to check it. - if not _check_revision_in_origin(storage, origin, row["id"]): - origin_with_dashes = origin.replace("_", "-") - # if the file name contains underscores but we can't find - # a matching origin, also try with dashes. It's common for package - # names containing underscores to use dashes on pypi. - if ( - "_" in origin - and check_origin_exists(storage, origin_with_dashes) - and _check_revision_in_origin( - storage, origin_with_dashes, row["id"] - ) - ): - origin = origin_with_dashes - else: - print( - f"revision {row['id'].hex()} false positive of origin {origin}." - ) - origin = None if "project" in metadata: # pypi loader format 2 - - # same reason as above, we can't do this: - # if metadata["project"]: - # assert metadata["project"]["name"] == project_name - load_metadata( storage, row["id"], + row["directory"], discovery_date, metadata["project"], PYPI_FORMAT, authority=AUTHORITIES["pypi"], origin=origin, dry_run=dry_run, ) del metadata["project"] else: assert set(metadata) == {"original_artifact"}, set(metadata) # pypi loader format 3 pass # nothing to do, there's no metadata elif row["message"] == b"synthetic revision message": assert isinstance(metadata["original_artifact"], list), metadata assert not any("url" in d for d in metadata["original_artifact"]) # archive loader format 2 origin = None elif deposit_revision_message_re.match(row["message"]): # deposit without metadata in the revision assert set(metadata) == {"original_artifact"}, metadata origin = None # TODO discovery_date = None (origin, discovery_date) = handle_deposit_row( row, discovery_date, origin, storage, deposit_cur, dry_run ) else: assert False, f"Unable to detect type of metadata for row: {row}" # Ignore common intrinsic metadata keys for key in ("intrinsic", "extra_headers"): if key in metadata: del metadata[key] # Ignore loader-specific intrinsic metadata keys if type_ == "hg": del metadata["node"] elif type_ == "dsc": if "package_info" in metadata: del metadata["package_info"] if "original_artifact" in metadata: for original_artifact in metadata["original_artifact"]: # Rename keys to the expected format of original-artifacts-json. rename_keys = [ ("name", "filename"), # eg. from old Debian loader ("size", "length"), # eg. from old PyPI loader ] for (old_name, new_name) in rename_keys: if old_name in original_artifact: assert new_name not in original_artifact original_artifact[new_name] = original_artifact.pop(old_name) # Move the checksums to their own subdict, which is the expected format # of original-artifacts-json. if "sha1" in original_artifact: assert "checksums" not in original_artifact original_artifact["checksums"] = {} for key in ("sha1", "sha256", "sha1_git", "blake2s256"): if key in original_artifact: original_artifact["checksums"][key] = original_artifact.pop(key) if "date" in original_artifact: # The information comes from the package repository rather than SWH, # so it shouldn't be in the 'original-artifacts' metadata # (which has SWH as authority). # Moreover, it's not a very useful information, so let's just drop it. 
del original_artifact["date"] allowed_keys = { "checksums", "filename", "length", "url", "archive_type", } assert set(original_artifact) <= allowed_keys, set(original_artifact) if type_ == "dsc": assert origin is None origins = debian_origins_from_row(row, storage) assert origins, row else: origins = [origin] for origin in origins: load_metadata( storage, row["id"], + row["directory"], discovery_date, metadata["original_artifact"], ORIGINAL_ARTIFACT_FORMAT, authority=AUTHORITIES["swh"], origin=origin, dry_run=dry_run, ) del metadata["original_artifact"] assert metadata == {}, ( f"remaining metadata keys for {row['id'].hex()} (type: {row['type']}): " f"{metadata}" ) def create_fetchers(db): with db.cursor() as cur: cur.execute( """ INSERT INTO metadata_fetcher (name, version, metadata) VALUES (%s, %s, %s) ON CONFLICT DO NOTHING """, (FETCHER.name, FETCHER.version, FETCHER.metadata), ) def iter_revision_rows(storage_dbconn: str, first_id: Sha1Git): after_id = first_id failures = 0 while True: try: storage_db = BaseDb.connect(storage_dbconn) with storage_db.cursor() as cur: while True: cur.execute( f"SELECT {', '.join(REVISION_COLS)} FROM revision " f"WHERE id > %s AND metadata IS NOT NULL " f"ORDER BY id LIMIT 1000", (after_id,), ) new_rows = 0 for row in cur: new_rows += 1 row_d = dict(zip(REVISION_COLS, row)) yield row_d after_id = row_d["id"] if new_rows == 0: return except psycopg2.OperationalError as e: print(e) # most likely a temporary error, try again if failures >= 60: raise else: time.sleep(60) failures += 1 def main(storage_dbconn, storage_url, deposit_dbconn, first_id, dry_run): storage_db = BaseDb.connect(storage_dbconn) deposit_db = BaseDb.connect(deposit_dbconn) storage = get_storage("remote", url=storage_url) if not dry_run: create_fetchers(storage_db) # Not creating authorities, as the loaders are presumably already running # and created them already. # This also helps make sure this script doesn't accidentally create # authorities that differ from what the loaders use. 
total_rows = 0 with deposit_db.cursor() as deposit_cur: for row in iter_revision_rows(storage_dbconn, first_id): handle_row(row, storage, deposit_cur, dry_run) total_rows += 1 if total_rows % 1000 == 0: percents = ( int.from_bytes(row["id"][0:4], byteorder="big") * 100 / (1 << 32) ) print( f"Processed {total_rows/1000000.:.2f}M rows " f"(~{percents:.1f}%, last revision: {row['id'].hex()})" ) if __name__ == "__main__": if len(sys.argv) == 4: (_, storage_dbconn, storage_url, deposit_dbconn) = sys.argv first_id = "00" * 20 elif len(sys.argv) == 5: (_, storage_dbconn, storage_url, deposit_dbconn, first_id) = sys.argv else: print( f"Syntax: {sys.argv[0]} " f" []" ) exit(1) if os.path.isfile("./origins.txt"): # You can generate this file with: # psql service=swh-replica \ # -c "\copy (select digest(url, 'sha1') from origin) to stdout" \ # | pv -l > origins.txt print("Loading origins...") with open("./origins.txt") as fd: for line in fd: digest = line.strip()[3:] _origins.add(bytes.fromhex(digest)) print("Done loading origins.") main(storage_dbconn, storage_url, deposit_dbconn, bytes.fromhex(first_id), True) diff --git a/swh/storage/postgresql/converters.py b/swh/storage/postgresql/converters.py index 7282f080..b19ea143 100644 --- a/swh/storage/postgresql/converters.py +++ b/swh/storage/postgresql/converters.py @@ -1,322 +1,322 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from typing import Any, Dict, Optional from swh.core.utils import encode_with_unescape from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, ObjectType, Person, RawExtrinsicMetadata, Release, Revision, RevisionType, Timestamp, TimestampWithTimezone, ) from ..utils import map_optional DEFAULT_AUTHOR = { "fullname": None, "name": None, "email": None, } DEFAULT_DATE = { "timestamp": None, "offset": 0, "neg_utc_offset": None, } def author_to_db(author: Optional[Person]) -> Dict[str, Any]: """Convert a swh-model author to its DB representation. Args: author: a :mod:`swh.model` compatible author Returns: dict: a dictionary with three keys: author, fullname and email """ if author is None: return DEFAULT_AUTHOR return author.to_dict() def db_to_author( fullname: Optional[bytes], name: Optional[bytes], email: Optional[bytes] ) -> Optional[Person]: """Convert the DB representation of an author to a swh-model author. Args: fullname (bytes): the author's fullname name (bytes): the author's name email (bytes): the author's email Returns: a Person object, or None if 'fullname' is None. """ if fullname is None: return None return Person(fullname=fullname, name=name, email=email,) def db_to_git_headers(db_git_headers): ret = [] for key, value in db_git_headers: ret.append([key.encode("utf-8"), encode_with_unescape(value)]) return ret def db_to_date( date: Optional[datetime.datetime], offset: int, neg_utc_offset: Optional[bool] ) -> Optional[TimestampWithTimezone]: """Convert the DB representation of a date to a swh-model compatible date. Args: date: a date pulled out of the database offset: an integer number of minutes representing an UTC offset neg_utc_offset: whether an utc offset is negative Returns: a TimestampWithTimezone, or None if the date is None. 
""" if date is None: return None if neg_utc_offset is None: # For older versions of the database that were not migrated to schema v160 neg_utc_offset = False return TimestampWithTimezone( timestamp=Timestamp( seconds=int(date.timestamp()), microseconds=date.microsecond, ), offset=offset, negative_utc=neg_utc_offset, ) def date_to_db(ts_with_tz: Optional[TimestampWithTimezone]) -> Dict[str, Any]: """Convert a swh-model date_offset to its DB representation. Args: ts_with_tz: a TimestampWithTimezone object Returns: dict: a dictionary with three keys: - timestamp: a date in ISO format - offset: the UTC offset in minutes - neg_utc_offset: a boolean indicating whether a null offset is negative or positive. """ if ts_with_tz is None: return DEFAULT_DATE ts = ts_with_tz.timestamp timestamp = datetime.datetime.fromtimestamp(ts.seconds, datetime.timezone.utc) timestamp = timestamp.replace(microsecond=ts.microseconds) return { # PostgreSQL supports isoformatted timestamps "timestamp": timestamp.isoformat(), "offset": ts_with_tz.offset, "neg_utc_offset": ts_with_tz.negative_utc, } def revision_to_db(revision: Revision) -> Dict[str, Any]: """Convert a swh-model revision to its database representation. """ author = author_to_db(revision.author) date = date_to_db(revision.date) committer = author_to_db(revision.committer) committer_date = date_to_db(revision.committer_date) return { "id": revision.id, "author_fullname": author["fullname"], "author_name": author["name"], "author_email": author["email"], "date": date["timestamp"], "date_offset": date["offset"], "date_neg_utc_offset": date["neg_utc_offset"], "committer_fullname": committer["fullname"], "committer_name": committer["name"], "committer_email": committer["email"], "committer_date": committer_date["timestamp"], "committer_date_offset": committer_date["offset"], "committer_date_neg_utc_offset": committer_date["neg_utc_offset"], "type": revision.type.value, "directory": revision.directory, "message": revision.message, "metadata": None if revision.metadata is None else dict(revision.metadata), "synthetic": revision.synthetic, "extra_headers": revision.extra_headers, "parents": [ {"id": revision.id, "parent_id": parent, "parent_rank": i,} for i, parent in enumerate(revision.parents) ], } def db_to_revision(db_revision: Dict[str, Any]) -> Optional[Revision]: """Convert a database representation of a revision to its swh-model representation.""" if db_revision["type"] is None: assert all( v is None for (k, v) in db_revision.items() if k not in ("id", "parents") ) return None author = db_to_author( db_revision["author_fullname"], db_revision["author_name"], db_revision["author_email"], ) date = db_to_date( db_revision["date"], db_revision["date_offset"], db_revision["date_neg_utc_offset"], ) committer = db_to_author( db_revision["committer_fullname"], db_revision["committer_name"], db_revision["committer_email"], ) committer_date = db_to_date( db_revision["committer_date"], db_revision["committer_date_offset"], db_revision["committer_date_neg_utc_offset"], ) assert author, "author is None" assert committer, "committer is None" parents = [] if "parents" in db_revision: for parent in db_revision["parents"]: if parent: parents.append(parent) metadata = db_revision["metadata"] extra_headers = db_revision["extra_headers"] if not extra_headers: if metadata and "extra_headers" in metadata: extra_headers = db_to_git_headers(metadata.pop("extra_headers")) else: # For older versions of the database that were not migrated to schema v161 extra_headers = () return 
Revision( id=db_revision["id"], author=author, date=date, committer=committer, committer_date=committer_date, type=RevisionType(db_revision["type"]), directory=db_revision["directory"], message=db_revision["message"], metadata=metadata, synthetic=db_revision["synthetic"], extra_headers=extra_headers, parents=tuple(parents), ) def release_to_db(release: Release) -> Dict[str, Any]: """Convert a swh-model release to its database representation. """ author = author_to_db(release.author) date = date_to_db(release.date) return { "id": release.id, "author_fullname": author["fullname"], "author_name": author["name"], "author_email": author["email"], "date": date["timestamp"], "date_offset": date["offset"], "date_neg_utc_offset": date["neg_utc_offset"], "name": release.name, "target": release.target, "target_type": release.target_type.value, "comment": release.message, "synthetic": release.synthetic, } def db_to_release(db_release: Dict[str, Any]) -> Optional[Release]: """Convert a database representation of a release to its swh-model representation. """ if db_release["target_type"] is None: assert all(v is None for (k, v) in db_release.items() if k != "id") return None author = db_to_author( db_release["author_fullname"], db_release["author_name"], db_release["author_email"], ) date = db_to_date( db_release["date"], db_release["date_offset"], db_release["date_neg_utc_offset"] ) return Release( author=author, date=date, id=db_release["id"], name=db_release["name"], message=db_release["comment"], synthetic=db_release["synthetic"], target=db_release["target"], target_type=ObjectType(db_release["target_type"]), ) def db_to_raw_extrinsic_metadata(row) -> RawExtrinsicMetadata: type_ = MetadataTargetType(row["raw_extrinsic_metadata.type"]) - id_ = row["raw_extrinsic_metadata.id"] + target = row["raw_extrinsic_metadata.target"] if type_ != MetadataTargetType.ORIGIN: - id_ = parse_swhid(id_) + target = parse_swhid(target) return RawExtrinsicMetadata( type=type_, - id=id_, + target=target, authority=MetadataAuthority( type=MetadataAuthorityType(row["metadata_authority.type"]), url=row["metadata_authority.url"], ), fetcher=MetadataFetcher( name=row["metadata_fetcher.name"], version=row["metadata_fetcher.version"], ), discovery_date=row["discovery_date"], format=row["format"], metadata=row["raw_extrinsic_metadata.metadata"], origin=row["origin"], visit=row["visit"], snapshot=map_optional(parse_swhid, row["snapshot"]), release=map_optional(parse_swhid, row["release"]), revision=map_optional(parse_swhid, row["revision"]), path=row["path"], directory=map_optional(parse_swhid, row["directory"]), ) diff --git a/swh/storage/postgresql/db.py b/swh/storage/postgresql/db.py index 709df519..3d454d11 100644 --- a/swh/storage/postgresql/db.py +++ b/swh/storage/postgresql/db.py @@ -1,1354 +1,1354 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import logging import random import select from typing import Any, Dict, Iterable, List, Optional, Tuple from swh.core.db import BaseDb from swh.core.db.db_utils import execute_values_generator from swh.core.db.db_utils import jsonize as _jsonize from swh.core.db.db_utils import stored_procedure from swh.model.model import SHA1_SIZE, OriginVisit, OriginVisitStatus from swh.storage.interface import ListOrder logger = logging.getLogger(__name__) def jsonize(d): return 
_jsonize(dict(d) if d is not None else None) class Db(BaseDb): """Proxy to the SWH DB, with wrappers around stored procedures """ - current_version = 163 + current_version = 164 def mktemp_dir_entry(self, entry_type, cur=None): self._cursor(cur).execute( "SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),) ) @stored_procedure("swh_mktemp_revision") def mktemp_revision(self, cur=None): pass @stored_procedure("swh_mktemp_release") def mktemp_release(self, cur=None): pass @stored_procedure("swh_mktemp_snapshot_branch") def mktemp_snapshot_branch(self, cur=None): pass def register_listener(self, notify_queue, cur=None): """Register a listener for NOTIFY queue `notify_queue`""" self._cursor(cur).execute("LISTEN %s" % notify_queue) def listen_notifies(self, timeout): """Listen to notifications for `timeout` seconds""" if select.select([self.conn], [], [], timeout) == ([], [], []): return else: self.conn.poll() while self.conn.notifies: yield self.conn.notifies.pop(0) @stored_procedure("swh_content_add") def content_add_from_temp(self, cur=None): pass @stored_procedure("swh_directory_add") def directory_add_from_temp(self, cur=None): pass @stored_procedure("swh_skipped_content_add") def skipped_content_add_from_temp(self, cur=None): pass @stored_procedure("swh_revision_add") def revision_add_from_temp(self, cur=None): pass @stored_procedure("swh_release_add") def release_add_from_temp(self, cur=None): pass def content_update_from_temp(self, keys_to_update, cur=None): cur = self._cursor(cur) cur.execute( """select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update ) content_get_metadata_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "status", ] content_add_keys = content_get_metadata_keys + ["ctime"] skipped_content_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "reason", "status", "origin", ] def content_get_metadata_from_sha1s(self, sha1s, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ select t.sha1, %s from (values %%s) as t (sha1) inner join content using (sha1) """ % ", ".join(self.content_get_metadata_keys[1:]), ((sha1,) for sha1 in sha1s), ) def content_get_range(self, start, end, limit=None, cur=None): """Retrieve contents within range [start, end]. 
""" cur = self._cursor(cur) query = """select %s from content where %%s <= sha1 and sha1 <= %%s order by sha1 limit %%s""" % ", ".join( self.content_get_metadata_keys ) cur.execute(query, (start, end, limit)) yield from cur content_hash_keys = ["sha1", "sha1_git", "sha256", "blake2s256"] def content_missing_from_list(self, contents, cur=None): cur = self._cursor(cur) keys = ", ".join(self.content_hash_keys) equality = " AND ".join( ("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(%s) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE %s ) """ % (keys, keys, equality), (tuple(c[key] for key in self.content_hash_keys) for c in contents), ) def content_missing_per_sha1(self, sha1s, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT t.sha1 FROM (VALUES %s) AS t(sha1) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE c.sha1 = t.sha1 )""", ((sha1,) for sha1 in sha1s), ) def content_missing_per_sha1_git(self, contents, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git )""", ((sha1,) for sha1 in contents), ) def skipped_content_missing(self, contents, cur=None): if not contents: return [] cur = self._cursor(cur) query = """SELECT * FROM (VALUES %s) AS t (%s) WHERE not exists (SELECT 1 FROM skipped_content s WHERE s.sha1 is not distinct from t.sha1::sha1 and s.sha1_git is not distinct from t.sha1_git::sha1 and s.sha256 is not distinct from t.sha256::bytea);""" % ( (", ".join("%s" for _ in contents)), ", ".join(self.content_hash_keys), ) cur.execute( query, [tuple(cont[key] for key in self.content_hash_keys) for cont in contents], ) yield from cur def snapshot_exists(self, snapshot_id, cur=None): """Check whether a snapshot with the given id exists""" cur = self._cursor(cur) cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,)) return bool(cur.fetchone()) def snapshot_missing_from_list(self, snapshots, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM snapshot d WHERE d.id = t.id ) """, ((id,) for id in snapshots), ) def snapshot_add(self, snapshot_id, cur=None): """Add a snapshot from the temporary table""" cur = self._cursor(cur) cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,)) snapshot_count_cols = ["target_type", "count"] def snapshot_count_branches(self, snapshot_id, cur=None): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_count_branches(%%s) """ % ", ".join( self.snapshot_count_cols ) cur.execute(query, (snapshot_id,)) yield from cur snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"] def snapshot_get_by_id( self, snapshot_id, branches_from=b"", branches_count=None, target_types=None, cur=None, ): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[]) """ % ", ".join( self.snapshot_get_cols ) cur.execute(query, (snapshot_id, branches_from, branches_count, target_types)) yield from cur def snapshot_get_random(self, cur=None): return self._get_random_row_from_table("snapshot", ["id"], "id", cur) content_find_cols = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", ] def content_find( self, sha1: Optional[bytes] = None, sha1_git: Optional[bytes] = None, sha256: 
Optional[bytes] = None, blake2s256: Optional[bytes] = None, cur=None, ): """Find the content optionally on a combination of the following checksums sha1, sha1_git, sha256 or blake2s256. Args: sha1: sha1 content git_sha1: the sha1 computed `a la git` sha1 of the content sha256: sha256 content blake2s256: blake2s256 content Returns: The tuple (sha1, sha1_git, sha256, blake2s256) if found or None. """ cur = self._cursor(cur) checksum_dict = { "sha1": sha1, "sha1_git": sha1_git, "sha256": sha256, "blake2s256": blake2s256, } query_parts = [f"SELECT {','.join(self.content_find_cols)} FROM content WHERE "] query_params = [] where_parts = [] # Adds only those keys which have values exist for algorithm in checksum_dict: if checksum_dict[algorithm] is not None: where_parts.append(f"{algorithm} = %s") query_params.append(checksum_dict[algorithm]) query_parts.append(" AND ".join(where_parts)) query = "\n".join(query_parts) cur.execute(query, query_params) content = cur.fetchall() return content def content_get_random(self, cur=None): return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur) def directory_missing_from_list(self, directories, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM directory d WHERE d.id = t.id ) """, ((id,) for id in directories), ) directory_ls_cols = [ "dir_id", "type", "target", "name", "perms", "status", "sha1", "sha1_git", "sha256", "length", ] def directory_walk_one(self, directory, cur=None): cur = self._cursor(cur) cols = ", ".join(self.directory_ls_cols) query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols cur.execute(query, (directory,)) yield from cur def directory_walk(self, directory, cur=None): cur = self._cursor(cur) cols = ", ".join(self.directory_ls_cols) query = "SELECT %s FROM swh_directory_walk(%%s)" % cols cur.execute(query, (directory,)) yield from cur def directory_entry_get_by_path(self, directory, paths, cur=None): """Retrieve a directory entry by path. """ cur = self._cursor(cur) cols = ", ".join(self.directory_ls_cols) query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols cur.execute(query, (directory, paths)) data = cur.fetchone() if set(data) == {None}: return None return data def directory_get_random(self, cur=None): return self._get_random_row_from_table("directory", ["id"], "id", cur) def revision_missing_from_list(self, revisions, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM revision r WHERE r.id = t.id ) """, ((id,) for id in revisions), ) revision_add_cols = [ "id", "date", "date_offset", "date_neg_utc_offset", "committer_date", "committer_date_offset", "committer_date_neg_utc_offset", "type", "directory", "message", "author_fullname", "author_name", "author_email", "committer_fullname", "committer_name", "committer_email", "metadata", "synthetic", "extra_headers", ] revision_get_cols = revision_add_cols + ["parents"] def origin_visit_add(self, origin, ts, type, cur=None): """Add a new origin_visit for origin origin at timestamp ts. 
Args: origin: origin concerned by the visit ts: the date of the visit type: type of loader for the visit Returns: The new visit index step for that origin """ cur = self._cursor(cur) self._cursor(cur).execute( "SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type) ) return cur.fetchone()[0] origin_visit_status_cols = [ "origin", "visit", "date", "status", "snapshot", "metadata", ] def origin_visit_status_add( self, visit_status: OriginVisitStatus, cur=None ) -> None: """Add new origin visit status """ assert self.origin_visit_status_cols[0] == "origin" assert self.origin_visit_status_cols[-1] == "metadata" cols = self.origin_visit_status_cols[1:-1] cur = self._cursor(cur) cur.execute( f"WITH origin_id as (select id from origin where url=%s) " f"INSERT INTO origin_visit_status " f"(origin, {', '.join(cols)}, metadata) " f"VALUES ((select id from origin_id), " f"{', '.join(['%s']*len(cols))}, %s) " f"ON CONFLICT (origin, visit, date) do nothing", [visit_status.origin] + [getattr(visit_status, key) for key in cols] + [jsonize(visit_status.metadata)], ) origin_visit_cols = ["origin", "visit", "date", "type"] def origin_visit_add_with_id(self, origin_visit: OriginVisit, cur=None) -> None: """Insert origin visit when id are already set """ ov = origin_visit assert ov.visit is not None cur = self._cursor(cur) query = """INSERT INTO origin_visit ({cols}) VALUES ((select id from origin where url=%s), {values}) ON CONFLICT (origin, visit) DO NOTHING""".format( cols=", ".join(self.origin_visit_cols), values=", ".join("%s" for col in self.origin_visit_cols[1:]), ) cur.execute(query, (ov.origin, ov.visit, ov.date, ov.type)) origin_visit_get_cols = [ "origin", "visit", "date", "type", "status", "metadata", "snapshot", ] origin_visit_select_cols = [ "o.url AS origin", "ov.visit", "ov.date", "ov.type AS type", "ovs.status", "ovs.metadata", "ovs.snapshot", ] origin_visit_status_select_cols = [ "o.url AS origin", "ovs.visit", "ovs.date", "ovs.status", "ovs.snapshot", "ovs.metadata", ] def _make_origin_visit_status( self, row: Optional[Tuple[Any]] ) -> Optional[Dict[str, Any]]: """Make an origin_visit_status dict out of a row """ if not row: return None return dict(zip(self.origin_visit_status_cols, row)) def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, cur=None, ) -> Optional[Dict[str, Any]]: """Given an origin visit id, return its latest origin_visit_status """ cur = self._cursor(cur) query_parts = [ "SELECT %s" % ", ".join(self.origin_visit_status_select_cols), "FROM origin_visit_status ovs ", "INNER JOIN origin o ON o.id = ovs.origin", ] query_parts.append("WHERE o.url = %s") query_params: List[Any] = [origin_url] query_parts.append("AND ovs.visit = %s") query_params.append(visit) if require_snapshot: query_parts.append("AND ovs.snapshot is not null") if allowed_statuses: query_parts.append("AND ovs.status IN %s") query_params.append(tuple(allowed_statuses)) query_parts.append("ORDER BY ovs.date DESC LIMIT 1") query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) row = cur.fetchone() return self._make_origin_visit_status(row) def origin_visit_status_get_range( self, origin: str, visit: int, date_from: Optional[datetime.datetime], order: ListOrder, limit: int, cur=None, ): """Retrieve visit_status rows for visit (origin, visit) in a paginated way. 
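
For illustration, here is a small self-contained sketch of the query-assembly pattern used by `origin_visit_status_get_latest` above: optional filters are appended to a list of query parts together with a matching parameter list, then joined. The function name and the simplified column selection are invented for the example.

```
# Sketch of the "query_parts + query_params" pattern used above; illustrative only.
from typing import Any, List, Optional, Tuple

def latest_status_query(
    origin_url: str,
    visit: int,
    allowed_statuses: Optional[List[str]] = None,
    require_snapshot: bool = False,
) -> Tuple[str, Tuple[Any, ...]]:
    parts = [
        "SELECT ovs.* FROM origin_visit_status ovs",
        "INNER JOIN origin o ON o.id = ovs.origin",
        "WHERE o.url = %s AND ovs.visit = %s",
    ]
    params: List[Any] = [origin_url, visit]
    if require_snapshot:
        parts.append("AND ovs.snapshot IS NOT NULL")
    if allowed_statuses:
        parts.append("AND ovs.status IN %s")
        params.append(tuple(allowed_statuses))
    parts.append("ORDER BY ovs.date DESC LIMIT 1")
    return "\n".join(parts), tuple(params)

query, params = latest_status_query("https://example.org/repo", 3, ["full"], True)
```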
""" cur = self._cursor(cur) query_parts = [ f"SELECT {', '.join(self.origin_visit_status_select_cols)} " "FROM origin_visit_status ovs ", "INNER JOIN origin o ON o.id = ovs.origin ", ] query_parts.append("WHERE o.url = %s AND ovs.visit = %s ") query_params: List[Any] = [origin, visit] if date_from is not None: op_comparison = ">=" if order == ListOrder.ASC else "<=" query_parts.append(f"and ovs.date {op_comparison} %s ") query_params.append(date_from) if order == ListOrder.ASC: query_parts.append("ORDER BY ovs.date ASC ") elif order == ListOrder.DESC: query_parts.append("ORDER BY ovs.date DESC ") else: assert False query_parts.append("LIMIT %s") query_params.append(limit) query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) yield from cur def origin_visit_get_range( self, origin: str, visit_from: int, order: ListOrder, limit: int, cur=None, ): cur = self._cursor(cur) origin_visit_cols = ["o.url as origin", "ov.visit", "ov.date", "ov.type"] query_parts = [ f"SELECT {', '.join(origin_visit_cols)} FROM origin_visit ov ", "INNER JOIN origin o ON o.id = ov.origin ", ] query_parts.append("WHERE o.url = %s") query_params: List[Any] = [origin] if visit_from > 0: op_comparison = ">" if order == ListOrder.ASC else "<" query_parts.append(f"and ov.visit {op_comparison} %s") query_params.append(visit_from) if order == ListOrder.ASC: query_parts.append("ORDER BY ov.visit ASC") elif order == ListOrder.DESC: query_parts.append("ORDER BY ov.visit DESC") query_parts.append("LIMIT %s") query_params.append(limit) query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) yield from cur def origin_visit_get(self, origin_id, visit_id, cur=None): """Retrieve information on visit visit_id of origin origin_id. Args: origin_id: the origin concerned visit_id: The visit step for that origin Returns: The origin_visit information """ cur = self._cursor(cur) query = """\ SELECT %s FROM origin_visit ov INNER JOIN origin o ON o.id = ov.origin INNER JOIN origin_visit_status ovs ON ov.origin = ovs.origin AND ov.visit = ovs.visit WHERE o.url = %%s AND ov.visit = %%s ORDER BY ovs.date DESC LIMIT 1 """ % ( ", ".join(self.origin_visit_select_cols) ) cur.execute(query, (origin_id, visit_id)) r = cur.fetchall() if not r: return None return r[0] def origin_visit_find_by_date(self, origin, visit_date, cur=None): cur = self._cursor(cur) cur.execute( "SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date) ) rows = cur.fetchall() if rows: visit = dict(zip(self.origin_visit_get_cols, rows[0])) visit["origin"] = origin return visit def origin_visit_exists(self, origin_id, visit_id, cur=None): """Check whether an origin visit with the given ids exists""" cur = self._cursor(cur) query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s" cur.execute(query, (origin_id, visit_id)) return bool(cur.fetchone()) def origin_visit_get_latest( self, origin_id: str, type: Optional[str], allowed_statuses: Optional[Iterable[str]], require_snapshot: bool, cur=None, ): """Retrieve the most recent origin_visit of the given origin, with optional filters. Args: origin_id: the origin concerned type: Optional visit type to filter on allowed_statuses: the visit statuses allowed for the returned visit require_snapshot (bool): If True, only a visit with a known snapshot will be returned. Returns: The origin_visit information, or None if no visit matches. 
""" cur = self._cursor(cur) query_parts = [ "SELECT %s" % ", ".join(self.origin_visit_select_cols), "FROM origin_visit ov ", "INNER JOIN origin o ON o.id = ov.origin", "INNER JOIN origin_visit_status ovs ", "ON o.id = ovs.origin AND ov.visit = ovs.visit ", ] query_parts.append("WHERE o.url = %s") query_params: List[Any] = [origin_id] if type is not None: query_parts.append("AND ov.type = %s") query_params.append(type) if require_snapshot: query_parts.append("AND ovs.snapshot is not null") if allowed_statuses: query_parts.append("AND ovs.status IN %s") query_params.append(tuple(allowed_statuses)) query_parts.append( "ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1" ) query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) r = cur.fetchone() if not r: return None return r def origin_visit_get_random(self, type, cur=None): """Randomly select one origin visit that was full and in the last 3 months """ cur = self._cursor(cur) columns = ",".join(self.origin_visit_select_cols) query = f"""select {columns} from origin_visit ov inner join origin o on ov.origin=o.id inner join origin_visit_status ovs on ov.origin = ovs.origin and ov.visit = ovs.visit where ovs.status='full' and ov.type=%s and ov.date > now() - '3 months'::interval and random() < 0.1 limit 1 """ cur.execute(query, (type,)) return cur.fetchone() @staticmethod def mangle_query_key(key, main_table): if key == "id": return "t.id" if key == "parents": return """ ARRAY( SELECT rh.parent_id::bytea FROM revision_history rh WHERE rh.id = t.id ORDER BY rh.parent_rank )""" if "_" not in key: return "%s.%s" % (main_table, key) head, tail = key.split("_", 1) if head in ("author", "committer") and tail in ( "name", "email", "id", "fullname", ): return "%s.%s" % (head, tail) return "%s.%s" % (main_table, key) def revision_get_from_list(self, revisions, cur=None): cur = self._cursor(cur) query_keys = ", ".join( self.mangle_query_key(k, "revision") for k in self.revision_get_cols ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(sortkey, id) LEFT JOIN revision ON t.id = revision.id LEFT JOIN person author ON revision.author = author.id LEFT JOIN person committer ON revision.committer = committer.id ORDER BY sortkey """ % query_keys, ((sortkey, id) for sortkey, id in enumerate(revisions)), ) def revision_log(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_log(%%s, %%s) """ % ", ".join( self.revision_get_cols ) cur.execute(query, (root_revisions, limit)) yield from cur revision_shortlog_cols = ["id", "parents"] def revision_shortlog(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_list(%%s, %%s) """ % ", ".join( self.revision_shortlog_cols ) cur.execute(query, (root_revisions, limit)) yield from cur def revision_get_random(self, cur=None): return self._get_random_row_from_table("revision", ["id"], "id", cur) def release_missing_from_list(self, releases, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM release r WHERE r.id = t.id ) """, ((id,) for id in releases), ) object_find_by_sha1_git_cols = ["sha1_git", "type"] def object_find_by_sha1_git(self, ids, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ WITH t (sha1_git) AS (VALUES %s), known_objects as (( select id as sha1_git, 'release'::object_type as type, object_id from release r where exists 
(select 1 from t where t.sha1_git = r.id) ) union all ( select id as sha1_git, 'revision'::object_type as type, object_id from revision r where exists (select 1 from t where t.sha1_git = r.id) ) union all ( select id as sha1_git, 'directory'::object_type as type, object_id from directory d where exists (select 1 from t where t.sha1_git = d.id) ) union all ( select sha1_git as sha1_git, 'content'::object_type as type, object_id from content c where exists (select 1 from t where t.sha1_git = c.sha1_git) )) select t.sha1_git as sha1_git, k.type from t left join known_objects k on t.sha1_git = k.sha1_git """, ((id,) for id in ids), ) def stat_counters(self, cur=None): cur = self._cursor(cur) cur.execute("SELECT * FROM swh_stat_counters()") yield from cur def origin_add(self, url, cur=None): """Insert a new origin and return the new identifier.""" insert = """INSERT INTO origin (url) values (%s) RETURNING url""" cur.execute(insert, (url,)) return cur.fetchone()[0] origin_cols = ["url"] def origin_get_by_url(self, origins, cur=None): """Retrieve origin `(type, url)` from urls if found.""" cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(url) LEFT JOIN origin ON t.url = origin.url """ % ",".join( "origin." + col for col in self.origin_cols ) yield from execute_values_generator(cur, query, ((url,) for url in origins)) def origin_get_by_sha1(self, sha1s, cur=None): """Retrieve origin urls from sha1s if found.""" cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(sha1) LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1') """ % ",".join( "origin." + col for col in self.origin_cols ) yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s)) def origin_id_get_by_url(self, origins, cur=None): """Retrieve origin `(type, url)` from urls if found.""" cur = self._cursor(cur) query = """SELECT id FROM (VALUES %s) as t(url) LEFT JOIN origin ON t.url = origin.url """ for row in execute_values_generator(cur, query, ((url,) for url in origins)): yield row[0] origin_get_range_cols = ["id", "url"] def origin_get_range(self, origin_from: int = 1, origin_count: int = 100, cur=None): """Retrieve ``origin_count`` origins whose ids are greater or equal than ``origin_from``. Origins are sorted by id before retrieving them. Args: origin_from: the minimum id of origins to retrieve origin_count: the maximum number of origins to retrieve """ cur = self._cursor(cur) query = """SELECT %s FROM origin WHERE id >= %%s ORDER BY id LIMIT %%s """ % ",".join( self.origin_get_range_cols ) cur.execute(query, (origin_from, origin_count)) yield from cur def _origin_query( self, url_pattern, count=False, offset=0, limit=50, regexp=False, with_visit=False, cur=None, ): """ Method factorizing query creation for searching and counting origins. 
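
As a point of reference for `origin_get_by_sha1` above, the lookup key it expects is the SHA1 digest of the origin URL, i.e. what PostgreSQL's `digest(origin.url, 'sha1')` computes. A quick client-side equivalent, assuming URLs are stored as UTF-8; the URL below is only an example:

```
# Client-side computation of the key used by origin_get_by_sha1; illustrative URL.
import hashlib

def origin_sha1(url: str) -> bytes:
    return hashlib.sha1(url.encode("utf-8")).digest()

key = origin_sha1("https://example.org/illustrative/repo")
print(key.hex())
```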
""" cur = self._cursor(cur) if count: origin_cols = "COUNT(*)" order_clause = "" else: origin_cols = ",".join(self.origin_cols) order_clause = "ORDER BY id" if not regexp: operator = "ILIKE" query_params = [f"%{url_pattern}%"] else: operator = "~*" query_params = [url_pattern] query = f""" WITH filtered_origins AS ( SELECT * FROM origin WHERE url {operator} %s {order_clause} ) SELECT {origin_cols} FROM filtered_origins AS o """ if with_visit: query += """ WHERE EXISTS ( SELECT 1 FROM origin_visit ov INNER JOIN origin_visit_status ovs ON ov.origin = ovs.origin AND ov.visit = ovs.visit INNER JOIN snapshot ON ovs.snapshot=snapshot.id WHERE ov.origin=o.id ) """ if not count: query += "OFFSET %s LIMIT %s" query_params.extend([offset, limit]) cur.execute(query, query_params) def origin_search( self, url_pattern: str, offset: int = 0, limit: int = 50, regexp: bool = False, with_visit: bool = False, cur=None, ): """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern: the string pattern to search for in origin urls offset: number of found origins to skip before returning results limit: the maximum number of found origins to return regexp: if True, consider the provided pattern as a regular expression and returns origins whose urls match it with_visit: if True, filter out origins with no visit """ self._origin_query( url_pattern, offset=offset, limit=limit, regexp=regexp, with_visit=with_visit, cur=cur, ) yield from cur def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None): """Count origins whose urls contain a provided string pattern or match a provided regular expression. The pattern search in origin urls is performed in a case insensitive way. 
Args: url_pattern (str): the string pattern to search for in origin urls regexp (bool): if True, consider the provided pattern as a regular expression and returns origins whose urls match it with_visit (bool): if True, filter out origins with no visit """ self._origin_query( url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur ) return cur.fetchone()[0] release_add_cols = [ "id", "target", "target_type", "date", "date_offset", "date_neg_utc_offset", "name", "comment", "synthetic", "author_fullname", "author_name", "author_email", ] release_get_cols = release_add_cols def release_get_from_list(self, releases, cur=None): cur = self._cursor(cur) query_keys = ", ".join( self.mangle_query_key(k, "release") for k in self.release_get_cols ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(sortkey, id) LEFT JOIN release ON t.id = release.id LEFT JOIN person author ON release.author = author.id ORDER BY sortkey """ % query_keys, ((sortkey, id) for sortkey, id in enumerate(releases)), ) def release_get_random(self, cur=None): return self._get_random_row_from_table("release", ["id"], "id", cur) _raw_extrinsic_metadata_context_cols = [ "origin", "visit", "snapshot", "release", "revision", "path", "directory", ] """The list of context columns for all artifact types.""" _raw_extrinsic_metadata_insert_cols = [ "type", - "id", + "target", "authority_id", "fetcher_id", "discovery_date", "format", "metadata", *_raw_extrinsic_metadata_context_cols, ] """List of columns of the raw_extrinsic_metadata table, used when writing metadata.""" _raw_extrinsic_metadata_insert_query = f""" INSERT INTO raw_extrinsic_metadata ({', '.join(_raw_extrinsic_metadata_insert_cols)}) VALUES ({', '.join('%s' for _ in _raw_extrinsic_metadata_insert_cols)}) - ON CONFLICT (id, authority_id, discovery_date, fetcher_id) + ON CONFLICT (target, authority_id, discovery_date, fetcher_id) DO NOTHING """ raw_extrinsic_metadata_get_cols = [ - "raw_extrinsic_metadata.id", + "raw_extrinsic_metadata.target", "raw_extrinsic_metadata.type", "discovery_date", "metadata_authority.type", "metadata_authority.url", "metadata_fetcher.id", "metadata_fetcher.name", "metadata_fetcher.version", *_raw_extrinsic_metadata_context_cols, "format", "raw_extrinsic_metadata.metadata", ] """List of columns of the raw_extrinsic_metadata, metadata_authority, and metadata_fetcher tables, used when reading object metadata.""" _raw_extrinsic_metadata_select_query = f""" SELECT {', '.join(raw_extrinsic_metadata_get_cols)} FROM raw_extrinsic_metadata INNER JOIN metadata_authority ON (metadata_authority.id=authority_id) INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id) - WHERE raw_extrinsic_metadata.id=%s AND authority_id=%s + WHERE raw_extrinsic_metadata.target=%s AND authority_id=%s """ def raw_extrinsic_metadata_add( self, type: str, - id: str, + target: str, discovery_date: datetime.datetime, authority_id: int, fetcher_id: int, format: str, metadata: bytes, origin: Optional[str], visit: Optional[int], snapshot: Optional[str], release: Optional[str], revision: Optional[str], path: Optional[bytes], directory: Optional[str], cur, ): query = self._raw_extrinsic_metadata_insert_query args: Dict[str, Any] = dict( type=type, - id=id, + target=target, authority_id=authority_id, fetcher_id=fetcher_id, discovery_date=discovery_date, format=format, metadata=metadata, origin=origin, visit=visit, snapshot=snapshot, release=release, revision=revision, path=path, directory=directory, ) params = [args[col] for col in 
self._raw_extrinsic_metadata_insert_cols] cur.execute(query, params) def raw_extrinsic_metadata_get( self, type: str, - id: str, + target: str, authority_id: int, after_time: Optional[datetime.datetime], after_fetcher: Optional[int], limit: int, cur, ): query_parts = [self._raw_extrinsic_metadata_select_query] - args = [id, authority_id] + args = [target, authority_id] if after_fetcher is not None: assert after_time query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)") args.extend([after_time, after_fetcher]) elif after_time is not None: query_parts.append("AND discovery_date > %s") args.append(after_time) query_parts.append("ORDER BY discovery_date, fetcher_id") if limit: query_parts.append("LIMIT %s") args.append(limit) cur.execute(" ".join(query_parts), args) yield from cur metadata_fetcher_cols = ["name", "version", "metadata"] def metadata_fetcher_add( self, name: str, version: str, metadata: bytes, cur=None ) -> None: cur = self._cursor(cur) cur.execute( "INSERT INTO metadata_fetcher (name, version, metadata) " "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING", (name, version, jsonize(metadata)), ) def metadata_fetcher_get(self, name: str, version: str, cur=None): cur = self._cursor(cur) cur.execute( f"SELECT {', '.join(self.metadata_fetcher_cols)} " f"FROM metadata_fetcher " f"WHERE name=%s AND version=%s", (name, version), ) return cur.fetchone() def metadata_fetcher_get_id( self, name: str, version: str, cur=None ) -> Optional[int]: cur = self._cursor(cur) cur.execute( "SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s", (name, version), ) row = cur.fetchone() if row: return row[0] else: return None metadata_authority_cols = ["type", "url", "metadata"] def metadata_authority_add( self, type: str, url: str, metadata: bytes, cur=None ) -> None: cur = self._cursor(cur) cur.execute( "INSERT INTO metadata_authority (type, url, metadata) " "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING", (type, url, jsonize(metadata)), ) def metadata_authority_get(self, type: str, url: str, cur=None): cur = self._cursor(cur) cur.execute( f"SELECT {', '.join(self.metadata_authority_cols)} " f"FROM metadata_authority " f"WHERE type=%s AND url=%s", (type, url), ) return cur.fetchone() def metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]: cur = self._cursor(cur) cur.execute( "SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url) ) row = cur.fetchone() if row: return row[0] else: return None def _get_random_row_from_table(self, table_name, cols, id_col, cur=None): random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE)) cur = self._cursor(cur) query = """ (SELECT {cols} FROM {table} WHERE {id_col} >= %s ORDER BY {id_col} LIMIT 1) UNION (SELECT {cols} FROM {table} WHERE {id_col} < %s ORDER BY {id_col} DESC LIMIT 1) LIMIT 1 """.format( cols=", ".join(cols), table=table_name, id_col=id_col ) cur.execute(query, (random_sha1, random_sha1)) row = cur.fetchone() if row: return row[0] dbversion_cols = ["version", "release", "description"] def dbversion(self): with self.transaction() as cur: cur.execute(f"SELECT {', '.join(self.dbversion_cols)} FROM dbversion") return dict(zip(self.dbversion_cols, cur.fetchone())) def check_dbversion(self): dbversion = self.dbversion()["version"] if dbversion != self.current_version: logger.warning( "database dbversion (%s) != %s current_version (%s)", dbversion, __name__, self.current_version, ) return dbversion == self.current_version diff --git a/swh/storage/postgresql/storage.py 
b/swh/storage/postgresql/storage.py index c8e759a3..c84efd46 100644 --- a/swh/storage/postgresql/storage.py +++ b/swh/storage/postgresql/storage.py @@ -1,1420 +1,1420 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import base64 from collections import defaultdict import contextlib from contextlib import contextmanager import datetime import itertools from typing import Any, Counter, Dict, Iterable, List, Optional, Sequence, Tuple, Union import attr import psycopg2 import psycopg2.errors import psycopg2.pool from swh.core.api.serializers import msgpack_dumps, msgpack_loads from swh.core.db.common import db_transaction, db_transaction_generator from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex from swh.model.identifiers import SWHID from swh.model.model import ( SHA1_SIZE, Content, Directory, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, OriginVisit, OriginVisitStatus, RawExtrinsicMetadata, Release, Revision, Sha1, Sha1Git, SkippedContent, Snapshot, SnapshotBranch, TargetType, ) from swh.storage.exc import HashCollision, StorageArgumentException, StorageDBError from swh.storage.interface import ( VISIT_STATUSES, ListOrder, PagedResult, PartialBranches, ) from swh.storage.metrics import process_metrics, send_metric, timed from swh.storage.objstorage import ObjStorage from swh.storage.utils import ( extract_collision_hash, get_partition_bounds_bytes, map_optional, now, ) from swh.storage.writer import JournalWriter from . import converters from .db import Db # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 EMPTY_SNAPSHOT_ID = hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e") """Identifier for the empty snapshot""" VALIDATION_EXCEPTIONS = ( KeyError, TypeError, ValueError, psycopg2.errors.CheckViolation, psycopg2.errors.IntegrityError, psycopg2.errors.InvalidTextRepresentation, psycopg2.errors.NotNullViolation, psycopg2.errors.NumericValueOutOfRange, psycopg2.errors.UndefinedFunction, # (raised on wrong argument typs) ) """Exceptions raised by postgresql when validation of the arguments failed.""" @contextlib.contextmanager def convert_validation_exceptions(): """Catches postgresql errors related to invalid arguments, and re-raises a StorageArgumentException.""" try: yield except tuple(VALIDATION_EXCEPTIONS) as e: raise StorageArgumentException(str(e)) class Storage: """SWH storage proxy, encompassing DB and object storage """ def __init__( self, db, objstorage, min_pool_conns=1, max_pool_conns=10, journal_writer=None ): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection obj_root: path to the root of the object storage """ try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) self.journal_writer = JournalWriter(journal_writer) self.objstorage = ObjStorage(objstorage) def get_db(self): if self._db: return self._db else: return Db.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() @contextmanager def db(self): db = None try: db = self.get_db() yield db finally: if db: self.put_db(db) @timed @db_transaction() def check_config(self, 
*, check_write: bool, db=None, cur=None) -> bool: if not self.objstorage.check_config(check_write=check_write): return False if not db.check_dbversion(): return False # Check permissions on one of the tables if check_write: check = "INSERT" else: check = "SELECT" cur.execute("select has_table_privilege(current_user, 'content', %s)", (check,)) return cur.fetchone()[0] def _content_unique_key(self, hash, db): """Given a hash (tuple or dict), return a unique key from the aggregation of keys. """ keys = db.content_hash_keys if isinstance(hash, tuple): return hash return tuple([hash[k] for k in keys]) def _content_add_metadata(self, db, cur, content): """Add content to the postgresql database but not the object storage. """ # create temporary table for metadata injection db.mktemp("content", cur) db.copy_to( (c.to_dict() for c in content), "tmp_content", db.content_add_keys, cur ) # move metadata in place try: db.content_add_from_temp(cur) except psycopg2.IntegrityError as e: if e.diag.sqlstate == "23505" and e.diag.table_name == "content": message_detail = e.diag.message_detail if message_detail: hash_name, hash_id = extract_collision_hash(message_detail) collision_contents_hashes = [ c.hashes() for c in content if c.get_hash(hash_name) == hash_id ] else: constraint_to_hash_name = { "content_pkey": "sha1", "content_sha1_git_idx": "sha1_git", "content_sha256_idx": "sha256", } hash_name = constraint_to_hash_name.get(e.diag.constraint_name) hash_id = None collision_contents_hashes = None raise HashCollision( hash_name, hash_id, collision_contents_hashes ) from None else: raise @timed @process_metrics def content_add(self, content: List[Content]) -> Dict: ctime = now() contents = [attr.evolve(c, ctime=ctime) for c in content] objstorage_summary = self.objstorage.content_add(contents) with self.db() as db: with db.transaction() as cur: missing = list( self.content_missing( map(Content.to_dict, contents), key_hash="sha1_git", db=db, cur=cur, ) ) contents = [c for c in contents if c.sha1_git in missing] self.journal_writer.content_add(contents) self._content_add_metadata(db, cur, contents) return { "content:add": len(contents), "content:add:bytes": objstorage_summary["content:add:bytes"], } @timed @db_transaction() def content_update( self, contents: List[Dict[str, Any]], keys: List[str] = [], db=None, cur=None ) -> None: # TODO: Add a check on input keys. How to properly implement # this? We don't know yet the new columns. 
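
To make the connection handling in `Storage.__init__`/`get_db`/`put_db` above easier to follow, here is a cut-down sketch of the same pattern: a pooled connection is borrowed for the duration of a `with` block and returned afterwards. The DSN is a placeholder, and the snippet needs a reachable PostgreSQL server to actually run.

```
# Cut-down sketch of the pool-backed connection handling used by Storage;
# the DSN below is a placeholder, not a real database.
from contextlib import contextmanager

import psycopg2.pool

pool = psycopg2.pool.ThreadedConnectionPool(
    minconn=1, maxconn=10, dsn="dbname=softwareheritage-dev"
)

@contextmanager
def db_connection():
    conn = pool.getconn()      # borrow a connection from the pool
    try:
        yield conn
    finally:
        pool.putconn(conn)     # always hand it back, even on error

with db_connection() as conn:
    with conn.cursor() as cur:
        cur.execute("select 1")
        print(cur.fetchone())
```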
self.journal_writer.content_update(contents) db.mktemp("content", cur) select_keys = list(set(db.content_get_metadata_keys).union(set(keys))) with convert_validation_exceptions(): db.copy_to(contents, "tmp_content", select_keys, cur) db.content_update_from_temp(keys_to_update=keys, cur=cur) @timed @process_metrics @db_transaction() def content_add_metadata(self, content: List[Content], db=None, cur=None) -> Dict: missing = self.content_missing( (c.to_dict() for c in content), key_hash="sha1_git", db=db, cur=cur, ) contents = [c for c in content if c.sha1_git in missing] self.journal_writer.content_add_metadata(contents) self._content_add_metadata(db, cur, contents) return { "content:add": len(contents), } @timed def content_get_data(self, content: Sha1) -> Optional[bytes]: # FIXME: Make this method support slicing the `data` return self.objstorage.content_get(content) @timed @db_transaction() def content_get_partition( self, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[Content]: if limit is None: raise StorageArgumentException("limit should not be None") (start, end) = get_partition_bounds_bytes( partition_id, nb_partitions, SHA1_SIZE ) if page_token: start = hash_to_bytes(page_token) if end is None: end = b"\xff" * SHA1_SIZE next_page_token: Optional[str] = None contents = [] for counter, row in enumerate(db.content_get_range(start, end, limit + 1, cur)): row_d = dict(zip(db.content_get_metadata_keys, row)) content = Content(**row_d) if counter >= limit: # take the last content for the next page starting from this next_page_token = hash_to_hex(content.sha1) break contents.append(content) assert len(contents) <= limit return PagedResult(results=contents, next_page_token=next_page_token) @timed @db_transaction(statement_timeout=500) def content_get( self, contents: List[Sha1], db=None, cur=None ) -> List[Optional[Content]]: contents_by_sha1: Dict[Sha1, Optional[Content]] = {} for row in db.content_get_metadata_from_sha1s(contents, cur): row_d = dict(zip(db.content_get_metadata_keys, row)) content = Content(**row_d) contents_by_sha1[content.sha1] = content return [contents_by_sha1.get(sha1) for sha1 in contents] @timed @db_transaction_generator() def content_missing( self, contents: List[Dict[str, Any]], key_hash: str = "sha1", db=None, cur=None ) -> Iterable[bytes]: if key_hash not in DEFAULT_ALGORITHMS: raise StorageArgumentException( "key_hash should be one of {','.join(DEFAULT_ALGORITHMS)}" ) keys = db.content_hash_keys key_hash_idx = keys.index(key_hash) for obj in db.content_missing_from_list(contents, cur): yield obj[key_hash_idx] @timed @db_transaction_generator() def content_missing_per_sha1( self, contents: List[bytes], db=None, cur=None ) -> Iterable[bytes]: for obj in db.content_missing_per_sha1(contents, cur): yield obj[0] @timed @db_transaction_generator() def content_missing_per_sha1_git( self, contents: List[bytes], db=None, cur=None ) -> Iterable[Sha1Git]: for obj in db.content_missing_per_sha1_git(contents, cur): yield obj[0] @timed @db_transaction() def content_find(self, content: Dict[str, Any], db=None, cur=None) -> List[Content]: if not set(content).intersection(DEFAULT_ALGORITHMS): raise StorageArgumentException( "content keys must contain at least one " f"of: {', '.join(sorted(DEFAULT_ALGORITHMS))}" ) rows = db.content_find( sha1=content.get("sha1"), sha1_git=content.get("sha1_git"), sha256=content.get("sha256"), blake2s256=content.get("blake2s256"), cur=cur, ) contents = [] for row in 
rows: row_d = dict(zip(db.content_find_cols, row)) contents.append(Content(**row_d)) return contents @timed @db_transaction() def content_get_random(self, db=None, cur=None) -> Sha1Git: return db.content_get_random(cur) @staticmethod def _skipped_content_normalize(d): d = d.copy() if d.get("status") is None: d["status"] = "absent" if d.get("length") is None: d["length"] = -1 return d def _skipped_content_add_metadata(self, db, cur, content: List[SkippedContent]): origin_ids = db.origin_id_get_by_url([cont.origin for cont in content], cur=cur) content = [ attr.evolve(c, origin=origin_id) for (c, origin_id) in zip(content, origin_ids) ] db.mktemp("skipped_content", cur) db.copy_to( [c.to_dict() for c in content], "tmp_skipped_content", db.skipped_content_keys, cur, ) # move metadata in place db.skipped_content_add_from_temp(cur) @timed @process_metrics @db_transaction() def skipped_content_add( self, content: List[SkippedContent], db=None, cur=None ) -> Dict: ctime = now() content = [attr.evolve(c, ctime=ctime) for c in content] missing_contents = self.skipped_content_missing( (c.to_dict() for c in content), db=db, cur=cur, ) content = [ c for c in content if any( all( c.get_hash(algo) == missing_content.get(algo) for algo in DEFAULT_ALGORITHMS ) for missing_content in missing_contents ) ] self.journal_writer.skipped_content_add(content) self._skipped_content_add_metadata(db, cur, content) return { "skipped_content:add": len(content), } @timed @db_transaction_generator() def skipped_content_missing( self, contents: List[Dict[str, Any]], db=None, cur=None ) -> Iterable[Dict[str, Any]]: contents = list(contents) for content in db.skipped_content_missing(contents, cur): yield dict(zip(db.content_hash_keys, content)) @timed @process_metrics @db_transaction() def directory_add(self, directories: List[Directory], db=None, cur=None) -> Dict: summary = {"directory:add": 0} dirs = set() dir_entries: Dict[str, defaultdict] = { "file": defaultdict(list), "dir": defaultdict(list), "rev": defaultdict(list), } for cur_dir in directories: dir_id = cur_dir.id dirs.add(dir_id) for src_entry in cur_dir.entries: entry = src_entry.to_dict() entry["dir_id"] = dir_id dir_entries[entry["type"]][dir_id].append(entry) dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur)) if not dirs_missing: return summary self.journal_writer.directory_add( dir_ for dir_ in directories if dir_.id in dirs_missing ) # Copy directory ids dirs_missing_dict = ({"id": dir} for dir in dirs_missing) db.mktemp("directory", cur) db.copy_to(dirs_missing_dict, "tmp_directory", ["id"], cur) # Copy entries for entry_type, entry_list in dir_entries.items(): entries = itertools.chain.from_iterable( entries_for_dir for dir_id, entries_for_dir in entry_list.items() if dir_id in dirs_missing ) db.mktemp_dir_entry(entry_type) db.copy_to( entries, "tmp_directory_entry_%s" % entry_type, ["target", "name", "perms", "dir_id"], cur, ) # Do the final copy db.directory_add_from_temp(cur) summary["directory:add"] = len(dirs_missing) return summary @timed @db_transaction_generator() def directory_missing( self, directories: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: for obj in db.directory_missing_from_list(directories, cur): yield obj[0] @timed @db_transaction_generator(statement_timeout=20000) def directory_ls( self, directory: Sha1Git, recursive: bool = False, db=None, cur=None ) -> Iterable[Dict[str, Any]]: if recursive: res_gen = db.directory_walk(directory, cur=cur) else: res_gen = db.directory_walk_one(directory, cur=cur) for 
line in res_gen: yield dict(zip(db.directory_ls_cols, line)) @timed @db_transaction(statement_timeout=2000) def directory_entry_get_by_path( self, directory: Sha1Git, paths: List[bytes], db=None, cur=None ) -> Optional[Dict[str, Any]]: res = db.directory_entry_get_by_path(directory, paths, cur) return dict(zip(db.directory_ls_cols, res)) if res else None @timed @db_transaction() def directory_get_random(self, db=None, cur=None) -> Sha1Git: return db.directory_get_random(cur) @timed @process_metrics @db_transaction() def revision_add(self, revisions: List[Revision], db=None, cur=None) -> Dict: summary = {"revision:add": 0} revisions_missing = set( self.revision_missing( set(revision.id for revision in revisions), db=db, cur=cur ) ) if not revisions_missing: return summary db.mktemp_revision(cur) revisions_filtered = [ revision for revision in revisions if revision.id in revisions_missing ] self.journal_writer.revision_add(revisions_filtered) db_revisions_filtered = list(map(converters.revision_to_db, revisions_filtered)) parents_filtered: List[bytes] = [] with convert_validation_exceptions(): db.copy_to( db_revisions_filtered, "tmp_revision", db.revision_add_cols, cur, lambda rev: parents_filtered.extend(rev["parents"]), ) db.revision_add_from_temp(cur) db.copy_to( parents_filtered, "revision_history", ["id", "parent_id", "parent_rank"], cur, ) return {"revision:add": len(revisions_missing)} @timed @db_transaction_generator() def revision_missing( self, revisions: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: if not revisions: return None for obj in db.revision_missing_from_list(revisions, cur): yield obj[0] @timed @db_transaction(statement_timeout=1000) def revision_get( self, revision_ids: List[Sha1Git], db=None, cur=None ) -> List[Optional[Revision]]: revisions = [] for line in db.revision_get_from_list(revision_ids, cur): revision = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) revisions.append(revision) return revisions @timed @db_transaction_generator(statement_timeout=2000) def revision_log( self, revisions: List[Sha1Git], limit: Optional[int] = None, db=None, cur=None ) -> Iterable[Optional[Dict[str, Any]]]: for line in db.revision_log(revisions, limit, cur): data = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) if not data: yield None continue yield data.to_dict() @timed @db_transaction_generator(statement_timeout=2000) def revision_shortlog( self, revisions: List[Sha1Git], limit: Optional[int] = None, db=None, cur=None ) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]: yield from db.revision_shortlog(revisions, limit, cur) @timed @db_transaction() def revision_get_random(self, db=None, cur=None) -> Sha1Git: return db.revision_get_random(cur) @timed @process_metrics @db_transaction() def release_add(self, releases: List[Release], db=None, cur=None) -> Dict: summary = {"release:add": 0} release_ids = set(release.id for release in releases) releases_missing = set(self.release_missing(release_ids, db=db, cur=cur)) if not releases_missing: return summary db.mktemp_release(cur) releases_filtered = [ release for release in releases if release.id in releases_missing ] self.journal_writer.release_add(releases_filtered) db_releases_filtered = list(map(converters.release_to_db, releases_filtered)) with convert_validation_exceptions(): db.copy_to(db_releases_filtered, "tmp_release", db.release_add_cols, cur) db.release_add_from_temp(cur) return {"release:add": len(releases_missing)} @timed @db_transaction_generator() def 
release_missing( self, releases: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: if not releases: return for obj in db.release_missing_from_list(releases, cur): yield obj[0] @timed @db_transaction(statement_timeout=500) def release_get( self, releases: List[Sha1Git], db=None, cur=None ) -> List[Optional[Release]]: rels = [] for release in db.release_get_from_list(releases, cur): data = converters.db_to_release(dict(zip(db.release_get_cols, release))) rels.append(data if data else None) return rels @timed @db_transaction() def release_get_random(self, db=None, cur=None) -> Sha1Git: return db.release_get_random(cur) @timed @process_metrics @db_transaction() def snapshot_add(self, snapshots: List[Snapshot], db=None, cur=None) -> Dict: created_temp_table = False count = 0 for snapshot in snapshots: if not db.snapshot_exists(snapshot.id, cur): if not created_temp_table: db.mktemp_snapshot_branch(cur) created_temp_table = True with convert_validation_exceptions(): db.copy_to( ( { "name": name, "target": info.target if info else None, "target_type": ( info.target_type.value if info else None ), } for name, info in snapshot.branches.items() ), "tmp_snapshot_branch", ["name", "target", "target_type"], cur, ) self.journal_writer.snapshot_add([snapshot]) db.snapshot_add(snapshot.id, cur) count += 1 return {"snapshot:add": count} @timed @db_transaction_generator() def snapshot_missing( self, snapshots: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: for obj in db.snapshot_missing_from_list(snapshots, cur): yield obj[0] @timed @db_transaction(statement_timeout=2000) def snapshot_get( self, snapshot_id: Sha1Git, db=None, cur=None ) -> Optional[Dict[str, Any]]: d = self.snapshot_get_branches(snapshot_id) if d is None: return d return { "id": d["id"], "branches": { name: branch.to_dict() if branch else None for (name, branch) in d["branches"].items() }, "next_branch": d["next_branch"], } @timed @db_transaction(statement_timeout=2000) def snapshot_count_branches( self, snapshot_id: Sha1Git, db=None, cur=None ) -> Optional[Dict[Optional[str], int]]: return dict([bc for bc in db.snapshot_count_branches(snapshot_id, cur)]) @timed @db_transaction(statement_timeout=2000) def snapshot_get_branches( self, snapshot_id: Sha1Git, branches_from: bytes = b"", branches_count: int = 1000, target_types: Optional[List[str]] = None, db=None, cur=None, ) -> Optional[PartialBranches]: if snapshot_id == EMPTY_SNAPSHOT_ID: return PartialBranches(id=snapshot_id, branches={}, next_branch=None,) branches = {} next_branch = None fetched_branches = list( db.snapshot_get_by_id( snapshot_id, branches_from=branches_from, branches_count=branches_count + 1, target_types=target_types, cur=cur, ) ) for row in fetched_branches[:branches_count]: branch_d = dict(zip(db.snapshot_get_cols, row)) del branch_d["snapshot_id"] name = branch_d.pop("name") if branch_d["target"] is None and branch_d["target_type"] is None: branch = None else: assert branch_d["target_type"] is not None branch = SnapshotBranch( target=branch_d["target"], target_type=TargetType(branch_d["target_type"]), ) branches[name] = branch if len(fetched_branches) > branches_count: next_branch = dict(zip(db.snapshot_get_cols, fetched_branches[-1]))["name"] if branches: return PartialBranches( id=snapshot_id, branches=branches, next_branch=next_branch, ) return None @timed @db_transaction() def snapshot_get_random(self, db=None, cur=None) -> Sha1Git: return db.snapshot_get_random(cur) @timed @db_transaction() def origin_visit_add( self, visits: List[OriginVisit], 
db=None, cur=None ) -> Iterable[OriginVisit]: for visit in visits: origin = self.origin_get([visit.origin], db=db, cur=cur)[0] if not origin: # Cannot add a visit without an origin raise StorageArgumentException("Unknown origin %s", visit.origin) all_visits = [] nb_visits = 0 for visit in visits: nb_visits += 1 if not visit.visit: with convert_validation_exceptions(): visit_id = db.origin_visit_add( visit.origin, visit.date, visit.type, cur=cur ) visit = attr.evolve(visit, visit=visit_id) else: db.origin_visit_add_with_id(visit, cur=cur) assert visit.visit is not None all_visits.append(visit) # Forced to write after for the case when the visit has no id self.journal_writer.origin_visit_add([visit]) visit_status = OriginVisitStatus( origin=visit.origin, visit=visit.visit, date=visit.date, status="created", snapshot=None, ) self._origin_visit_status_add(visit_status, db=db, cur=cur) send_metric("origin_visit:add", count=nb_visits, method_name="origin_visit") return all_visits def _origin_visit_status_add( self, visit_status: OriginVisitStatus, db, cur ) -> None: """Add an origin visit status""" self.journal_writer.origin_visit_status_add([visit_status]) db.origin_visit_status_add(visit_status, cur=cur) send_metric( "origin_visit_status:add", count=1, method_name="origin_visit_status" ) @timed @db_transaction() def origin_visit_status_add( self, visit_statuses: List[OriginVisitStatus], db=None, cur=None, ) -> None: # First round to check existence (fail early if any is ko) for visit_status in visit_statuses: origin_url = self.origin_get([visit_status.origin], db=db, cur=cur)[0] if not origin_url: raise StorageArgumentException(f"Unknown origin {visit_status.origin}") for visit_status in visit_statuses: self._origin_visit_status_add(visit_status, db, cur) @timed @db_transaction() def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, db=None, cur=None, ) -> Optional[OriginVisitStatus]: if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES): raise StorageArgumentException( f"Unknown allowed statuses {','.join(allowed_statuses)}, only " f"{','.join(VISIT_STATUSES)} authorized" ) row = db.origin_visit_status_get_latest( origin_url, visit, allowed_statuses, require_snapshot, cur=cur ) if not row: return None return OriginVisitStatus.from_dict(row) @timed @db_transaction(statement_timeout=500) def origin_visit_get( self, origin: str, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, db=None, cur=None, ) -> PagedResult[OriginVisit]: page_token = page_token or "0" if not isinstance(order, ListOrder): raise StorageArgumentException("order must be a ListOrder value") if not isinstance(page_token, str): raise StorageArgumentException("page_token must be a string.") next_page_token = None visit_from = int(page_token) visits: List[OriginVisit] = [] extra_limit = limit + 1 for row in db.origin_visit_get_range( origin, visit_from=visit_from, order=order, limit=extra_limit, cur=cur ): row_d = dict(zip(db.origin_visit_cols, row)) visits.append( OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) ) assert len(visits) <= extra_limit if len(visits) == extra_limit: visits = visits[:limit] next_page_token = str(visits[-1].visit) return PagedResult(results=visits, next_page_token=next_page_token) @timed @db_transaction(statement_timeout=500) def origin_visit_find_by_date( self, origin: str, visit_date: 
datetime.datetime, db=None, cur=None ) -> Optional[OriginVisit]: row_d = db.origin_visit_find_by_date(origin, visit_date, cur=cur) if not row_d: return None return OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) @timed @db_transaction(statement_timeout=500) def origin_visit_get_by( self, origin: str, visit: int, db=None, cur=None ) -> Optional[OriginVisit]: row = db.origin_visit_get(origin, visit, cur) if row: row_d = dict(zip(db.origin_visit_get_cols, row)) return OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) return None @timed @db_transaction(statement_timeout=4000) def origin_visit_get_latest( self, origin: str, type: Optional[str] = None, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, db=None, cur=None, ) -> Optional[OriginVisit]: if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES): raise StorageArgumentException( f"Unknown allowed statuses {','.join(allowed_statuses)}, only " f"{','.join(VISIT_STATUSES)} authorized" ) row = db.origin_visit_get_latest( origin, type=type, allowed_statuses=allowed_statuses, require_snapshot=require_snapshot, cur=cur, ) if row: row_d = dict(zip(db.origin_visit_get_cols, row)) visit = OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) return visit return None @timed @db_transaction(statement_timeout=500) def origin_visit_status_get( self, origin: str, visit: int, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, db=None, cur=None, ) -> PagedResult[OriginVisitStatus]: next_page_token = None date_from = None if page_token is not None: date_from = datetime.datetime.fromisoformat(page_token) visit_statuses: List[OriginVisitStatus] = [] # Take one more visit status so we can reuse it as the next page token if any for row in db.origin_visit_status_get_range( origin, visit, date_from=date_from, order=order, limit=limit + 1, cur=cur, ): row_d = dict(zip(db.origin_visit_status_cols, row)) visit_statuses.append( OriginVisitStatus( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], status=row_d["status"], snapshot=row_d["snapshot"], metadata=row_d["metadata"], ) ) if len(visit_statuses) > limit: # last visit status date is the next page token next_page_token = str(visit_statuses[-1].date) # excluding that visit status from the result to respect the limit size visit_statuses = visit_statuses[:limit] return PagedResult(results=visit_statuses, next_page_token=next_page_token) @timed @db_transaction() def origin_visit_status_get_random( self, type: str, db=None, cur=None ) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]: row = db.origin_visit_get_random(type, cur) if row is not None: row_d = dict(zip(db.origin_visit_get_cols, row)) visit = OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) visit_status = OriginVisitStatus( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], status=row_d["status"], metadata=row_d["metadata"], snapshot=row_d["snapshot"], ) return visit, visit_status return None @timed @db_transaction(statement_timeout=2000) def object_find_by_sha1_git( self, ids: List[Sha1Git], db=None, cur=None ) -> Dict[Sha1Git, List[Dict]]: ret: Dict[Sha1Git, List[Dict]] = {id: [] for id in ids} for retval in db.object_find_by_sha1_git(ids, cur=cur): if retval[1]: ret[retval[0]].append( dict(zip(db.object_find_by_sha1_git_cols, retval)) ) 
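
The visit and status listings above all return `PagedResult` objects; a caller drains them by feeding `next_page_token` back in until it becomes None. A hedged sketch, where `storage` stands for any object implementing this interface and the origin URL is illustrative:

```
# Illustrative pagination loop over origin_visit_get; `storage` is assumed to be
# a swh.storage instance obtained elsewhere (e.g. via swh.storage.get_storage).
def iter_all_visits(storage, origin_url: str, page_size: int = 10):
    page_token = None
    while True:
        page = storage.origin_visit_get(
            origin_url, page_token=page_token, limit=page_size
        )
        yield from page.results
        if page.next_page_token is None:
            break
        page_token = page.next_page_token

# for visit in iter_all_visits(storage, "https://example.org/illustrative/repo"):
#     print(visit.visit, visit.date, visit.type)
```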
return ret @timed @db_transaction(statement_timeout=500) def origin_get( self, origins: List[str], db=None, cur=None ) -> Iterable[Optional[Origin]]: rows = db.origin_get_by_url(origins, cur) result: List[Optional[Origin]] = [] for row in rows: origin_d = dict(zip(db.origin_cols, row)) url = origin_d["url"] result.append(None if url is None else Origin(url=url)) return result @timed @db_transaction(statement_timeout=500) def origin_get_by_sha1( self, sha1s: List[bytes], db=None, cur=None ) -> List[Optional[Dict[str, Any]]]: return [ dict(zip(db.origin_cols, row)) if row[0] else None for row in db.origin_get_by_sha1(sha1s, cur) ] @timed @db_transaction_generator() def origin_get_range(self, origin_from=1, origin_count=100, db=None, cur=None): for origin in db.origin_get_range(origin_from, origin_count, cur): yield dict(zip(db.origin_get_range_cols, origin)) @timed @db_transaction() def origin_list( self, page_token: Optional[str] = None, limit: int = 100, *, db=None, cur=None ) -> PagedResult[Origin]: page_token = page_token or "0" if not isinstance(page_token, str): raise StorageArgumentException("page_token must be a string.") origin_from = int(page_token) next_page_token = None origins: List[Origin] = [] # Take one more origin so we can reuse it as the next page token if any for row_d in self.origin_get_range(origin_from, limit + 1, db=db, cur=cur): origins.append(Origin(url=row_d["url"])) # keep the last_id for the pagination if needed last_id = row_d["id"] if len(origins) > limit: # data left for subsequent call # last origin id is the next page token next_page_token = str(last_id) # excluding that origin from the result to respect the limit size origins = origins[:limit] assert len(origins) <= limit return PagedResult(results=origins, next_page_token=next_page_token) @timed @db_transaction() def origin_search( self, url_pattern: str, page_token: Optional[str] = None, limit: int = 50, regexp: bool = False, with_visit: bool = False, db=None, cur=None, ) -> PagedResult[Origin]: next_page_token = None offset = int(page_token) if page_token else 0 origins = [] # Take one more origin so we can reuse it as the next page token if any for origin in db.origin_search( url_pattern, offset, limit + 1, regexp, with_visit, cur ): row_d = dict(zip(db.origin_cols, origin)) origins.append(Origin(url=row_d["url"])) if len(origins) > limit: # next offset next_page_token = str(offset + limit) # excluding that origin from the result to respect the limit size origins = origins[:limit] assert len(origins) <= limit return PagedResult(results=origins, next_page_token=next_page_token) @timed @db_transaction() def origin_count( self, url_pattern: str, regexp: bool = False, with_visit: bool = False, db=None, cur=None, ) -> int: return db.origin_count(url_pattern, regexp, with_visit, cur) @timed @process_metrics @db_transaction() def origin_add(self, origins: List[Origin], db=None, cur=None) -> Dict[str, int]: urls = [o.url for o in origins] known_origins = set(url for (url,) in db.origin_get_by_url(urls, cur)) # keep only one occurrence of each given origin while keeping the list # sorted as originally given to_add = sorted(set(urls) - known_origins, key=urls.index) self.journal_writer.origin_add([Origin(url=url) for url in to_add]) added = 0 for url in to_add: if db.origin_add(url, cur): added += 1 return {"origin:add": added} @db_transaction(statement_timeout=500) def stat_counters(self, db=None, cur=None): return {k: v for (k, v) in db.stat_counters()} @db_transaction() def refresh_stat_counters(self, db=None, 
cur=None): keys = [ "content", "directory", "directory_entry_dir", "directory_entry_file", "directory_entry_rev", "origin", "origin_visit", "person", "release", "revision", "revision_history", "skipped_content", "snapshot", ] for key in keys: cur.execute("select * from swh_update_counter(%s)", (key,)) @db_transaction() def raw_extrinsic_metadata_add( self, metadata: List[RawExtrinsicMetadata], db, cur, ) -> None: metadata = list(metadata) self.journal_writer.raw_extrinsic_metadata_add(metadata) counter = Counter[MetadataTargetType]() for metadata_entry in metadata: authority_id = self._get_authority_id(metadata_entry.authority, db, cur) fetcher_id = self._get_fetcher_id(metadata_entry.fetcher, db, cur) db.raw_extrinsic_metadata_add( type=metadata_entry.type.value, - id=str(metadata_entry.id), + target=str(metadata_entry.target), discovery_date=metadata_entry.discovery_date, authority_id=authority_id, fetcher_id=fetcher_id, format=metadata_entry.format, metadata=metadata_entry.metadata, origin=metadata_entry.origin, visit=metadata_entry.visit, snapshot=map_optional(str, metadata_entry.snapshot), release=map_optional(str, metadata_entry.release), revision=map_optional(str, metadata_entry.revision), path=metadata_entry.path, directory=map_optional(str, metadata_entry.directory), cur=cur, ) counter[metadata_entry.type] += 1 for (type, count) in counter.items(): send_metric( f"{type.value}_metadata:add", count=count, method_name=f"{type.value}_metadata_add", ) @db_transaction() def raw_extrinsic_metadata_get( self, type: MetadataTargetType, - id: Union[str, SWHID], + target: Union[str, SWHID], authority: MetadataAuthority, after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[RawExtrinsicMetadata]: if type == MetadataTargetType.ORIGIN: - if isinstance(id, SWHID): + if isinstance(target, SWHID): raise StorageArgumentException( f"raw_extrinsic_metadata_get called with type='origin', " - f"but provided id is an SWHID: {id!r}" + f"but provided target is a SWHID: {target!r}" ) else: - if not isinstance(id, SWHID): + if not isinstance(target, SWHID): raise StorageArgumentException( f"raw_extrinsic_metadata_get called with type!='origin', " - f"but provided id is not an SWHID: {id!r}" + f"but provided target is not a SWHID: {target!r}" ) if page_token: (after_time, after_fetcher) = msgpack_loads(base64.b64decode(page_token)) if after and after_time < after: raise StorageArgumentException( "page_token is inconsistent with the value of 'after'." 
) else: after_time = after after_fetcher = None authority_id = self._get_authority_id(authority, db, cur) if not authority_id: return PagedResult(next_page_token=None, results=[],) rows = db.raw_extrinsic_metadata_get( - type, str(id), authority_id, after_time, after_fetcher, limit + 1, cur, + type, str(target), authority_id, after_time, after_fetcher, limit + 1, cur, ) rows = [dict(zip(db.raw_extrinsic_metadata_get_cols, row)) for row in rows] results = [] for row in rows: - assert str(id) == row["raw_extrinsic_metadata.id"] + assert str(target) == row["raw_extrinsic_metadata.target"] results.append(converters.db_to_raw_extrinsic_metadata(row)) if len(results) > limit: results.pop() assert len(results) == limit last_returned_row = rows[-2] # rows[-1] corresponds to the popped result next_page_token: Optional[str] = base64.b64encode( msgpack_dumps( ( last_returned_row["discovery_date"], last_returned_row["metadata_fetcher.id"], ) ) ).decode() else: next_page_token = None return PagedResult(next_page_token=next_page_token, results=results,) @timed @db_transaction() def metadata_fetcher_add( self, fetchers: List[MetadataFetcher], db=None, cur=None ) -> None: fetchers = list(fetchers) self.journal_writer.metadata_fetcher_add(fetchers) count = 0 for fetcher in fetchers: if fetcher.metadata is None: raise StorageArgumentException( "MetadataFetcher.metadata may not be None in metadata_fetcher_add." ) db.metadata_fetcher_add( fetcher.name, fetcher.version, dict(fetcher.metadata), cur=cur ) count += 1 send_metric("metadata_fetcher:add", count=count, method_name="metadata_fetcher") @timed @db_transaction(statement_timeout=500) def metadata_fetcher_get( self, name: str, version: str, db=None, cur=None ) -> Optional[MetadataFetcher]: row = db.metadata_fetcher_get(name, version, cur=cur) if not row: return None return MetadataFetcher.from_dict(dict(zip(db.metadata_fetcher_cols, row))) @timed @db_transaction() def metadata_authority_add( self, authorities: List[MetadataAuthority], db=None, cur=None ) -> None: authorities = list(authorities) self.journal_writer.metadata_authority_add(authorities) count = 0 for authority in authorities: if authority.metadata is None: raise StorageArgumentException( "MetadataAuthority.metadata may not be None in " "metadata_authority_add." 
) db.metadata_authority_add( authority.type.value, authority.url, dict(authority.metadata), cur=cur ) count += 1 send_metric( "metadata_authority:add", count=count, method_name="metadata_authority" ) @timed @db_transaction() def metadata_authority_get( self, type: MetadataAuthorityType, url: str, db=None, cur=None ) -> Optional[MetadataAuthority]: row = db.metadata_authority_get(type.value, url, cur=cur) if not row: return None return MetadataAuthority.from_dict(dict(zip(db.metadata_authority_cols, row))) def clear_buffers(self, object_types: Sequence[str] = ()) -> None: """Do nothing """ return None def flush(self, object_types: Sequence[str] = ()) -> Dict[str, int]: return {} def _get_authority_id(self, authority: MetadataAuthority, db, cur): authority_id = db.metadata_authority_get_id( authority.type.value, authority.url, cur ) if not authority_id: raise StorageArgumentException(f"Unknown authority {authority}") return authority_id def _get_fetcher_id(self, fetcher: MetadataFetcher, db, cur): fetcher_id = db.metadata_fetcher_get_id(fetcher.name, fetcher.version, cur) if not fetcher_id: raise StorageArgumentException(f"Unknown fetcher {fetcher}") return fetcher_id diff --git a/swh/storage/pytest_plugin.py b/swh/storage/pytest_plugin.py index 01565700..018b761d 100644 --- a/swh/storage/pytest_plugin.py +++ b/swh/storage/pytest_plugin.py @@ -1,200 +1,54 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import glob from os import environ, path -import subprocess -from typing import Union import pytest -from pytest_postgresql import factories -from pytest_postgresql.janitor import DatabaseJanitor, Version, psycopg2 -from swh.core.utils import numfile_sortkey as sortkey +from swh.core.db.pytest_plugin import postgresql_fact import swh.storage from swh.storage import get_storage from swh.storage.tests.storage_data import StorageData SQL_DIR = path.join(path.dirname(swh.storage.__file__), "sql") environ["LC_ALL"] = "C.UTF-8" -DUMP_FILES = path.join(SQL_DIR, "*.sql") - - -# the postgres_fact factory fixture below is mostly a copy of the code -# from pytest-postgresql. We need a custom version here to be able to -# specify our version of the DBJanitor we use. -def postgresql_fact(process_fixture_name, db_name=None, dump_files=DUMP_FILES): - @pytest.fixture - def postgresql_factory(request): - """ - Fixture factory for PostgreSQL. - - :param FixtureRequest request: fixture request object - :rtype: psycopg2.connection - :returns: postgresql client - """ - config = factories.get_config(request) - if not psycopg2: - raise ImportError("No module named psycopg2. 
Please install it.") - proc_fixture = request.getfixturevalue(process_fixture_name) - - # _, config = try_import('psycopg2', request) - pg_host = proc_fixture.host - pg_port = proc_fixture.port - pg_user = proc_fixture.user - pg_options = proc_fixture.options - pg_db = db_name or config["dbname"] - with SwhDatabaseJanitor( - pg_user, - pg_host, - pg_port, - pg_db, - proc_fixture.version, - dump_files=dump_files, - ): - connection = psycopg2.connect( - dbname=pg_db, - user=pg_user, - host=pg_host, - port=pg_port, - options=pg_options, - ) - yield connection - connection.close() - - return postgresql_factory - - -swh_storage_postgresql = postgresql_fact("postgresql_proc", db_name="storage") + +swh_storage_postgresql = postgresql_fact( + "postgresql_proc", db_name="storage", dump_files=path.join(SQL_DIR, "*.sql") +) @pytest.fixture def swh_storage_backend_config(swh_storage_postgresql): """Basic pg storage configuration with no journal collaborator (to avoid pulling optional dependency on clients of this fixture) """ yield { "cls": "local", "db": swh_storage_postgresql.dsn, "objstorage": {"cls": "memory"}, "check_config": {"check_write": True}, } @pytest.fixture def swh_storage(swh_storage_backend_config): return get_storage(**swh_storage_backend_config) -# This version of the DatabaseJanitor implement a different setup/teardown -# behavior than than the stock one: instead of dropping, creating and -# initializing the database for each test, it create and initialize the db only -# once, then it truncate the tables. This is needed to have acceptable test -# performances. -class SwhDatabaseJanitor(DatabaseJanitor): - def __init__( - self, - user: str, - host: str, - port: str, - db_name: str, - version: Union[str, float, Version], - dump_files: str = DUMP_FILES, - ) -> None: - super().__init__(user, host, port, db_name, version) - self.dump_files = sorted(glob.glob(dump_files), key=sortkey) - - def db_setup(self): - conninfo = ( - f"host={self.host} user={self.user} port={self.port} dbname={self.db_name}" - ) - - for fname in self.dump_files: - subprocess.check_call( - [ - "psql", - "--quiet", - "--no-psqlrc", - "-v", - "ON_ERROR_STOP=1", - "-d", - conninfo, - "-f", - fname, - ] - ) - - def db_reset(self): - with psycopg2.connect( - dbname=self.db_name, user=self.user, host=self.host, port=self.port, - ) as cnx: - with cnx.cursor() as cur: - cur.execute( - "SELECT table_name FROM information_schema.tables " - "WHERE table_schema = %s", - ("public",), - ) - tables = set(table for (table,) in cur.fetchall()) - {"dbversion"} - for table in tables: - cur.execute("truncate table %s cascade" % table) - - cur.execute( - "SELECT sequence_name FROM information_schema.sequences " - "WHERE sequence_schema = %s", - ("public",), - ) - seqs = set(seq for (seq,) in cur.fetchall()) - for seq in seqs: - cur.execute("ALTER SEQUENCE %s RESTART;" % seq) - cnx.commit() - - def init(self): - with self.cursor() as cur: - cur.execute( - "SELECT COUNT(1) FROM pg_database WHERE datname=%s;", (self.db_name,) - ) - db_exists = cur.fetchone()[0] == 1 - if db_exists: - cur.execute( - "UPDATE pg_database SET datallowconn=true " "WHERE datname = %s;", - (self.db_name,), - ) - - if db_exists: - self.db_reset() - else: - with self.cursor() as cur: - cur.execute('CREATE DATABASE "{}";'.format(self.db_name)) - self.db_setup() - - def drop(self): - pid_column = "pid" - with self.cursor() as cur: - cur.execute( - "UPDATE pg_database SET datallowconn=false " "WHERE datname = %s;", - (self.db_name,), - ) - cur.execute( - "SELECT 
pg_terminate_backend(pg_stat_activity.{})" - "FROM pg_stat_activity " - "WHERE pg_stat_activity.datname = %s;".format(pid_column), - (self.db_name,), - ) - - @pytest.fixture def sample_data() -> StorageData: """Pre-defined sample storage object data to manipulate Returns: StorageData whose attribute keys are data model objects. Either multiple objects: contents, directories, revisions, releases, ... or simple ones: content, directory, revision, release, ... """ return StorageData() diff --git a/swh/storage/sql/30-schema.sql b/swh/storage/sql/30-schema.sql index 3e027018..e0a705fd 100644 --- a/swh/storage/sql/30-schema.sql +++ b/swh/storage/sql/30-schema.sql @@ -1,499 +1,499 @@ --- --- SQL implementation of the Software Heritage data model --- -- schema versions create table dbversion ( version int primary key, release timestamptz, description text ); comment on table dbversion is 'Details of current db version'; comment on column dbversion.version is 'SQL schema version'; comment on column dbversion.release is 'Version deployment timestamp'; comment on column dbversion.description is 'Release description'; -- latest schema version insert into dbversion(version, release, description) - values(163, now(), 'Work In Progress'); + values(164, now(), 'Work In Progress'); -- a SHA1 checksum create domain sha1 as bytea check (length(value) = 20); -- a Git object ID, i.e., a Git-style salted SHA1 checksum create domain sha1_git as bytea check (length(value) = 20); -- a SHA256 checksum create domain sha256 as bytea check (length(value) = 32); -- a blake2 checksum create domain blake2s256 as bytea check (length(value) = 32); -- UNIX path (absolute, relative, individual path component, etc.) create domain unix_path as bytea; -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod create domain file_perms as int; -- an SWHID create domain swhid as text check (value ~ '^swh:[0-9]+:.*'); -- Checksums about actual file content. Note that the content itself is not -- stored in the DB, but on external (key-value) storage. A single checksum is -- used as key there, but the others can be used to verify that we do not -- unknowingly inject content collisions. create table content ( sha1 sha1 not null, sha1_git sha1_git not null, sha256 sha256 not null, blake2s256 blake2s256 not null, length bigint not null, ctime timestamptz not null default now(), -- creation time, i.e. time of (first) injection into the storage status content_status not null default 'visible', object_id bigserial ); comment on table content is 'Checksums of file content which is actually stored externally'; comment on column content.sha1 is 'Content sha1 hash'; comment on column content.sha1_git is 'Git object sha1 hash'; comment on column content.sha256 is 'Content Sha256 hash'; comment on column content.blake2s256 is 'Content blake2s hash'; comment on column content.length is 'Content length'; comment on column content.ctime is 'First seen time'; comment on column content.status is 'Content status (absent, visible, hidden)'; comment on column content.object_id is 'Content identifier'; -- An origin is a place, identified by a URL, where software source code -- artifacts can be found. We support different kinds of origins, e.g., git and -- other VCS repositories, web pages that list tarball URLs (e.g., -- http://www.kernel.org), indirect tarball URLs (e.g., -- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is -- that it can be *fetched* from (wget, git clone, svn checkout, etc.)
to -- retrieve all the contained software. create table origin ( id bigserial not null, url text not null ); comment on column origin.id is 'Artifact origin id'; comment on column origin.url is 'URL of origin'; -- Content blobs observed somewhere, but not ingested into the archive for -- whatever reason. This table is separate from the content table as we might -- not have the sha1 checksum of skipped contents (for instance when we inject -- git repositories, objects that are too big will be skipped here, and we will -- only know their sha1_git). 'reason' contains the reason the content was -- skipped. origin is a nullable column making it possible to find out which origin -- contains that skipped content. create table skipped_content ( sha1 sha1, sha1_git sha1_git, sha256 sha256, blake2s256 blake2s256, length bigint not null, ctime timestamptz not null default now(), status content_status not null default 'absent', reason text not null, origin bigint, object_id bigserial ); comment on table skipped_content is 'Content blobs observed, but not ingested in the archive'; comment on column skipped_content.sha1 is 'Skipped content sha1 hash'; comment on column skipped_content.sha1_git is 'Git object sha1 hash'; comment on column skipped_content.sha256 is 'Skipped content sha256 hash'; comment on column skipped_content.blake2s256 is 'Skipped content blake2s hash'; comment on column skipped_content.length is 'Skipped content length'; comment on column skipped_content.ctime is 'First seen time'; comment on column skipped_content.status is 'Skipped content status (absent, visible, hidden)'; comment on column skipped_content.reason is 'Reason for skipping'; comment on column skipped_content.origin is 'Origin table identifier'; comment on column skipped_content.object_id is 'Skipped content identifier'; -- A file-system directory. A directory is a list of directory entries (see -- tables: directory_entry_{dir,file}). -- -- To list the contents of a directory: -- 1. list the contained directory_entry_dir using array dir_entries -- 2. list the contained directory_entry_file using array file_entries -- 3. list the contained directory_entry_rev using array rev_entries -- 4. UNION -- -- Synonyms/mappings: -- * git: tree create table directory ( id sha1_git not null, dir_entries bigint[], -- sub-directories, reference directory_entry_dir file_entries bigint[], -- contained files, reference directory_entry_file rev_entries bigint[], -- mounted revisions, reference directory_entry_rev object_id bigserial -- short object identifier ); comment on table directory is 'Contents of a directory, synonymous with tree (git)'; comment on column directory.id is 'Git object sha1 hash'; comment on column directory.dir_entries is 'Sub-directories, reference directory_entry_dir'; comment on column directory.file_entries is 'Contained files, reference directory_entry_file'; comment on column directory.rev_entries is 'Mounted revisions, reference directory_entry_rev'; comment on column directory.object_id is 'Short object identifier';
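Editor's note: the query below is an illustrative sketch only and is not part of the schema file or of this patch. Assuming a client that substitutes the placeholder bind parameter `:dir_id` (a name invented here), it shows one way to carry out the four-step listing procedure described in the comments on the `directory` table above; the server's own code may implement this differently.

```
-- List the entries of one directory by expanding its three entry arrays
-- and taking the union, as described in the comments on the directory table.
select 'dir' as entry_type, e.target, e.name, e.perms
  from directory d join directory_entry_dir e on e.id = any(d.dir_entries)
 where d.id = :dir_id
union all
select 'file', e.target, e.name, e.perms
  from directory d join directory_entry_file e on e.id = any(d.file_entries)
 where d.id = :dir_id
union all
select 'rev', e.target, e.name, e.perms
  from directory d join directory_entry_rev e on e.id = any(d.rev_entries)
 where d.id = :dir_id;
```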
-- A directory entry pointing to a (sub-)directory. create table directory_entry_dir ( id bigserial, target sha1_git not null, -- id of target directory name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_dir is 'Directory entry for directory'; comment on column directory_entry_dir.id is 'Directory identifier'; comment on column directory_entry_dir.target is 'Target directory identifier'; comment on column directory_entry_dir.name is 'Path name, relative to containing directory'; comment on column directory_entry_dir.perms is 'Unix-like permissions'; -- A directory entry pointing to a file content. create table directory_entry_file ( id bigserial, target sha1_git not null, -- id of target file name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_file is 'Directory entry for file'; comment on column directory_entry_file.id is 'File identifier'; comment on column directory_entry_file.target is 'Target file identifier'; comment on column directory_entry_file.name is 'Path name, relative to containing directory'; comment on column directory_entry_file.perms is 'Unix-like permissions'; -- A directory entry pointing to a revision. create table directory_entry_rev ( id bigserial, target sha1_git not null, -- id of target revision name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_rev is 'Directory entry for revision'; comment on column directory_entry_rev.id is 'Revision entry identifier'; comment on column directory_entry_rev.target is 'Target revision identifier'; comment on column directory_entry_rev.name is 'Path name, relative to containing directory'; comment on column directory_entry_rev.perms is 'Unix-like permissions'; -- A person referenced by some source code artifacts, e.g., a VCS revision or -- release metadata. create table person ( id bigserial, name bytea, -- advisory: not null if we managed to parse a name email bytea, -- advisory: not null if we managed to parse an email fullname bytea not null -- freeform specification; what is actually used in the checksums -- will usually be of the form 'name <email>' ); comment on table person is 'Person referenced in code artifact release metadata'; comment on column person.id is 'Person identifier'; comment on column person.name is 'Name'; comment on column person.email is 'Email'; comment on column person.fullname is 'Full name (raw name)'; -- The state of a source code tree at a specific point in time. -- -- Synonyms/mappings: -- * git / subversion / etc: commit -- * tarball: a specific tarball -- -- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in -- case of merges) parent revisions. Each revision points to a directory, i.e., -- a file-system tree containing files and directories. create table revision ( id sha1_git not null, date timestamptz, date_offset smallint, committer_date timestamptz, committer_date_offset smallint, type revision_type not null, directory sha1_git, -- source code 'root' directory message bytea, author bigint, committer bigint, synthetic boolean not null default false, -- true iff revision has been created by Software Heritage metadata jsonb, -- extra metadata (tarball checksums, extra commit information, etc...)
object_id bigserial, date_neg_utc_offset boolean, committer_date_neg_utc_offset boolean, extra_headers bytea[][] not null -- extra headers (used in hash computation) ); comment on table revision is 'A revision represents the state of a source code tree at a specific point in time'; comment on column revision.id is 'Git-style SHA1 commit identifier'; comment on column revision.date is 'Author timestamp as UNIX epoch'; comment on column revision.date_offset is 'Author timestamp timezone, as minute offsets from UTC'; comment on column revision.date_neg_utc_offset is 'True indicates a -0 UTC offset on author timestamp'; comment on column revision.committer_date is 'Committer timestamp as UNIX epoch'; comment on column revision.committer_date_offset is 'Committer timestamp timezone, as minute offsets from UTC'; comment on column revision.committer_date_neg_utc_offset is 'True indicates a -0 UTC offset on committer timestamp'; comment on column revision.type is 'Type of revision'; comment on column revision.directory is 'Directory identifier'; comment on column revision.message is 'Commit message'; comment on column revision.author is 'Author identity'; comment on column revision.committer is 'Committer identity'; comment on column revision.synthetic is 'True iff revision has been synthesized by Software Heritage'; comment on column revision.metadata is 'Extra revision metadata'; comment on column revision.object_id is 'Non-intrinsic, sequential object identifier'; comment on column revision.extra_headers is 'Extra revision headers; used in revision hash computation'; -- either this table or the sha1_git[] column on the revision table create table revision_history ( id sha1_git not null, parent_id sha1_git not null, parent_rank int not null default 0 -- parent position in merge commits, 0-based ); comment on table revision_history is 'Sequence of revision history with parent and position in history'; comment on column revision_history.id is 'Revision history git object sha1 checksum'; comment on column revision_history.parent_id is 'Parent revision git object identifier'; comment on column revision_history.parent_rank is 'Parent position in merge commits, 0-based'; -- Crawling history of software origins visited by Software Heritage. Each -- visit is a 3-way mapping between a software origin, a timestamp, and a -- snapshot object capturing the full-state of the origin at visit time. create table origin_visit ( origin bigint not null, visit bigint not null, date timestamptz not null, type text not null ); comment on column origin_visit.origin is 'Visited origin'; comment on column origin_visit.visit is 'Sequential visit number for the origin'; comment on column origin_visit.date is 'Visit timestamp'; comment on column origin_visit.type is 'Type of loader that did the visit (hg, git, ...)'; -- Crawling history of software origin visits by Software Heritage. 
Each -- visit sees its history change through new origin visit status updates. create table origin_visit_status ( origin bigint not null, visit bigint not null, date timestamptz not null, status origin_visit_state not null, metadata jsonb, snapshot sha1_git ); comment on column origin_visit_status.origin is 'Origin concerned by the visit update'; comment on column origin_visit_status.visit is 'Visit concerned by the visit update'; comment on column origin_visit_status.date is 'Visit update timestamp'; comment on column origin_visit_status.status is 'Visit status (ongoing, failed, full)'; comment on column origin_visit_status.metadata is 'Optional origin visit metadata'; comment on column origin_visit_status.snapshot is 'Optional, possibly partial, snapshot of the origin visit'; -- A snapshot represents the entire state of a software origin as crawled by -- Software Heritage. This table is a simple mapping between (public) intrinsic -- snapshot identifiers and (private) numeric sequential identifiers. create table snapshot ( object_id bigserial not null, -- PK internal object identifier id sha1_git not null -- snapshot intrinsic identifier ); comment on table snapshot is 'State of a software origin as crawled by Software Heritage'; comment on column snapshot.object_id is 'Internal object identifier'; comment on column snapshot.id is 'Intrinsic snapshot identifier'; -- Each snapshot associates "branch" names to other objects in the Software -- Heritage Merkle DAG. This table describes branches as mappings between names -- and target typed objects. create table snapshot_branch ( object_id bigserial not null, -- PK internal object identifier name bytea not null, -- branch name, e.g., "master" or "feature/drag-n-drop" target bytea, -- target object identifier, e.g., a revision identifier target_type snapshot_target -- target object type, e.g., "revision" ); comment on table snapshot_branch is 'Associates branches with objects in the Software Heritage Merkle DAG'; comment on column snapshot_branch.object_id is 'Internal object identifier'; comment on column snapshot_branch.name is 'Branch name'; comment on column snapshot_branch.target is 'Target object identifier'; comment on column snapshot_branch.target_type is 'Target object type'; -- Mapping between snapshots and their branches. create table snapshot_branches ( snapshot_id bigint not null, -- snapshot identifier, ref. snapshot.object_id branch_id bigint not null -- branch identifier, ref. snapshot_branch.object_id ); comment on table snapshot_branches is 'Mapping between snapshots and their branches'; comment on column snapshot_branches.snapshot_id is 'Snapshot identifier'; comment on column snapshot_branches.branch_id is 'Branch identifier'; -- A "memorable" point in time in the development history of a software -- project.
-- -- Synonyms/mappings: -- * git: tag (of the annotated kind, otherwise they are just references) -- * tarball: the release version number create table release ( id sha1_git not null, target sha1_git, date timestamptz, date_offset smallint, name bytea, comment bytea, author bigint, synthetic boolean not null default false, -- true iff release has been created by Software Heritage object_id bigserial, target_type object_type not null, date_neg_utc_offset boolean ); comment on table release is 'Details of a software release, synonymous with a tag (git) or version number (tarball)'; comment on column release.id is 'Release git identifier'; comment on column release.target is 'Target git identifier'; comment on column release.date is 'Release timestamp'; comment on column release.date_offset is 'Timestamp offset from UTC'; comment on column release.name is 'Name'; comment on column release.comment is 'Comment'; comment on column release.author is 'Author'; comment on column release.synthetic is 'Indicates if created by Software Heritage'; comment on column release.object_id is 'Object identifier'; comment on column release.target_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')'; comment on column release.date_neg_utc_offset is 'True indicates -0 UTC offset for release timestamp'; -- Tools create table metadata_fetcher ( id serial not null, name text not null, version text not null, metadata jsonb not null ); comment on table metadata_fetcher is 'Tools used to retrieve metadata'; comment on column metadata_fetcher.id is 'Internal identifier of the fetcher'; comment on column metadata_fetcher.name is 'Fetcher name'; comment on column metadata_fetcher.version is 'Fetcher version'; comment on column metadata_fetcher.metadata is 'Extra information about the fetcher'; create table metadata_authority ( id serial not null, type text not null, url text not null, metadata jsonb not null ); comment on table metadata_authority is 'Metadata authority information'; comment on column metadata_authority.id is 'Internal identifier of the authority'; comment on column metadata_authority.type is 'Type of authority (deposit_client/forge/registry)'; comment on column metadata_authority.url is 'Authority''s URI'; comment on column metadata_authority.metadata is 'Other metadata about authority'; -- Extrinsic metadata on DAG objects and origins.
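Editor's note: the following is an illustrative sketch only, not part of this patch. It approximates how `raw_extrinsic_metadata_get` (shown earlier in `storage.py`) pages through the `raw_extrinsic_metadata` table defined just below: one page of rows for a given target and authority, keyed by `(discovery_date, fetcher_id)`, the same pair that is encoded in the returned page token. The `:placeholders` are assumed bind parameters, and the real query issued by the backend may differ.

```
-- One page of extrinsic metadata for a single target and authority,
-- resuming after the (discovery_date, fetcher_id) pair of the previous page.
select *
  from raw_extrinsic_metadata
 where target = :target
   and authority_id = :authority_id
   and (discovery_date, fetcher_id) > (:after_date, :after_fetcher)
 order by discovery_date, fetcher_id
 limit :limit;
```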
create table raw_extrinsic_metadata ( type text not null, - id text not null, + target text not null, -- metadata source authority_id bigint not null, fetcher_id bigint not null, discovery_date timestamptz not null, -- metadata itself format text not null, metadata bytea not null, -- context origin text, visit bigint, snapshot swhid, release swhid, revision swhid, path bytea, directory swhid ); comment on table raw_extrinsic_metadata is 'keeps all metadata found concerning an object'; comment on column raw_extrinsic_metadata.type is 'the type of object (content/directory/revision/release/snapshot/origin) the metadata is on'; -comment on column raw_extrinsic_metadata.id is 'the SWHID or origin URL for which the metadata was found'; +comment on column raw_extrinsic_metadata.target is 'the SWHID or origin URL for which the metadata was found'; comment on column raw_extrinsic_metadata.discovery_date is 'the date of retrieval'; comment on column raw_extrinsic_metadata.authority_id is 'the metadata provider: github, openhub, deposit, etc.'; comment on column raw_extrinsic_metadata.fetcher_id is 'the tool used for extracting metadata: loaders, crawlers, etc.'; comment on column raw_extrinsic_metadata.format is 'name of the format of metadata, used by readers to interpret it.'; comment on column raw_extrinsic_metadata.metadata is 'original metadata in opaque format'; -- Keep a cache of object counts create table object_counts ( object_type text, -- table for which we're counting objects (PK) value bigint, -- count of objects in the table last_update timestamptz, -- last update for the object count in this table single_update boolean -- whether we update this table standalone (true) or through bucketed counts (false) ); comment on table object_counts is 'Cache of object counts'; comment on column object_counts.object_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')'; comment on column object_counts.value is 'Count of objects in the table'; comment on column object_counts.last_update is 'Last update for object count'; comment on column object_counts.single_update is 'standalone (true) or bucketed counts (false)'; create table object_counts_bucketed ( line serial not null, -- PK object_type text not null, -- table for which we're counting objects identifier text not null, -- identifier across which we're bucketing objects bucket_start bytea, -- lower bound (inclusive) for the bucket bucket_end bytea, -- upper bound (exclusive) for the bucket value bigint, -- count of objects in the bucket last_update timestamptz -- last update for the object count in this bucket ); comment on table object_counts_bucketed is 'Bucketed count for objects ordered by type'; comment on column object_counts_bucketed.line is 'Auto-incremented identifier value'; comment on column object_counts_bucketed.object_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')'; comment on column object_counts_bucketed.identifier is 'Common identifier for bucketed objects'; comment on column object_counts_bucketed.bucket_start is 'Lower bound (inclusive) for the bucket'; comment on column object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket'; comment on column object_counts_bucketed.value is 'Count of objects in the bucket'; comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket'; diff --git a/swh/storage/sql/60-indexes.sql b/swh/storage/sql/60-indexes.sql index 75a1697a..370fb0fe 100644
--- a/swh/storage/sql/60-indexes.sql +++ b/swh/storage/sql/60-indexes.sql @@ -1,283 +1,283 @@ -- psql variables to get the current database flavor select swh_get_dbflavor() = 'read_replica' as dbflavor_read_replica \gset select swh_get_dbflavor() != 'read_replica' as dbflavor_does_deduplication \gset select swh_get_dbflavor() = 'mirror' as dbflavor_mirror \gset select swh_get_dbflavor() = 'default' as dbflavor_default \gset -- content create unique index concurrently content_pkey on content(sha1); alter table content add primary key using index content_pkey; \if :dbflavor_does_deduplication create unique index concurrently on content(sha1_git); \else create index concurrently on content(sha1_git); \endif create index concurrently on content(sha256); create index concurrently on content(blake2s256); \if :dbflavor_default create unique index concurrently on content(object_id); -- to be reviewed create index concurrently on content(ctime); -- to be reviewed \endif -- origin create unique index concurrently origin_pkey on origin(id); alter table origin add primary key using index origin_pkey; \if :dbflavor_does_deduplication create unique index concurrently on origin using btree(url); \else create index concurrently on origin using btree(url); \endif create index concurrently on origin using gin (url gin_trgm_ops); create index concurrently on origin using btree(digest(url, 'sha1')); -- skipped_content \if :dbflavor_does_deduplication alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256); \endif create index concurrently on skipped_content(sha1); create index concurrently on skipped_content(sha1_git); create index concurrently on skipped_content(sha256); create index concurrently on skipped_content(blake2s256); create unique index concurrently on skipped_content(object_id); \if :dbflavor_default alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid; alter table skipped_content validate constraint skipped_content_origin_fkey; \endif -- directory create unique index concurrently directory_pkey on directory(id); alter table directory add primary key using index directory_pkey; \if :dbflavor_default create index concurrently on directory using gin (dir_entries); -- to be reviewed create index concurrently on directory using gin (file_entries); -- to be reviewed create index concurrently on directory using gin (rev_entries); -- to be reviewed create unique index concurrently on directory(object_id); -- to be reviewed \endif -- directory_entry_dir create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id); alter table directory_entry_dir add primary key using index directory_entry_dir_pkey; \if :dbflavor_does_deduplication create unique index concurrently on directory_entry_dir(target, name, perms); \endif -- directory_entry_file create unique index concurrently directory_entry_file_pkey on directory_entry_file(id); alter table directory_entry_file add primary key using index directory_entry_file_pkey; \if :dbflavor_does_deduplication create unique index concurrently on directory_entry_file(target, name, perms); \endif -- directory_entry_rev create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id); alter table directory_entry_rev add primary key using index directory_entry_rev_pkey; \if :dbflavor_does_deduplication create unique index concurrently on directory_entry_rev(target, name, perms); \endif -- person create 
unique index concurrently person_pkey on person(id); alter table person add primary key using index person_pkey; \if :dbflavor_does_deduplication create unique index concurrently on person(fullname); \else create index concurrently on person(fullname); -- to be reviewed \endif \if :dbflavor_default create index concurrently on person(name); -- to be reviewed create index concurrently on person(email); -- to be reviewed \endif -- revision create unique index concurrently revision_pkey on revision(id); alter table revision add primary key using index revision_pkey; \if :dbflavor_does_deduplication alter table revision add constraint revision_author_fkey foreign key (author) references person(id) not valid; alter table revision validate constraint revision_author_fkey; alter table revision add constraint revision_committer_fkey foreign key (committer) references person(id) not valid; alter table revision validate constraint revision_committer_fkey; alter table revision add constraint revision_date_neg_utc_offset_not_null check (date is null or date_neg_utc_offset is not null) not valid; alter table revision add constraint revision_committer_date_neg_utc_offset_not_null check (committer_date is null or committer_date_neg_utc_offset is not null) not valid; alter table revision validate constraint revision_date_neg_utc_offset_not_null; alter table revision validate constraint revision_committer_date_neg_utc_offset_not_null; \endif \if :dbflavor_default create index concurrently on revision(directory); -- to be reviewed create unique index concurrently on revision(object_id); -- to be reviewed \endif -- revision_history create unique index concurrently revision_history_pkey on revision_history(id, parent_rank); alter table revision_history add primary key using index revision_history_pkey; \if :dbflavor_default create index concurrently on revision_history(parent_id); -- to be reviewed \endif \if :dbflavor_does_deduplication alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid; alter table revision_history validate constraint revision_history_id_fkey; \endif -- snapshot create unique index concurrently snapshot_pkey on snapshot(object_id); alter table snapshot add primary key using index snapshot_pkey; \if :dbflavor_does_deduplication create unique index concurrently on snapshot(id); \else create index concurrently on snapshot(id); \endif -- snapshot_branch create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id); alter table snapshot_branch add primary key using index snapshot_branch_pkey; \if :dbflavor_does_deduplication create unique index concurrently on snapshot_branch (target_type, target, name); alter table snapshot_branch add constraint snapshot_branch_target_check check ((target_type is null) = (target is null)) not valid; alter table snapshot_branch validate constraint snapshot_branch_target_check; alter table snapshot_branch add constraint snapshot_target_check check (target_type not in ('content', 'directory', 'revision', 'release', 'snapshot') or length(target) = 20) not valid; alter table snapshot_branch validate constraint snapshot_target_check; create unique index concurrently on snapshot_branch (name) where target_type is null and target is null; \endif -- snapshot_branches create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id); alter table snapshot_branches add primary key using index snapshot_branches_pkey; \if :dbflavor_does_deduplication 
alter table snapshot_branches add constraint snapshot_branches_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid; alter table snapshot_branches validate constraint snapshot_branches_snapshot_id_fkey; alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid; alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey; \endif -- origin_visit create unique index concurrently origin_visit_pkey on origin_visit(origin, visit); alter table origin_visit add primary key using index origin_visit_pkey; \if :dbflavor_default create index concurrently on origin_visit(date); -- to be reviewed create index concurrently origin_visit_type_date on origin_visit(type, date); -- to be reviewed \endif \if :dbflavor_does_deduplication alter table origin_visit add constraint origin_visit_origin_fkey foreign key (origin) references origin(id) not valid; alter table origin_visit validate constraint origin_visit_origin_fkey; \endif -- origin_visit_status create unique index concurrently origin_visit_status_pkey on origin_visit_status(origin, visit, date); alter table origin_visit_status add primary key using index origin_visit_status_pkey; \if :dbflavor_default alter table origin_visit_status add constraint origin_visit_status_origin_visit_fkey foreign key (origin, visit) references origin_visit(origin, visit) not valid; alter table origin_visit_status validate constraint origin_visit_status_origin_visit_fkey; \endif -- release create unique index concurrently release_pkey on release(id); alter table release add primary key using index release_pkey; \if :dbflavor_default create index concurrently on release(target, target_type); -- to be reviewed create unique index concurrently on release(object_id); -- to be reviewed \endif \if :dbflavor_does_deduplication alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid; alter table release validate constraint release_author_fkey; alter table release add constraint release_date_neg_utc_offset_not_null check (date is null or date_neg_utc_offset is not null) not valid; alter table release validate constraint release_date_neg_utc_offset_not_null; -- if the author is null, then the date must be null alter table release add constraint release_author_date_check check ((date is null) or (author is not null)) not valid; alter table release validate constraint release_author_date_check; \endif -- metadata_fetcher create unique index metadata_fetcher_pkey on metadata_fetcher(id); alter table metadata_fetcher add primary key using index metadata_fetcher_pkey; \if :dbflavor_does_deduplication create unique index metadata_fetcher_name_version on metadata_fetcher(name, version); \else create index metadata_fetcher_name_version on metadata_fetcher(name, version); \endif -- metadata_authority create unique index concurrently metadata_authority_pkey on metadata_authority(id); alter table metadata_authority add primary key using index metadata_authority_pkey; \if :dbflavor_does_deduplication create unique index concurrently metadata_authority_type_url on metadata_authority(type, url); \else create index concurrently metadata_authority_type_url on metadata_authority(type, url); \endif -- raw_extrinsic_metadata -create unique index concurrently raw_extrinsic_metadata_content_authority_date_fetcher on raw_extrinsic_metadata(id, authority_id, discovery_date, fetcher_id); +create unique index 
concurrently raw_extrinsic_metadata_content_authority_date_fetcher on raw_extrinsic_metadata(target, authority_id, discovery_date, fetcher_id); \if :dbflavor_default alter table raw_extrinsic_metadata add constraint raw_extrinsic_metadata_authority_fkey foreign key (authority_id) references metadata_authority(id) not valid; alter table raw_extrinsic_metadata validate constraint raw_extrinsic_metadata_authority_fkey; alter table raw_extrinsic_metadata add constraint raw_extrinsic_metadata_fetcher_fkey foreign key (fetcher_id) references metadata_fetcher(id) not valid; alter table raw_extrinsic_metadata validate constraint raw_extrinsic_metadata_fetcher_fkey; \endif -- object_counts create unique index concurrently object_counts_pkey on object_counts(object_type); alter table object_counts add primary key using index object_counts_pkey; -- object_counts_bucketed create unique index concurrently object_counts_bucketed_pkey on object_counts_bucketed(line); alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey; diff --git a/swh/storage/tests/algos/test_snapshot.py b/swh/storage/tests/algos/test_snapshot.py index 2759e7f4..cdd3c1d7 100644 --- a/swh/storage/tests/algos/test_snapshot.py +++ b/swh/storage/tests/algos/test_snapshot.py @@ -1,267 +1,397 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from hypothesis import given import pytest -from swh.model.collections import ImmutableDict from swh.model.hypothesis_strategies import branch_names, branch_targets, snapshots -from swh.model.model import OriginVisit, OriginVisitStatus, Snapshot +from swh.model.model import ( + OriginVisit, + OriginVisitStatus, + Snapshot, + SnapshotBranch, + TargetType, +) from swh.storage.algos.snapshot import ( snapshot_get_all_branches, snapshot_get_latest, snapshot_id_get_from_revision, + snapshot_resolve_alias, visits_and_snapshots_get_from_revision, ) from swh.storage.utils import now @pytest.fixture def swh_storage_backend_config(): yield { "cls": "memory", "journal_writer": None, } @given(snapshot=snapshots(min_size=0, max_size=10, only_objects=False)) def test_snapshot_small(swh_storage, snapshot): # noqa swh_storage.snapshot_add([snapshot]) returned_snapshot = snapshot_get_all_branches(swh_storage, snapshot.id) assert snapshot == returned_snapshot @given(branch_name=branch_names(), branch_target=branch_targets(only_objects=True)) def test_snapshot_large(swh_storage, branch_name, branch_target): # noqa snapshot = Snapshot( - branches=ImmutableDict( - (b"%s%05d" % (branch_name, i), branch_target) for i in range(10000) - ), + branches={b"%s%05d" % (branch_name, i): branch_target for i in range(10000)}, ) swh_storage.snapshot_add([snapshot]) returned_snapshot = snapshot_get_all_branches(swh_storage, snapshot.id) assert snapshot == returned_snapshot def test_snapshot_get_latest_none(swh_storage, sample_data): """Retrieve latest snapshot on unknown origin or origin without snapshot should yield no result """ # unknown origin so None assert snapshot_get_latest(swh_storage, "unknown-origin") is None # no snapshot on origin visit so None origin = sample_data.origin swh_storage.origin_add([origin]) origin_visit, origin_visit2 = sample_data.origin_visits[:2] assert origin_visit.origin == origin.url swh_storage.origin_visit_add([origin_visit]) assert snapshot_get_latest(swh_storage, 
origin.url) is None ov1 = swh_storage.origin_visit_get_latest(origin.url) assert ov1 is not None # visit references a snapshot but the snapshot does not exist in backend for some # reason complete_snapshot = sample_data.snapshots[2] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=origin_visit2.date, status="partial", snapshot=complete_snapshot.id, ) ] ) # so we do not find it assert snapshot_get_latest(swh_storage, origin.url) is None assert snapshot_get_latest(swh_storage, origin.url, branches_count=1) is None def test_snapshot_get_latest(swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1, visit2 = sample_data.origin_visits[:2] assert visit1.origin == origin.url swh_storage.origin_visit_add([visit1]) ov1 = swh_storage.origin_visit_get_latest(origin.url) # Add snapshot to visit1, latest snapshot = visit 1 snapshot complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=visit2.date, status="partial", snapshot=None, ) ] ) assert visit1.date < visit2.date # no snapshot associated to the visit, so None actual_snapshot = snapshot_get_latest( swh_storage, origin.url, allowed_statuses=["partial"] ) assert actual_snapshot is None date_now = now() assert visit2.date < date_now swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_now, status="full", snapshot=complete_snapshot.id, ) ] ) swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=now(), type=visit1.type,)] ) actual_snapshot = snapshot_get_latest(swh_storage, origin.url) assert actual_snapshot is not None assert actual_snapshot == complete_snapshot actual_snapshot = snapshot_get_latest(swh_storage, origin.url, branches_count=1) assert actual_snapshot is not None assert actual_snapshot.id == complete_snapshot.id assert len(actual_snapshot.branches.values()) == 1 with pytest.raises(ValueError, match="branches_count must be a positive integer"): snapshot_get_latest(swh_storage, origin.url, branches_count="something-wrong") def test_snapshot_id_get_from_revision(swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) date_visit2 = now() visit1, visit2 = sample_data.origin_visits[:2] assert visit1.origin == origin.url ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) revision1, revision2, revision3 = sample_data.revisions[:3] swh_storage.revision_add([revision1, revision2]) empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] swh_storage.snapshot_add([complete_snapshot]) # Add complete_snapshot to visit1 which targets revision1 ovs1, ovs2 = [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit2, status="partial", snapshot=complete_snapshot.id, ), OriginVisitStatus( origin=origin.url, visit=ov2.visit, date=now(), status="full", snapshot=empty_snapshot.id, ), ] swh_storage.origin_visit_status_add([ovs1, ovs2]) assert ov1.date < ov2.date assert ov2.date < ovs1.date assert ovs1.date < ovs2.date # revision3 does not exist so result is None actual_snapshot_id = snapshot_id_get_from_revision( swh_storage, origin.url, revision3.id ) assert actual_snapshot_id is None # no snapshot targets revision2 for origin.url so result is None actual_snapshot_id = snapshot_id_get_from_revision( swh_storage, origin.url, revision2.id ) assert actual_snapshot_id is None # complete_snapshot targets at least revision1 
actual_snapshot_id = snapshot_id_get_from_revision( swh_storage, origin.url, revision1.id ) assert actual_snapshot_id == complete_snapshot.id def test_visit_and_snapshot_get_from_revision(swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) date_visit2 = now() visit1, visit2 = sample_data.origin_visits[:2] assert visit1.origin == origin.url ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) revision1, revision2, revision3 = sample_data.revisions[:3] swh_storage.revision_add([revision1, revision2]) empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] swh_storage.snapshot_add([complete_snapshot]) # Add complete_snapshot to visit1 which targets revision1 ovs1, ovs2 = [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit2, status="partial", snapshot=complete_snapshot.id, ), OriginVisitStatus( origin=origin.url, visit=ov2.visit, date=now(), status="full", snapshot=empty_snapshot.id, ), ] swh_storage.origin_visit_status_add([ovs1, ovs2]) assert ov1.date < ov2.date assert ov2.date < ovs1.date assert ovs1.date < ovs2.date # revision3 does not exist so result is None actual_snapshot_id = snapshot_id_get_from_revision( swh_storage, origin.url, revision3.id ) assert actual_snapshot_id is None # no snapshot targets revision2 for origin.url so result is None res = list( visits_and_snapshots_get_from_revision(swh_storage, origin.url, revision2.id) ) assert res == [] # complete_snapshot targets at least revision1 res = list( visits_and_snapshots_get_from_revision(swh_storage, origin.url, revision1.id) ) assert res == [(ov1, ovs1, complete_snapshot)] + + +def test_snapshot_resolve_aliases_unknown_snapshot(swh_storage): + assert snapshot_resolve_alias(swh_storage, b"foo", b"HEAD") is None + + +def test_snapshot_resolve_aliases_no_aliases(swh_storage): + snapshot = Snapshot(branches={}) + swh_storage.snapshot_add([snapshot]) + + assert snapshot_resolve_alias(swh_storage, snapshot.id, b"HEAD") == ([], None) + + +def test_snapshot_resolve_alias(swh_storage, sample_data): + rev_branch_name = b"revision_branch" + rel_branch_name = b"release_branch" + rev_alias1_name = b"rev_alias1" + rev_alias2_name = b"rev_alias2" + rev_alias3_name = b"rev_alias3" + rel_alias_name = b"rel_alias" + rev_branch_info = SnapshotBranch( + target=sample_data.revisions[0].id, target_type=TargetType.REVISION, + ) + rel_branch_info = SnapshotBranch( + target=sample_data.releases[0].id, target_type=TargetType.RELEASE, + ) + rev_alias1_branch_info = SnapshotBranch( + target=rev_branch_name, target_type=TargetType.ALIAS + ) + rev_alias2_branch_info = SnapshotBranch( + target=rev_alias1_name, target_type=TargetType.ALIAS + ) + + rev_alias3_branch_info = SnapshotBranch( + target=rev_alias2_name, target_type=TargetType.ALIAS + ) + rel_alias_branch_info = SnapshotBranch( + target=rel_branch_name, target_type=TargetType.ALIAS + ) + + snapshot = Snapshot( + branches={ + rev_branch_name: rev_branch_info, + rel_branch_name: rel_branch_info, + rev_alias1_name: rev_alias1_branch_info, + rev_alias2_name: rev_alias2_branch_info, + rev_alias3_name: rev_alias3_branch_info, + rel_alias_name: rel_alias_branch_info, + } + ) + swh_storage.snapshot_add([snapshot]) + + for alias_name, expected_branches in ( + (rev_alias1_name, ([rev_alias1_branch_info], rev_branch_info)), + ( + rev_alias2_name, + ([rev_alias2_branch_info, rev_alias1_branch_info], rev_branch_info), + ), + ( + rev_alias3_name, + ( + [ + rev_alias3_branch_info, + rev_alias2_branch_info, + rev_alias1_branch_info, + ], + 
rev_branch_info, + ), + ), + (rel_alias_name, ([rel_alias_branch_info], rel_branch_info)), + ): + branches = snapshot_resolve_alias(swh_storage, snapshot.id, alias_name) + assert branches == expected_branches + + +def test_snapshot_resolve_alias_dangling_branch(swh_storage): + dangling_branch_name = b"dangling_branch" + alias_name = b"rev_alias" + + alias_branch_info = SnapshotBranch( + target=dangling_branch_name, target_type=TargetType.ALIAS + ) + + snapshot = Snapshot( + branches={dangling_branch_name: None, alias_name: alias_branch_info,} + ) + swh_storage.snapshot_add([snapshot]) + + branches = snapshot_resolve_alias(swh_storage, snapshot.id, alias_name) + assert branches == ([alias_branch_info], None) + + +def test_snapshot_resolve_alias_cycle_found(swh_storage): + alias1_name = b"alias_1" + alias2_name = b"alias_2" + alias3_name = b"alias_3" + alias4_name = b"alias_4" + + alias1_branch_info = SnapshotBranch( + target=alias2_name, target_type=TargetType.ALIAS + ) + alias2_branch_info = SnapshotBranch( + target=alias3_name, target_type=TargetType.ALIAS + ) + alias3_branch_info = SnapshotBranch( + target=alias4_name, target_type=TargetType.ALIAS + ) + alias4_branch_info = SnapshotBranch( + target=alias2_name, target_type=TargetType.ALIAS + ) + + snapshot = Snapshot( + branches={ + alias1_name: alias1_branch_info, + alias2_name: alias2_branch_info, + alias3_name: alias3_branch_info, + alias4_name: alias4_branch_info, + } + ) + swh_storage.snapshot_add([snapshot]) + + branches = snapshot_resolve_alias(swh_storage, snapshot.id, alias1_name) + assert branches == ( + [alias1_branch_info, alias2_branch_info, alias3_branch_info], + alias4_branch_info, + ) diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_cran.py b/swh/storage/tests/migrate_extrinsic_metadata/test_cran.py index a58a04dd..41772bc2 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_cran.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_cran.py @@ -1,295 +1,304 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # flake8: noqa # because of long lines import copy import datetime import json from unittest.mock import Mock, call from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, RawExtrinsicMetadata, ) from swh.storage.migrate_extrinsic_metadata import cran_package_from_url, handle_row FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ) +DIRECTORY_ID = b"a" * 20 +DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + DIRECTORY_ID.hex()) + def test_cran_package_from_url(): files = [ ("https://cran.r-project.org/src/contrib/shapeR_0.1-5.tar.gz", "shapeR"), ("https://cran.r-project.org/src/contrib/hot.deck_1.1.tar.gz", "hot.deck"), ] for (filename, project) in files: assert cran_package_from_url(filename) == project def test_cran(): source_original_artifacts = [ { "length": 170623, "filename": "ExtremeRisks_0.0.3.tar.gz", "checksums": { "sha1": "f2f19fc0f24b66b5ea9413366c632f3c229f7f3f", "sha256": "6f232556313019809dde3554149a1399bb1901a366b4965af49dc007d01945c9", }, } ] dest_original_artifacts = [ { "length": 170623, "filename": 
"ExtremeRisks_0.0.3.tar.gz", "checksums": { "sha1": "f2f19fc0f24b66b5ea9413366c632f3c229f7f3f", "sha256": "6f232556313019809dde3554149a1399bb1901a366b4965af49dc007d01945c9", }, "url": "https://cran.r-project.org/src/contrib/ExtremeRisks_0.0.3.tar.gz", } ] row = { "id": b"\x00\x03a\xaa3\x84,\xbd\xea_\xa6\xe7}\xb6\x96\xb97\xeb\xd2i", + "directory": DIRECTORY_ID, "date": datetime.datetime(2020, 5, 5, 0, 0, tzinfo=datetime.timezone.utc,), "committer_date": datetime.datetime( 2020, 5, 5, 0, 0, tzinfo=datetime.timezone.utc, ), "type": "tar", "message": b"0.0.3", "metadata": { "extrinsic": { "raw": { "url": "https://cran.r-project.org/src/contrib/ExtremeRisks_0.0.3.tar.gz", "version": "0.0.3", }, "when": "2020-05-07T15:27:38.652281+00:00", "provider": "https://cran.r-project.org/package=ExtremeRisks", }, "intrinsic": { "raw": { "URL": "mypage.unibocconi.it/simonepadoan/", "Date": "2020-05-05", "Title": "Extreme Risk Measures", "Author": "Simone Padoan [cre, aut],\n Gilles Stupfler [aut]", # ... "Date/Publication": "2020-05-07 10:20:02 UTC", }, "tool": "DESCRIPTION", }, "original_artifact": source_original_artifacts, }, } origin_url = "https://cran.r-project.org/package=ExtremeRisks" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(row, storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:000361aa33842cbdea5fa6e77db696b937ebd269" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 5, 7, 15, 27, 38, 652281, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:000361aa33842cbdea5fa6e77db696b937ebd269" + ), ), ] ), ] def test_cran_without_revision_date(): """Tests a CRAN revision with a date in the metadata but not as revision date""" source_original_artifacts = [ { "length": 8018, "filename": "gofgamma_1.0.tar.gz", "checksums": { "sha1": "58f2993140f9e9e1a136554f0af0174a252f2c7b", "sha256": "55408f004642b5043bb01de831a7e7a0b9f24a30cb0151e70c2d37abdc508d03", }, } ] dest_original_artifacts = [ { "length": 8018, "filename": "gofgamma_1.0.tar.gz", "checksums": { "sha1": "58f2993140f9e9e1a136554f0af0174a252f2c7b", "sha256": "55408f004642b5043bb01de831a7e7a0b9f24a30cb0151e70c2d37abdc508d03", }, "url": "https://cran.r-project.org/src/contrib/gofgamma_1.0.tar.gz", } ] row = { "id": b'\x00\x00\xd4\xef^\x16a"\xae\xe6\x86*\xd3\x8a\x18\xceS\x86\xcc>', + "directory": DIRECTORY_ID, "date": None, "committer_date": None, "type": "tar", "message": b"1.0", "metadata": { "extrinsic": { "raw": { "url": "https://cran.r-project.org/src/contrib/gofgamma_1.0.tar.gz", "version": "1.0", }, "when": "2020-04-30T11:01:57.832481+00:00", "provider": "https://cran.r-project.org/package=gofgamma", }, "intrinsic": { "raw": { "Type": "Package", "Title": "Goodness-of-Fit Tests for the Gamma Distribution", "Author": "Lucas Butsch [aut],\n Bruno Ebner [aut, cre],\n Steffen Betsch [aut]", # ... 
}, "tool": "DESCRIPTION", }, "original_artifact": source_original_artifacts, }, } origin_url = "https://cran.r-project.org/package=gofgamma" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:0000d4ef5e166122aee6862ad38a18ce5386cc3e" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 4, 30, 11, 1, 57, 832481, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:0000d4ef5e166122aee6862ad38a18ce5386cc3e" + ), ), ] ), ] def test_cran_with_new_original_artifacts_format(): original_artifacts = [ { "url": "https://cran.r-project.org/src/contrib/r2mlm_0.1.0.tar.gz", "length": 346563, "filename": "r2mlm_0.1.0.tar.gz", "checksums": { "sha1": "25c06b4af523c35a7813b58dd0db414e79848501", "sha256": "c887fe6c4f78c94b2279759052e12d639cf80225b444c1f67931c6aa6f0faf23", }, } ] row = { "id": b'."7\x82\xeeK\xa1R\xe4\xc8\x86\xf7\x97\x97bA\xc3\x9a\x9a\xab', + "directory": DIRECTORY_ID, "date": None, "committer_date": None, "type": "tar", "message": b"0.1.0", "metadata": { "extrinsic": { "raw": { "url": "https://cran.r-project.org/src/contrib/r2mlm_0.1.0.tar.gz" }, "when": "2020-09-25T14:04:20.926667+00:00", "provider": "https://cran.r-project.org/package=r2mlm", }, "intrinsic": { "raw": { "URL": "https://github.com/mkshaw/r2mlm", "Type": "Package", "Title": "R-Squared Measures for Multilevel Models", "Author": "Mairead Shaw [aut, cre],\n Jason Rights [aut],\n Sonya Sterba [aut],\n Jessica Flake [aut]", # ... 
}, "tool": "DESCRIPTION", }, "original_artifact": original_artifacts, }, } origin_url = "https://cran.r-project.org/package=r2mlm" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(row, storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:2e223782ee4ba152e4c886f797976241c39a9aab" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 9, 25, 14, 4, 20, 926667, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:2e223782ee4ba152e4c886f797976241c39a9aab" + ), ), ] ), ] diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py b/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py index a923f985..9fc5f559 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py @@ -1,563 +1,573 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # flake8: noqa # because of long lines import copy import datetime import json from unittest.mock import Mock, call from unittest.mock import patch as _patch import attr import pytest from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, OriginVisit, OriginVisitStatus, Person, RawExtrinsicMetadata, Revision, RevisionType, Snapshot, SnapshotBranch, TargetType, Timestamp, TimestampWithTimezone, ) from swh.storage import get_storage from swh.storage.interface import ListOrder, PagedResult from swh.storage.migrate_extrinsic_metadata import debian_origins_from_row, handle_row FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ) +DIRECTORY_ID = b"a" * 20 +DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + DIRECTORY_ID.hex()) + def now(): return datetime.datetime.now(tz=datetime.timezone.utc) def patch(function_name, *args, **kwargs): # It's a long name, this function spares some line breaks in 'with' statements return _patch( "swh.storage.migrate_extrinsic_metadata." 
+ function_name, *args, **kwargs ) def test_debian_origins_from_row(): """Tests debian_origins_from_row on a real example (with some parts omitted, for conciseness).""" origin_url = "deb://Debian/packages/kalgebra" visit = OriginVisit( origin=origin_url, date=datetime.datetime( 2020, 1, 27, 19, 32, 3, 925498, tzinfo=datetime.timezone.utc, ), type="deb", visit=280, ) storage = get_storage("memory") storage.origin_add( [ Origin(url=origin_url), Origin(url="http://snapshot.debian.org/package/kalgebra/"), ] ) storage.origin_visit_add([visit]) storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=280, date=datetime.datetime( 2020, 1, 27, 19, 32, 3, 925498, tzinfo=datetime.timezone.utc ), status="full", snapshot=b"\xafD\x15\x98){\xd4$\xdeI\x1f\xbe\x95lh`x\x14\xce\xc4", metadata=None, ) ], ) snapshot = Snapshot( id=b"\xafD\x15\x98){\xd4$\xdeI\x1f\xbe\x95lh`x\x14\xce\xc4", branches={ # ... b"releases/unstable/main/4:19.12.1-1": SnapshotBranch( target=b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", target_type=TargetType.REVISION, ), }, ) revision_row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", + "directory": DIRECTORY_ID, "metadata": { # ... "original_artifact": [ { "filename": "kalgebra_19.12.1-1.dsc", # ... }, ] }, } storage.snapshot_add([snapshot]) assert debian_origins_from_row(revision_row, storage) == [origin_url] def test_debian_origins_from_row__no_result(): """Tests debian_origins_from_row when there's no origin, visit, status, snapshot, branch, or matching branch. """ storage = get_storage("memory") origin_url = "deb://Debian/packages/kalgebra" snapshot_id = b"42424242424242424242" revision_id = b"21212121212121212121" storage.origin_add([Origin(url=origin_url)]) revision_row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", + "directory": DIRECTORY_ID, "metadata": {"original_artifact": [{"filename": "kalgebra_19.12.1-1.dsc",},]}, } # no visit assert debian_origins_from_row(revision_row, storage) == [] storage.origin_visit_add( [OriginVisit(origin=origin_url, date=now(), type="deb", visit=280,)] ) # no status assert debian_origins_from_row(revision_row, storage) == [] status = OriginVisitStatus( origin=origin_url, visit=280, date=now(), status="full", snapshot=None, metadata=None, ) storage.origin_visit_status_add([status]) # no snapshot assert debian_origins_from_row(revision_row, storage) == [] status = attr.evolve(status, snapshot=snapshot_id, date=now()) storage.origin_visit_status_add([status]) storage_before_snapshot = copy.deepcopy(storage) snapshot = Snapshot(id=snapshot_id, branches={}) storage.snapshot_add([snapshot]) # no branch assert debian_origins_from_row(revision_row, storage) == [] # "remove" the snapshot, so we can add a new one with the same id storage = copy.deepcopy(storage_before_snapshot) snapshot = attr.evolve(snapshot, branches={b"foo": None,},) storage.snapshot_add([snapshot]) # dangling branch assert debian_origins_from_row(revision_row, storage) == [] # "remove" the snapshot again storage = copy.deepcopy(storage_before_snapshot) snapshot = attr.evolve( snapshot, branches={ b"foo": SnapshotBranch(target_type=TargetType.REVISION, target=revision_id,) }, ) storage.snapshot_add([snapshot]) # branch points to unknown revision assert debian_origins_from_row(revision_row, storage) == [] revision = Revision( id=revision_id, message=b"foo", author=Person.from_fullname(b"foo"), committer=Person.from_fullname(b"foo"), date=TimestampWithTimezone( 
timestamp=Timestamp(seconds=1580076204, microseconds=0), offset=60, negative_utc=False, ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1580076204, microseconds=0), offset=60, negative_utc=False, ), type=RevisionType.DSC, directory=b"\xd5\x9a\x1f\x9c\x80\x9d\x8c}19P\xf6\xc8\xa2\x0f^%H\xcd\xdb", synthetic=True, metadata=None, parents=(), extra_headers=(), ) storage.revision_add([revision]) # no matching branch assert debian_origins_from_row(revision_row, storage) == [] def test_debian_origins_from_row__check_revisions(): """Tests debian_origins_from_row errors when the revision at the head of a branch is a DSC and has no parents """ storage = get_storage("memory") origin_url = "deb://Debian/packages/kalgebra" revision_id = b"21" * 10 storage.origin_add([Origin(url=origin_url)]) revision_row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", + "directory": DIRECTORY_ID, "metadata": {"original_artifact": [{"filename": "kalgebra_19.12.1-1.dsc",},]}, } storage.origin_visit_add( [ OriginVisit( origin=origin_url, date=datetime.datetime.now(tz=datetime.timezone.utc), type="deb", visit=280, ) ] ) storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=280, date=datetime.datetime.now(tz=datetime.timezone.utc), status="full", snapshot=b"42" * 10, metadata=None, ) ] ) storage.snapshot_add( [ Snapshot( id=b"42" * 10, branches={ b"foo": SnapshotBranch( target_type=TargetType.REVISION, target=revision_id ) }, ) ] ) storage_before_revision = copy.deepcopy(storage) revision = Revision( id=revision_id, message=b"foo", author=Person.from_fullname(b"foo"), committer=Person.from_fullname(b"foo"), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1580076204, microseconds=0), offset=60, negative_utc=False, ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1580076204, microseconds=0), offset=60, negative_utc=False, ), type=RevisionType.DSC, directory=b"\xd5\x9a\x1f\x9c\x80\x9d\x8c}19P\xf6\xc8\xa2\x0f^%H\xcd\xdb", synthetic=True, metadata=None, parents=(b"parent " * 2,), extra_headers=(), ) storage.revision_add([revision]) with pytest.raises(AssertionError, match="revision with parents"): debian_origins_from_row(revision_row, storage) def test_debian_with_extrinsic(): dest_original_artifacts = [ { "length": 2936, "filename": "kalgebra_19.12.1-1.dsc", "checksums": { "sha1": "f869e9f1155b1ee6d28ae3b40060570152a358cd", "sha256": "75f77150aefdaa4bcf8bc5b1e9b8b90b5cb1651b76a068c5e58e5b83658d5d11", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc", }, { "length": 1156408, "filename": "kalgebra_19.12.1.orig.tar.xz", "checksums": { "sha1": "e496032962212983a5359aebadfe13c4026fd45c", "sha256": "49d623186800eb8f6fbb91eb43fb14dff78e112624c9cda6b331d494d610b16a", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz", }, { "length": 10044, "filename": "kalgebra_19.12.1-1.debian.tar.xz", "checksums": { "sha1": "b518bfc2ac708b40577c595bd539faa8b84572db", "sha256": "1a30acd2699c3769da302f7a0c63a7d7b060f80925b38c8c43ce3bec92744d67", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.debian.tar.xz", }, { "length": 488, "filename": "kalgebra_19.12.1.orig.tar.xz.asc", "checksums": { "sha1": "ff53a5c21c1aef2b9caa38a02fa3488f43df4c20", "sha256": "a37e0b95bb1f16b19b0587bc5d3b99ba63a195d7f6335c4a359122ad96d682dd", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz.asc", }, ] source_original_artifacts = [ 
{k: v for (k, v) in d.items() if k != "url"} for d in dest_original_artifacts ] row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2020, 1, 26, 22, 3, 24, tzinfo=datetime.timezone.utc, ), "date_offset": 60, "type": "dsc", "message": b"Synthetic revision for Debian source package kalgebra version 4:19.12.1-1", "metadata": { "extrinsic": { "raw": { "id": 2718802, "name": "kalgebra", "files": { "kalgebra_19.12.1-1.dsc": { "uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc", "name": "kalgebra_19.12.1-1.dsc", "size": 2936, "md5sum": "fd28f604d4cc31a0a305543230f1622a", "sha256": "75f77150aefdaa4bcf8bc5b1e9b8b90b5cb1651b76a068c5e58e5b83658d5d11", }, "kalgebra_19.12.1.orig.tar.xz": { "uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz", "name": "kalgebra_19.12.1.orig.tar.xz", "size": 1156408, "md5sum": "34e09ed152da762d53101ea33634712b", "sha256": "49d623186800eb8f6fbb91eb43fb14dff78e112624c9cda6b331d494d610b16a", }, "kalgebra_19.12.1-1.debian.tar.xz": { "uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.debian.tar.xz", "name": "kalgebra_19.12.1-1.debian.tar.xz", "size": 10044, "md5sum": "4f639f36143898d97d044f273f038e58", "sha256": "1a30acd2699c3769da302f7a0c63a7d7b060f80925b38c8c43ce3bec92744d67", }, "kalgebra_19.12.1.orig.tar.xz.asc": { "uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz.asc", "name": "kalgebra_19.12.1.orig.tar.xz.asc", "size": 488, "md5sum": "3c29291e4e6f0c294de80feb8e9fce4c", "sha256": "a37e0b95bb1f16b19b0587bc5d3b99ba63a195d7f6335c4a359122ad96d682dd", }, }, "version": "4:19.12.1-1", "revision_id": None, }, "when": "2020-01-27T19:32:03.925498+00:00", "provider": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc", }, "intrinsic": { "raw": { "name": "kalgebra", "version": "4:19.12.1-1", # ... 
}, "tool": "dsc", }, "original_artifact": source_original_artifacts, }, } origin_url = "deb://Debian/packages/kalgebra" storage = Mock() deposit_cur = None with patch("debian_origins_from_row", return_value=[origin_url]): handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:0000036c311ef33a281b05688f6eadcfc0943aee" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 1, 26, 22, 3, 24, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:0000036c311ef33a281b05688f6eadcfc0943aee" + ), ), ] ), ] def test_debian_without_extrinsic(): source_original_artifacts = [ { "name": "pymongo_1.10-1.dsc", "sha1": "81877c1ae4406c2519b9cc9c4557cf6b0775a241", "length": 99, "sha256": "40269a73f38ee4c2f9cc021f1d5d091cc59ca6e778c339684b7be030e29e282f", "sha1_git": "0ac7bdb8e4d10926c5d3e51baa2be7bb29a3966b", }, { "name": "pymongo_1.10.orig.tar.gz", "sha1": "4f4c97641b86ac8f21396281bd1a7369236693c3", "length": 99, "sha256": "0b6bffb310782ffaeb3916c75790742ec5830c63a758fc711cd1f557eb5a4b5f", "sha1_git": "19ef0adda8868520d1ef9d4164b3ace4df1d62ad", }, { "name": "pymongo_1.10-1.debian.tar.gz", "sha1": "fbf378296613c8d55e043aec98896b3e50a94971", "length": 99, "sha256": "3970cc70fe3ba6499a9c56ba4b4c6c3782f56433d0d17d72b7a0e2ceae31b513", "sha1_git": "2eea9904806050a8fda95edd5d4fa60d29c1fdec", }, ] dest_original_artifacts = [ { "length": 99, "filename": "pymongo_1.10-1.dsc", "checksums": { "sha1": "81877c1ae4406c2519b9cc9c4557cf6b0775a241", "sha256": "40269a73f38ee4c2f9cc021f1d5d091cc59ca6e778c339684b7be030e29e282f", "sha1_git": "0ac7bdb8e4d10926c5d3e51baa2be7bb29a3966b", }, }, { "length": 99, "filename": "pymongo_1.10.orig.tar.gz", "checksums": { "sha1": "4f4c97641b86ac8f21396281bd1a7369236693c3", "sha256": "0b6bffb310782ffaeb3916c75790742ec5830c63a758fc711cd1f557eb5a4b5f", "sha1_git": "19ef0adda8868520d1ef9d4164b3ace4df1d62ad", }, }, { "length": 99, "filename": "pymongo_1.10-1.debian.tar.gz", "checksums": { "sha1": "fbf378296613c8d55e043aec98896b3e50a94971", "sha256": "3970cc70fe3ba6499a9c56ba4b4c6c3782f56433d0d17d72b7a0e2ceae31b513", "sha1_git": "2eea9904806050a8fda95edd5d4fa60d29c1fdec", }, }, ] row = { "id": b"\x00\x00\x01\xc2\x8c\x8f\xca\x01\xb9\x04\xde\x92\xa2d\n\x86l\xe0<\xb7", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2011, 3, 31, 20, 17, 41, tzinfo=datetime.timezone.utc ), "date_offset": 0, "type": "dsc", "message": b"Synthetic revision for Debian source package pymongo version 1.10-1", "metadata": { "package_info": { "name": "pymongo", "version": "1.10-1", "changelog": { # ... 
}, "maintainers": [ {"name": "Federico Ceratto", "email": "federico.ceratto@gmail.com"}, {"name": "Janos Guljas", "email": "janos@resenje.org"}, ], "pgp_signature": { "date": "2011-03-31T21:02:44+00:00", "keyid": "2BABC6254E66E7B8450AC3E1E6AA90171392B174", "person": {"name": "David Paleino", "email": "d.paleino@gmail.com"}, }, "lister_metadata": {"id": 244296, "lister": "snapshot.debian.org"}, }, "original_artifact": source_original_artifacts, }, } storage = Mock() origin_url = "http://snapshot.debian.org/package/pymongo" deposit_cur = None with patch("debian_origins_from_row", return_value=[origin_url]): handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:000001c28c8fca01b904de92a2640a866ce03cb7" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2011, 3, 31, 20, 17, 41, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:000001c28c8fca01b904de92a2640a866ce03cb7" + ), ), ] ) ] diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_deposit.py b/swh/storage/tests/migrate_extrinsic_metadata/test_deposit.py index 6e34359b..da97e234 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_deposit.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_deposit.py @@ -1,1165 +1,1354 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # flake8: noqa # because of long lines import copy import datetime import json from unittest.mock import MagicMock, Mock, call from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, RawExtrinsicMetadata, ) from swh.storage.migrate_extrinsic_metadata import ( DEPOSIT_COLS, cran_package_from_url, handle_row, ) FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ) SWH_DEPOSIT_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.DEPOSIT_CLIENT, url="https://www.softwareheritage.org", metadata={}, ) HAL_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.DEPOSIT_CLIENT, url="https://hal.archives-ouvertes.fr/", metadata={}, ) INTEL_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.DEPOSIT_CLIENT, url="https://software.intel.com", metadata={}, ) +DIRECTORY_ID = b"a" * 20 +DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + DIRECTORY_ID.hex()) + def get_mock_deposit_cur(row_dicts): rows = [tuple(d[key] for key in DEPOSIT_COLS) for d in row_dicts] deposit_cur = MagicMock() deposit_cur.__iter__.side_effect = [iter(rows)] return deposit_cur def test_deposit_1(): """Has a provider and xmlns, and the metadata is in the revision twice (at the root of the metadata dict, and in metadata->extrinsic->raw->origin_metadata)""" extrinsic_metadata = { "title": "Je suis GPL", "@xmlns": "http://www.w3.org/2005/Atom", "client": "swh", "codemeta:url": "https://forge.softwareheritage.org/source/jesuisgpl/", 
"@xmlns:codemeta": "https://doi.org/10.5063/SCHEMA/CODEMETA-2.0", "codemeta:author": { "codemeta:name": "Stefano Zacchiroli", "codemeta:jobTitle": "Maintainer", }, "codemeta:license": { "codemeta:url": "https://spdx.org/licenses/GPL-3.0-or-later.html", "codemeta:name": "GNU General Public License v3.0 or later", }, # ... } original_artifacts = [ { "length": 80880, "filename": "archive.zip", "checksums": { "sha1": "bad32a47a359e0e16ebdca2ad2dc6a771dac8f71", "sha256": "182b7ee3b7b5b550e83d3bcfed029bb2f625ee760ebfe9557d5fd072bd4e22e4", }, } ] row = { "id": b"\x02#\x10\xdf\x16\xfd\x9eMO\x81\xfe6\xa1B\xe8-\xb9w\xc0\x1d", + "directory": DIRECTORY_ID, "date": datetime.datetime(2018, 1, 5, 0, 0, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2018, 1, 5, 0, 0, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"swh: Deposit 467 in collection swh", "metadata": { "client": "swh", "extrinsic": { "raw": { "origin": { "url": "https://www.softwareheritage.org/check-deposit-2020-03-11T11:07:18.424476", "type": "deposit", }, "branch_name": "master", "origin_metadata": { "tool": { "name": "swh-deposit", "version": "0.0.1", "configuration": {"sword_version": 2}, }, "metadata": extrinsic_metadata, }, }, "when": "2020-03-11T11:11:36.336283+00:00", "provider": "https://deposit.softwareheritage.org/1/private/467/meta/", }, "original_artifact": original_artifacts, **extrinsic_metadata, }, } origin_url = ( "https://www.softwareheritage.org/check-deposit-2020-03-11T11:07:18.424476" ) swhid = ( f"swh:1:dir:ef04a768181417fbc5eef4243e2507915f24deea" f";origin={origin_url}" f";visit=swh:1:snp:14433c19dbb03ad57c86b58b53a800d6a0e32dd3" f";anchor=swh:1:rev:022310df16fd9e4d4f81fe36a142e82db977c01d" f";path=/" ) deposit_rows = [ { "deposit.id": 467, "deposit.external_id": "check-deposit-2020-03-11T11:07:18.424476", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2020, 3, 11, 11, 7, 18, 688410, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://www.softwareheritage.org", "deposit_collection.name": "swh", "auth_user.username": "swh", }, { "deposit.id": 467, "deposit.external_id": "check-deposit-2020-03-11T11:07:18.424476", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2020, 3, 11, 11, 7, 18, 669428, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://www.softwareheritage.org", "deposit_collection.name": "swh", "auth_user.username": "swh", }, ] storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = get_mock_deposit_cur(deposit_rows) handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) deposit_cur.execute.assert_called_once() deposit_cur.__iter__.assert_called_once() assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:022310df16fd9e4d4f81fe36a142e82db977c01d" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 3, 11, 11, 7, 18, 688410, tzinfo=datetime.timezone.utc ), authority=SWH_DEPOSIT_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + 
"swh:1:rev:022310df16fd9e4d4f81fe36a142e82db977c01d" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:022310df16fd9e4d4f81fe36a142e82db977c01d" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 3, 11, 11, 11, 36, 336283, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:022310df16fd9e4d4f81fe36a142e82db977c01d" + ), ), ] ), ] def test_deposit_2_without_xmlns(): """Has a provider, no xmlns, and the metadata is only in metadata->extrinsic->raw->origin_metadata)""" extrinsic_metadata = { "{http://www.w3.org/2005/Atom}id": "hal-01243573", "{http://www.w3.org/2005/Atom}author": { "{http://www.w3.org/2005/Atom}name": "HAL", "{http://www.w3.org/2005/Atom}email": "hal@ccsd.cnrs.fr", }, "{http://www.w3.org/2005/Atom}client": "hal", "{http://www.w3.org/2005/Atom}external_identifier": "hal-01243573", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}url": "https://hal-test.archives-ouvertes.fr/hal-01243573", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}name": "The assignment problem", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}author": { "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}name": "Morane Gruenpeter" }, "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}version": 1, "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}identifier": "10.5281/zenodo.438684", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}dateCreated": "2017-11-16T14:54:23+01:00", } original_artifacts = [ { "length": 208357, "filename": "archive.zip", "checksums": { "sha1": "fa0aec08e8a44ea144dba7ce366c8b5d66c14453", "sha256": "f53c05fe947e88ce83751a93bd522b1f88478ea2e7b984c07fc7a7c68128bf87", }, } ] row = { "id": b"\x01\x16\xca\xb7\x19d\xd5\x9c\x85p\xb4\xc5r\x9b(\xbd\xd6<\x9bF", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2018, 1, 17, 12, 54, 0, 723882, tzinfo=datetime.timezone.utc ), "committer_date": datetime.datetime( 2018, 1, 17, 12, 54, 0, 723882, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"hal: Deposit 82 in collection hal", "metadata": { "extrinsic": { "raw": { "origin": { "url": "https://hal.archives-ouvertes.fr/hal-01243573", "type": "deposit", }, "origin_metadata": { "tool": { "name": "swh-deposit", "version": "0.0.1", "configuration": {"sword_version": 2}, }, "metadata": extrinsic_metadata, "provider": { "metadata": {}, "provider_url": "https://hal.archives-ouvertes.fr/", "provider_name": "hal", "provider_type": "deposit_client", }, }, }, "when": "2020-05-15T14:27:21.462270+00:00", "provider": "https://deposit.softwareheritage.org/1/private/82/meta/", }, "original_artifact": original_artifacts, }, } swhid = ( "swh:1:dir:e04b2a7b8a8838da0693e9fd992a10d6fd211b50" ";origin=https://hal.archives-ouvertes.fr/hal-01243573" ";visit=swh:1:snp:abc9ae594245a740235b6c039f044352a5f723ec" ";anchor=swh:1:rev:0116cab71964d59c8570b4c5729b28bdd63c9b46" ";path=/" ) deposit_rows = [ { "deposit.id": 82, "deposit.external_id": "hal-01243573", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2018, 1, 17, 12, 54, 1, 533972, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, { "deposit.id": 82, "deposit.external_id": 
"hal-01243573", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2018, 1, 17, 12, 54, 0, 413748, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, ] origin_url = "https://hal.archives-ouvertes.fr/hal-01243573" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = get_mock_deposit_cur(deposit_rows) handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) deposit_cur.execute.assert_called_once() deposit_cur.__iter__.assert_called_once() assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:0116cab71964d59c8570b4c5729b28bdd63c9b46" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2018, 1, 17, 12, 54, 0, 413748, tzinfo=datetime.timezone.utc ), authority=HAL_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json-with-expanded-namespaces", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:0116cab71964d59c8570b4c5729b28bdd63c9b46" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:0116cab71964d59c8570b4c5729b28bdd63c9b46" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 5, 15, 14, 27, 21, 462270, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:0116cab71964d59c8570b4c5729b28bdd63c9b46" + ), ), ] ), ] def test_deposit_2_with_xmlns(): """Has a provider, xmlns, and the metadata is only in metadata->extrinsic->raw->origin_metadata)""" extrinsic_metadata = { "title": "Je suis GPL", "@xmlns": "http://www.w3.org/2005/Atom", "client": "swh", "codemeta:url": "https://forge.softwareheritage.org/source/jesuisgpl/", "@xmlns:codemeta": "https://doi.org/10.5063/SCHEMA/CODEMETA-2.0", "codemeta:author": { "codemeta:name": "Stefano Zacchiroli", "codemeta:jobTitle": "Maintainer", }, "codemeta:license": { "codemeta:url": "https://spdx.org/licenses/GPL-3.0-or-later.html", "codemeta:name": "GNU General Public License v3.0 or later", }, "external_identifier": "je-suis-gpl", "codemeta:dateCreated": "2018-01-05", } original_artifacts = [ { "length": 80880, "filename": "archive.zip", "checksums": { "sha1": "bad32a47a359e0e16ebdca2ad2dc6a771dac8f71", "sha256": "182b7ee3b7b5b550e83d3bcfed029bb2f625ee760ebfe9557d5fd072bd4e22e4", }, } ] row = { "id": b'\x01"\x96nP\x93\x17\xae\xcejA\xd0\xf0\x88\xdas<\xc0\x9d\x0f', + "directory": DIRECTORY_ID, "date": datetime.datetime(2018, 1, 5, 0, 0, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2018, 1, 5, 0, 0, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"swh: Deposit 687 in collection swh", "metadata": { "extrinsic": { "raw": { "origin": { "url": "https://www.softwareheritage.org/check-deposit-2020-06-26T13:50:07.564420", "type": "deposit", }, "origin_metadata": { "tool": { "name": "swh-deposit", "version": "0.0.1", "configuration": {"sword_version": 2}, }, 
"metadata": extrinsic_metadata, "provider": { "metadata": {}, "provider_url": "https://www.softwareheritage.org", "provider_name": "swh", "provider_type": "deposit_client", }, }, }, "when": "2020-06-26T13:50:22.640625+00:00", "provider": "https://deposit.softwareheritage.org/1/private/687/meta/", }, "original_artifact": original_artifacts, }, } swhid = ( "swh:1:dir:ef04a768181417fbc5eef4243e2507915f24deea" ";origin=https://www.softwareheritage.org/check-deposit-2020-06-26T13:50:07.564420" ";visit=swh:1:snp:8fd469e280fb0724175c64906627f619143d5bdb" ";anchor=swh:1:rev:0122966e509317aece6a41d0f088da733cc09d0f" ";path=/" ) deposit_rows = [ { "deposit.id": 687, "deposit.external_id": "check-deposit-2020-06-26T13:50:07.564420", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2020, 6, 26, 13, 50, 8, 216113, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://www.softwareheritage.org", "deposit_collection.name": "swh", "auth_user.username": "swh", }, { "deposit.id": 687, "deposit.external_id": "check-deposit-2020-06-26T13:50:07.564420", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2020, 6, 26, 13, 50, 8, 150498, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://www.softwareheritage.org", "deposit_collection.name": "swh", "auth_user.username": "swh", }, ] origin_url = ( "https://www.softwareheritage.org/check-deposit-2020-06-26T13:50:07.564420" ) storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = get_mock_deposit_cur(deposit_rows) handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) deposit_cur.execute.assert_called_once() deposit_cur.__iter__.assert_called_once() assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:0122966e509317aece6a41d0f088da733cc09d0f" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 6, 26, 13, 50, 8, 216113, tzinfo=datetime.timezone.utc ), authority=SWH_DEPOSIT_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:0122966e509317aece6a41d0f088da733cc09d0f" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, + discovery_date=datetime.datetime( + 2020, 6, 26, 13, 50, 22, 640625, tzinfo=datetime.timezone.utc + ), + authority=SWH_AUTHORITY, + fetcher=FETCHER, + format="original-artifacts-json", + metadata=json.dumps(original_artifacts).encode(), + origin=origin_url, + revision=parse_swhid( "swh:1:rev:0122966e509317aece6a41d0f088da733cc09d0f" ), + ), + ] + ), + ] + + +def test_deposit_2_with_json_in_json_and_no_xmlns(): + """New formats introduced in https://forge.softwareheritage.org/D4105 , + where the raw metadata is itself JSONed inside the metadata JSON tree + and https://forge.softwareheritage.org/D4065 where the @xmlns declarations + are stripped before being sent to the deposit DB""" + extrinsic_metadata = { + "id": "hal-02960679", + "author": {"name": "HAL", "email": 
"hal@ccsd.cnrs.fr"}, + "client": "hal", + "codemeta:url": "https://hal.archives-ouvertes.fr/hal-02960679", + "codemeta:name": "Compressive Spectral Clustering Toolbox", + "codemeta:author": [ + {"codemeta:name": "Nicolas Tremblay", "codemeta:affiliation": "PANAMA"}, + {"codemeta:name": "Gilles Puy", "codemeta:affiliation": "PANAMA"}, + {"codemeta:name": "R{\\'e}mi Gribonval", "codemeta:affiliation": "PANAMA"}, + {"codemeta:name": "Pierre Vandergheynst"}, + ], + # ... + } + + original_artifacts = [ + { + "url": "https://deposit.softwareheritage.org/1/private/1037/raw/", + "length": 4546913, + "filename": "archive.zip", + "checksums": { + "sha1": "01a0069c626a383de9a17ace40ecfd588e5c4f26", + "sha256": "c780a6de91286c70ceecc69fe0c6d201d3fe944aa89e193f3a89ae85dc25c3b1", + }, + } + ] + + row = { + "id": b"J\x9dc{\xa5\x07\xa2\xb93e%\x04(\xe6\xe3\xf0!\xf1\x94\xd0", + "directory": DIRECTORY_ID, + "date": datetime.datetime(2016, 1, 29, 0, 0, tzinfo=datetime.timezone.utc), + "committer_date": datetime.datetime( + 2020, 10, 8, 0, 0, tzinfo=datetime.timezone.utc + ), + "type": "tar", + "message": b"hal: Deposit 1037 in collection hal", + "metadata": { + "extrinsic": { + "raw": { + "origin": { + "url": "https://hal.archives-ouvertes.fr/hal-02960679", + "type": "deposit", + }, + "origin_metadata": { + "tool": { + "name": "swh-deposit", + "version": "0.2.0", + "configuration": {"sword_version": "2"}, + }, + "metadata": json.dumps(extrinsic_metadata), + "provider": { + "metadata": {}, + "provider_url": "https://hal.archives-ouvertes.fr/", + "provider_name": "hal", + "provider_type": "deposit_client", + }, + }, + }, + "when": "2020-10-09T13:38:25.888646+00:00", + "provider": "https://deposit.softwareheritage.org/1/private/1037/meta/", + }, + "original_artifact": original_artifacts, + }, + } + + swhid = ( + "swh:1:dir:8bfdf74037ae1c51335995891c6226e0f85e46e2" + ";origin=https://hal.archives-ouvertes.fr/hal-02960679" + ";visit=swh:1:snp:bc4a2ddf84dd0cc13d74e1970a1471c2574ed6aa" + ";anchor=swh:1:rev:4a9d637ba507a2b93365250428e6e3f021f194d0" + ";path=/" + ) + deposit_rows = [ + { + "deposit.id": 1037, + "deposit.external_id": "hal-02960679", + "deposit.swhid_context": swhid, + "deposit.status": "done", + "deposit_request.metadata": None, + "deposit_request.date": datetime.datetime( + 2020, 10, 9, 13, 38, 8, 269611, tzinfo=datetime.timezone.utc, + ), + "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", + "deposit_collection.name": "hal", + "auth_user.username": "hal", + }, + { + "deposit.id": 1037, + "deposit.external_id": "hal-02960679", + "deposit.swhid_context": swhid, + "deposit.status": "done", + "deposit_request.metadata": extrinsic_metadata, + "deposit_request.date": datetime.datetime( + 2020, 10, 9, 13, 38, 7, 394544, tzinfo=datetime.timezone.utc, + ), + "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", + "deposit_collection.name": "hal", + "auth_user.username": "hal", + }, + ] + + origin_url = "https://hal.archives-ouvertes.fr/hal-02960679" + + storage = Mock() + + def origin_get(urls): + assert urls == [origin_url] + return [Origin(url=origin_url)] + + storage.origin_get.side_effect = origin_get + deposit_cur = get_mock_deposit_cur(deposit_rows) + handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) + + deposit_cur.execute.assert_called_once() + deposit_cur.__iter__.assert_called_once() + + assert storage.method_calls == [ + call.origin_get([origin_url]), + call.raw_extrinsic_metadata_add( + [ + RawExtrinsicMetadata( + 
type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( - 2020, 6, 26, 13, 50, 22, 640625, tzinfo=datetime.timezone.utc + 2020, 10, 9, 13, 38, 7, 394544, tzinfo=datetime.timezone.utc + ), + authority=HAL_AUTHORITY, + fetcher=FETCHER, + format="sword-v2-atom-codemeta-v2-in-json", + metadata=json.dumps(extrinsic_metadata).encode(), + origin=origin_url, + revision=parse_swhid( + "swh:1:rev:4a9d637ba507a2b93365250428e6e3f021f194d0" + ), + ), + ] + ), + call.raw_extrinsic_metadata_add( + [ + RawExtrinsicMetadata( + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, + discovery_date=datetime.datetime( + 2020, 10, 9, 13, 38, 25, 888646, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:4a9d637ba507a2b93365250428e6e3f021f194d0" + ), ), ] ), ] def test_deposit_3_and_wrong_external_id_in_metadata(): extrinsic_metadata = { "title": "VTune Perf tool", "@xmlns": "http://www.w3.org/2005/Atom", "client": "swh", "codemeta:url": "https://software.intel.com/en-us/vtune", "@xmlns:codemeta": "https://doi.org/10.5063/SCHEMA/CODEMETA-2.0", "codemeta:author": { "codemeta:name": "VTune developer", "codemeta:jobTitle": "Software Engineer", }, "external_identifier": "vtune-perf-tool", "codemeta:dateCreated": "2019-05-14", "codemeta:description": "Modified version of Linux Perf tool which is used by Intel VTune Amplifier", } source_original_artifacts = [ { "name": "archive.zip", "sha1": "07251dbb1d904d143fd7da9935701f17670d4d9b", "length": 4350528, "sha256": "1f7d111ac79e468002f3edf4b7b2487538d41f6bea362d49b2eb08a537efafb6", "sha1_git": "e2d894efcaad4ff36f09eda3b3c0096416b03429", "blake2s256": "e2c08b82efbc361fbb2d28aa8352668cd71217f165f63de16b61ed61ace7509d", "archive_type": "zip", } ] dest_original_artifacts = [ { "length": 4350528, "archive_type": "zip", "filename": "archive.zip", "checksums": { "sha1": "07251dbb1d904d143fd7da9935701f17670d4d9b", "sha256": "1f7d111ac79e468002f3edf4b7b2487538d41f6bea362d49b2eb08a537efafb6", "sha1_git": "e2d894efcaad4ff36f09eda3b3c0096416b03429", "blake2s256": "e2c08b82efbc361fbb2d28aa8352668cd71217f165f63de16b61ed61ace7509d", }, } ] row = { "id": b"\t5`S\xc4\x9a\xd0\xf9\xe6.Q\xc2\x9d>a|y\x11@\xdf", + "directory": DIRECTORY_ID, "date": datetime.datetime(2019, 5, 14, 0, 0, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2019, 5, 14, 0, 0, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"intel: Deposit 268 in collection intel", "metadata": { **extrinsic_metadata, "original_artifact": source_original_artifacts, }, } swhid = ( "swh:1:dir:527c8e4a67d391f2bf1bbc86dd94af5d5cfc8ef7" ";origin=https://software.intel.com/f80482de-90a8-4c32-bce4-6f6918d492ff" ";visit=swh:1:snp:49d60943d9c061da1aba6266a811412f9db8de2e" ";anchor=swh:1:rev:09356053c49ad0f9e62e51c29d3e617c791140df" ";path=/" ) deposit_rows = [ { "deposit.id": 268, "deposit.external_id": "f80482de-90a8-4c32-bce4-6f6918d492ff", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2019, 5, 14, 7, 49, 36, 775072, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://software.intel.com", "deposit_collection.name": "intel", "auth_user.username": "intel", }, { "deposit.id": 268, "deposit.external_id": "f80482de-90a8-4c32-bce4-6f6918d492ff", "deposit.swhid_context": swhid, 
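# Note: "deposit.swhid_context" holds a qualified directory SWHID: the deposit's
# root directory qualified with the origin URL, the visit snapshot, an anchor
# (here the synthetic revision being migrated, i.e. row["id"]) and a path.
# It is presumably what lets the migration locate its target; compare
# test_deposit_3_and_no_swhid below, where a missing SWHID results in no
# metadata being written at all.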
"deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2019, 5, 14, 7, 49, 36, 477061, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://software.intel.com", "deposit_collection.name": "intel", "auth_user.username": "intel", }, { "deposit.id": 268, "deposit.external_id": "f80482de-90a8-4c32-bce4-6f6918d492ff", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2019, 5, 14, 7, 28, 33, 210100, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://software.intel.com", "deposit_collection.name": "intel", "auth_user.username": "intel", }, { "deposit.id": 268, "deposit.external_id": "f80482de-90a8-4c32-bce4-6f6918d492ff", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2019, 5, 14, 7, 28, 33, 41454, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://software.intel.com", "deposit_collection.name": "intel", "auth_user.username": "intel", }, ] origin_url = "https://software.intel.com/f80482de-90a8-4c32-bce4-6f6918d492ff" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = get_mock_deposit_cur(deposit_rows) handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) deposit_cur.execute.assert_called_once() deposit_cur.__iter__.assert_called_once() assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:09356053c49ad0f9e62e51c29d3e617c791140df" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 5, 14, 7, 49, 36, 775072, tzinfo=datetime.timezone.utc ), authority=INTEL_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:09356053c49ad0f9e62e51c29d3e617c791140df" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:09356053c49ad0f9e62e51c29d3e617c791140df" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 5, 14, 7, 28, 33, 210100, tzinfo=datetime.timezone.utc ), authority=INTEL_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:09356053c49ad0f9e62e51c29d3e617c791140df" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:09356053c49ad0f9e62e51c29d3e617c791140df" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 5, 14, 7, 49, 36, 775072, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:09356053c49ad0f9e62e51c29d3e617c791140df" + ), ), ] ), ] def test_deposit_3_and_no_swhid(): extrinsic_metadata = { "id": "hal-02337300", "@xmlns": "http://www.w3.org/2005/Atom", "author": {"name": "HAL", "email": "hal@ccsd.cnrs.fr"}, 
"client": "hal", "codemeta:url": "https://hal.archives-ouvertes.fr/hal-02337300", "codemeta:name": "R package SMM, Simulation and Estimation of Multi-State Discrete-Time Semi-Markov and Markov Models", "@xmlns:codemeta": "https://doi.org/10.5063/SCHEMA/CODEMETA-2.0", "codemeta:author": [ # ... ], # ... } original_artifacts = [ # ... ] row = { "id": b"\x91\xe5\xca\x8b'K\xf1\xa8cFd2\xd7Q\xf7A\xbc\x94\xba&", + "directory": DIRECTORY_ID, "date": datetime.datetime(2017, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2019, 11, 6, 14, 47, 30, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"hal: Deposit 342 in collection hal", "metadata": {**extrinsic_metadata, "original_artifact": original_artifacts,}, } storage = Mock() deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [] def test_deposit_3_and_unknown_deposit(): extrinsic_metadata = { "title": "Je suis GPL", "@xmlns": "http://www.w3.org/2005/Atom", "client": "swh", "codemeta:url": "https://forge.softwareheritage.org/source/jesuisgpl/", "@xmlns:codemeta": "https://doi.org/10.5063/SCHEMA/CODEMETA-2.0", "codemeta:author": { "codemeta:name": "Stefano Zacchiroli", "codemeta:jobTitle": "Maintainer", }, # ... } row = { "id": b"\x8e\x9c\xee\x14\xa6\xad9\xbc\xa44pw\xb8\x7f\xb5\xbb\xd8\x95;\xb1", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2018, 7, 23, 12, 25, 45, 907132, tzinfo=datetime.timezone.utc ), "committer_date": datetime.datetime( 2018, 7, 23, 12, 25, 45, 907132, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"swh: Deposit 159 in collection swh", "metadata": extrinsic_metadata, } origin_url = "https://software.intel.com/f80482de-90a8-4c32-bce4-6f6918d492ff" storage = Mock() deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [] def test_deposit_4_without_xmlns(): extrinsic_metadata = { "{http://www.w3.org/2005/Atom}id": "hal-01243573", "{http://www.w3.org/2005/Atom}author": { "{http://www.w3.org/2005/Atom}name": "HAL", "{http://www.w3.org/2005/Atom}email": "hal@ccsd.cnrs.fr", }, "{http://www.w3.org/2005/Atom}client": "hal", "{http://www.w3.org/2005/Atom}external_identifier": "hal-01243573", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}url": "https://hal-test.archives-ouvertes.fr/hal-01243573", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}name": "The assignment problem", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}author": { "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}name": "Morane Gruenpeter" }, # ... 
} row = { "id": b"\x03\x98\x7f\x05n\xafE\x96\xcd \xd7\xb2\xee\x01\xc9\xb8L\xed\xdf\xa8", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2018, 1, 17, 12, 49, 30, 902891, tzinfo=datetime.timezone.utc ), "committer_date": datetime.datetime( 2018, 1, 17, 12, 49, 30, 902891, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b": Deposit 79 in collection hal", "metadata": extrinsic_metadata, } swhid = ( "swh:1:dir:e04b2a7b8a8838da0693e9fd992a10d6fd211b50" ";origin=https://hal.archives-ouvertes.fr/hal-01243573" ";visit=swh:1:snp:c31851534c86676a040fb10f438728c90f1c9d55" ";anchor=swh:1:rev:43549ebbe70c9cdf0be1647e6319392eaa06f3a3" ";path=/" ) deposit_rows = [ { "deposit.id": 79, "deposit.external_id": "hal-01243573", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2018, 1, 17, 12, 49, 31, 208347, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, { "deposit.id": 79, "deposit.external_id": "hal-01243573", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2018, 1, 17, 12, 49, 30, 645576, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, ] origin_url = "https://hal.archives-ouvertes.fr/hal-01243573" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = get_mock_deposit_cur(deposit_rows) handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) deposit_cur.execute.assert_called_once() deposit_cur.__iter__.assert_called_once() assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:03987f056eaf4596cd20d7b2ee01c9b84ceddfa8" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2018, 1, 17, 12, 49, 30, 645576, tzinfo=datetime.timezone.utc ), authority=HAL_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json-with-expanded-namespaces", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:03987f056eaf4596cd20d7b2ee01c9b84ceddfa8" + ), ), ] ), # note: no original artifacts ] def test_deposit_4_wrong_origin(): extrinsic_metadata = { "{http://www.w3.org/2005/Atom}id": "hal-01588781", "{http://www.w3.org/2005/Atom}author": { "{http://www.w3.org/2005/Atom}name": "HAL", "{http://www.w3.org/2005/Atom}email": "hal@ccsd.cnrs.fr", }, "{http://www.w3.org/2005/Atom}client": "hal", "{http://www.w3.org/2005/Atom}external_identifier": "hal-01588781", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}url": "https://inria.halpreprod.archives-ouvertes.fr/hal-01588781", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}name": "The assignment problem ", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}author": { "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}name": "Morane Gruenpeter", "{https://doi.org/10.5063/SCHEMA/CODEMETA-2.0}affiliation": "Initiative pour la Recherche et l'Innovation sur le Logiciel Libre", }, # ... 
} row = { "id": b"-{\xcec\x1f\xc7\x91\x08\x03\x11\xeb\x83\\GB\x8eXjn\xa4", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2018, 1, 10, 13, 14, 51, 77033, tzinfo=datetime.timezone.utc ), "committer_date": datetime.datetime( 2018, 1, 10, 13, 14, 51, 77033, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b": Deposit 75 in collection hal", "metadata": extrinsic_metadata, } swhid = ( "swh:1:dir:d8971c651fe256942aa4499a3ccdbaa305d3bade" ";origin=https://inria.halpreprod.archives-ouvertes.fr/hal-01588781" ";visit=swh:1:snp:7c70cc8ea5b79e376605fd6e9b3b04d98861ffc0" ";anchor=swh:1:rev:2d7bce631fc791080311eb835c47428e586a6ea4" ";path=/" ) deposit_rows = [ { "deposit.id": 75, "deposit.external_id": "hal-01588781", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2018, 1, 10, 13, 14, 51, 523963, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, { "deposit.id": 75, "deposit.external_id": "hal-01588781", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2018, 1, 10, 13, 14, 50, 555143, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, ] origin_url = "https://inria.halpreprod.archives-ouvertes.fr/hal-01588781" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = get_mock_deposit_cur(deposit_rows) handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) deposit_cur.execute.assert_called_once() deposit_cur.__iter__.assert_called_once() assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:2d7bce631fc791080311eb835c47428e586a6ea4" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2018, 1, 10, 13, 14, 50, 555143, tzinfo=datetime.timezone.utc ), authority=HAL_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json-with-expanded-namespaces", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:2d7bce631fc791080311eb835c47428e586a6ea4" + ), ), ] ), # note: no original artifacts ] def test_deposit_missing_metadata_in_revision(): extrinsic_metadata = { "id": "hal-01243573", "@xmlns": "http://www.w3.org/2005/Atom", "author": {"name": "HAL", "email": "hal@ccsd.cnrs.fr"}, "client": "hal", "committer": "Administrateur Du Ccsd", "codemeta:url": "https://hal-test.archives-ouvertes.fr/hal-01243573", "codemeta:name": "The assignment problem", "@xmlns:codemeta": "https://doi.org/10.5063/SCHEMA/CODEMETA-2.0", "codemeta:author": {"codemeta:name": "Morane Gruenpeter"}, "codemeta:version": "1", "codemeta:identifier": {"#text": "10.5281/zenodo.438684", "@name": "doi",}, "external_identifier": "hal-01243573", "codemeta:dateCreated": "2017-11-16T14:54:23+01:00", } source_original_artifacts = [ { "name": "archive.zip", "sha1": "e8e46324970cd5af7f98c5a86f33f47fa4a41b4a", "length": 118650, "sha256": "fec81b63d666c43524f966bbd3263da5bee55051d2b48c1659cca5f56fd953e5", "sha1_git": "9da2bbd08bec590b36ede2ed43d74cd510b10a79", "blake2s256": 
"5d0973ba3644cc2bcfdb41ff1891744337d6aa9547a7e59fe466f684b027f295", "archive_type": "zip", } ] dest_original_artifacts = [ { "length": 118650, "archive_type": "zip", "filename": "archive.zip", "checksums": { "sha1": "e8e46324970cd5af7f98c5a86f33f47fa4a41b4a", "sha256": "fec81b63d666c43524f966bbd3263da5bee55051d2b48c1659cca5f56fd953e5", "sha1_git": "9da2bbd08bec590b36ede2ed43d74cd510b10a79", "blake2s256": "5d0973ba3644cc2bcfdb41ff1891744337d6aa9547a7e59fe466f684b027f295", }, } ] row = { "id": b"\x03@v\xf3\xf4\x1e\xe1 N\xb9\xf6@\x82\xcb\xe6\xe9P\xd7\xbb\x8a", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2019, 2, 25, 15, 49, 16, 594536, tzinfo=datetime.timezone.utc ), "committer_date": datetime.datetime( 2019, 2, 25, 15, 49, 16, 594536, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"hal: Deposit 229 in collection hal", "metadata": {"original_artifact": source_original_artifacts}, } swhid = ( "swh:1:dir:3d65b6f065118cb856272829b459f0dfa55549aa" ";origin=https://hal-test.archives-ouvertes.fr/hal-01243573" ";visit=swh:1:snp:322c54ff4023d3216a994bc9ff9ee524ed80ee1f" ";anchor=swh:1:rev:034076f3f41ee1204eb9f64082cbe6e950d7bb8a" ";path=/" ) deposit_rows = [ { "deposit.id": 229, "deposit.external_id": "hal-01243573", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": None, "deposit_request.date": datetime.datetime( 2019, 2, 25, 15, 54, 30, 102072, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, { "deposit.id": 229, "deposit.external_id": "hal-01243573", "deposit.swhid_context": swhid, "deposit.status": "success", "deposit_request.metadata": extrinsic_metadata, "deposit_request.date": datetime.datetime( 2019, 2, 25, 15, 49, 12, 302745, tzinfo=datetime.timezone.utc ), "deposit_client.provider_url": "https://hal.archives-ouvertes.fr/", "deposit_collection.name": "hal", "auth_user.username": "hal", }, ] origin_url = "https://hal.archives-ouvertes.fr/hal-01243573" # /!\ not https://hal-test.archives-ouvertes.fr/hal-01243573 # do not trust the metadata! 
storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = get_mock_deposit_cur(deposit_rows) handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) deposit_cur.execute.assert_called_once() deposit_cur.__iter__.assert_called_once() assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:034076f3f41ee1204eb9f64082cbe6e950d7bb8a" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 2, 25, 15, 49, 12, 302745, tzinfo=datetime.timezone.utc ), authority=HAL_AUTHORITY, fetcher=FETCHER, format="sword-v2-atom-codemeta-v2-in-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:034076f3f41ee1204eb9f64082cbe6e950d7bb8a" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:034076f3f41ee1204eb9f64082cbe6e950d7bb8a" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 2, 25, 15, 54, 30, 102072, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:034076f3f41ee1204eb9f64082cbe6e950d7bb8a" + ), ), ] ), ] diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_gnu.py b/swh/storage/tests/migrate_extrinsic_metadata/test_gnu.py index 2e0f5d52..7e7a4dcd 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_gnu.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_gnu.py @@ -1,106 +1,111 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # flake8: noqa # because of long lines import copy import datetime import json from unittest.mock import Mock, call from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, RawExtrinsicMetadata, ) from swh.storage.migrate_extrinsic_metadata import cran_package_from_url, handle_row FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ) +DIRECTORY_ID = b"a" * 20 +DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + DIRECTORY_ID.hex()) + def test_gnu(): original_artifacts = [ { "length": 842501, "filename": "gperf-3.0.1.tar.gz", "checksums": { "sha1": "c4453ee492032b369006ee464f4dd4e2c0c0e650", "sha256": "5be283ef62e1bd26abdaaf88b416dbea4b14c360b09befcda2f055656dc43f87", "sha1_git": "bf1d5bb57d571101dd7b6acab2b78ae11bb861de", "blake2s256": "661f84afeb1e0b914defe2b249d424af1dfe380a96016b3282ae758c70e19a70", }, } ] row = { "id": b"\x00\x1cqE\x8e@[%\xba\xcc\xc8\x0b\x99\xf6cM\xff\x9d+\x18", + "directory": DIRECTORY_ID, "date": datetime.datetime(2003, 6, 13, 0, 11, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2003, 6, 13, 0, 11, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"swh-loader-package: synthetic revision message", 
"metadata": { "extrinsic": { "raw": { "url": "https://ftp.gnu.org/gnu/gperf/gperf-3.0.1.tar.gz", "time": "2003-06-13T00:11:00+00:00", "length": 842501, "version": "3.0.1", "filename": "gperf-3.0.1.tar.gz", }, "when": "2019-11-27T11:17:38.318997+00:00", "provider": "https://ftp.gnu.org/gnu/gperf/", }, "intrinsic": {}, "original_artifact": original_artifacts, }, } origin_url = "https://ftp.gnu.org/gnu/gperf/" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(row, storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:001c71458e405b25baccc80b99f6634dff9d2b18" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 11, 27, 11, 17, 38, 318997, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:001c71458e405b25baccc80b99f6634dff9d2b18" + ), ), ] ), ] diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_nixguix.py b/swh/storage/tests/migrate_extrinsic_metadata/test_nixguix.py index b1bcdcac..03d5a9f0 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_nixguix.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_nixguix.py @@ -1,122 +1,128 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # flake8: noqa # because of long lines import copy import datetime import json from unittest.mock import Mock, call from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, RawExtrinsicMetadata, ) from swh.storage.migrate_extrinsic_metadata import cran_package_from_url, handle_row FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ) NIX_UNSTABLE_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.FORGE, url="https://nix-community.github.io/nixpkgs-swh/sources-unstable.json", metadata={}, ) +DIRECTORY_ID = b"a" * 20 +DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + DIRECTORY_ID.hex()) + def test_nixguix(): extrinsic_metadata = { "url": "https://files.pythonhosted.org/packages/source/a/alerta/alerta-7.4.5.tar.gz", "integrity": "sha256-km8RAaG1ep+tYR8eHVr3UWk+/MNEqdsBr1Di/g02LYQ=", } original_artifacts = [ { "length": 34903, "filename": "alerta-7.4.5.tar.gz", "checksums": { "sha1": "66db4398b664de272fd5aa6610caa776b5e64651", "sha256": "926f1101a1b57a9fad611f1e1d5af751693efcc344a9db01af50e2fe0d362d84", }, } ] row = { "id": b"\x00\x01\xbaM\xd0S\x94\x85\x02\x11\xd7\xb3\x85M\x99\x13\xd2:\xe3y", + "directory": DIRECTORY_ID, "date": None, "committer_date": None, "type": "tar", "message": b"", "metadata": { "extrinsic": { "raw": extrinsic_metadata, "when": "2020-06-03T11:25:05.259341+00:00", "provider": "https://nix-community.github.io/nixpkgs-swh/sources-unstable.json", }, "original_artifact": original_artifacts, }, } origin_url = 
"https://nix-community.github.io/nixpkgs-swh/sources-unstable.json" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(row, storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:0001ba4dd05394850211d7b3854d9913d23ae379" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 6, 3, 11, 25, 5, 259341, tzinfo=datetime.timezone.utc ), authority=NIX_UNSTABLE_AUTHORITY, fetcher=FETCHER, format="nixguix-sources-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:0001ba4dd05394850211d7b3854d9913d23ae379" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:0001ba4dd05394850211d7b3854d9913d23ae379" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 6, 3, 11, 25, 5, 259341, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:0001ba4dd05394850211d7b3854d9913d23ae379" + ), ), ] ), ] diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_npm.py b/swh/storage/tests/migrate_extrinsic_metadata/test_npm.py index 4f8890ba..0247d294 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_npm.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_npm.py @@ -1,374 +1,386 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # flake8: noqa # because of long lines import copy import datetime import json from unittest.mock import Mock, call from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, RawExtrinsicMetadata, ) from swh.storage.migrate_extrinsic_metadata import ( handle_row, npm_package_from_source_url, ) FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) NPM_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.FORGE, url="https://npmjs.com/", metadata={}, ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ) +DIRECTORY_ID = b"a" * 20 +DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + DIRECTORY_ID.hex()) + def test_npm_package_from_source_url(): package_urls = [ ( "@l3ilkojr/jdinsults", "https://registry.npmjs.org/@l3ilkojr/jdinsults/-/jdinsults-3.0.0.tgz", ), ("simplemaps", "https://registry.npmjs.org/simplemaps/-/simplemaps-0.0.6.tgz"), ( "@piximi/components", "https://registry.npmjs.org/@piximi/components/-/components-0.1.11.tgz", ), ( "@chappa'ai/get-next-rc", "https://registry.npmjs.org/@chappa%27ai/get-next-rc/-/get-next-rc-1.0.0.tgz", ), ] for (package_name, source_url) in package_urls: assert npm_package_from_source_url(source_url) == package_name def test_npm_1(): """Tests loading a revision generated by a new NPM loader that has a provider.""" extrinsic_metadata = { 
"_id": "@l3ilkojr/jdinsults@3.0.0", "dist": { "shasum": "b7f0d66090e0285f4e95d082d39bcb0c1b8f4ec8", "tarball": "https://registry.npmjs.org/@l3ilkojr/jdinsults/-/jdinsults-3.0.0.tgz", "fileCount": 4, "integrity": "sha512-qpv8Zg51g0l51VjODEooMUGSGanGUuQpzX5msfR7ZzbgTsgPbpDNyTIsQ0wQzI9RzCCUjS84Ii2VhMISEQcEUA==", "unpackedSize": 1583, "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJeUMS5CRA9TVsSAnZWagAAXpgP/0YgNOWN0U/Fz2RGeQhR\nVIKPvfGqZ2UfFxxUXWIc4QHvwyLCNUedCctpVdqnqmGJ9m/hj3K2zbRPD7Tm\n3nPl0HfzE7v3T8TDZfGhzW3c9mWxig+syr+sjo0EKyAgZVJ0mxbjOl4KHt+U\nQEwl/4falBsyYtK/pkCXWmmuC606QmPn/c6ZRD1Fw4vJjT9i5qi1KaBkIf6M\nnFmpOFxTcwxGGltOk3s3TKDtr8CIeWmdm3VkgsP2ErkPKAOcu12AT4/5tkg0\nDU+m1XmJb67rskb4Ncjvic/VutnPkEfNrk1IRXrmjDZBQbHtCJ7hd5ETmb9S\nE5WmMV8cpaGiW7AZvGTmkn5WETwQQU7po914zYiMg9+ozdwc7yC8cpGj/UoF\niKxsc1uxdfwWk/p3dShegEYM7sveloIXYsPaxbd84WRIfnwkWFZV82op96E3\neX+FRkhMfsHlK8OjZsBPXkppaB48jnZdm3GOOzT9YgyphV33j3J9GnNcDMDe\nriyCLV1BNSKDHElCDrvl1cBGg+C5qn/cTYjQdfEPPY2Hl2MgW9s4UV2s+YSx\n0BBd2A3j80wncP+Y7HFeC4Pv0SM0Pdq6xJaf3ELhj6j0rVZeTW1O3E/PFLXK\nnn/DZcsFXgIzjY+eBIMQgAhqyeJve8LeQNnGt3iNW10E2nZMpfc+dn0ESiwV\n2Gw4\r\n=8uqZ\r\n-----END PGP SIGNATURE-----\r\n", }, "name": "@l3ilkojr/jdinsults", "version": "3.0.0", "_npmUser": {"name": "l3ilkojr", "email": "l3ilkojr@example.com"}, "_npmVersion": "6.13.6", "description": "Generates insults", "directories": {}, "maintainers": [{"name": "l3ilkojr", "email": "l3ilkojr@example.com"}], "_nodeVersion": "10.14.0", "_hasShrinkwrap": False, "_npmOperationalInternal": { "tmp": "tmp/jdinsults_3.0.0_1582351545285_0.2614827716102821", "host": "s3://npm-registry-packages", }, } original_artifacts = [ { "length": 1033, "filename": "jdinsults-3.0.0.tgz", "checksums": { "sha1": "b7f0d66090e0285f4e95d082d39bcb0c1b8f4ec8", "sha256": "42f22795ac883b02fded0b2bf3d8a77f6507d40bc67f28eea6b1b73eb59c515f", }, } ] row = { "id": b"\x00\x00\x02\xa4\x9b\xba\x17\xca\x8c\xf3\x7f_=\x16\xaa\xac\xf9S`\xfc", + "directory": DIRECTORY_ID, "date": datetime.datetime(2020, 2, 22, 6, 5, 45, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2020, 2, 22, 6, 5, 45, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"3.0.0", "metadata": { "extrinsic": { "raw": extrinsic_metadata, "when": "2020-02-27T01:35:47.965375+00:00", "provider": "https://replicate.npmjs.com/%40l3ilkojr%2Fjdinsults/", }, "intrinsic": { "raw": {"name": "@l3ilkojr/jdinsults", "version": "3.0.0"}, "tool": "package.json", }, "original_artifact": original_artifacts, }, } origin_url = "https://www.npmjs.com/package/@l3ilkojr/jdinsults" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:000002a49bba17ca8cf37f5f3d16aaacf95360fc" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 2, 27, 1, 35, 47, 965375, tzinfo=datetime.timezone.utc, ), authority=NPM_AUTHORITY, fetcher=FETCHER, format="replicate-npm-package-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:000002a49bba17ca8cf37f5f3d16aaacf95360fc" + ), ), ] ), call.raw_extrinsic_metadata_add( [ 
RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:000002a49bba17ca8cf37f5f3d16aaacf95360fc" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 2, 27, 1, 35, 47, 965375, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:000002a49bba17ca8cf37f5f3d16aaacf95360fc" + ), ), ] ), ] def test_npm_2_unscoped(): """Tests loading a revision generated by an old NPM loader that doesn't have a provider; and the package name is unscoped (ie. doesn't contain a slash).""" extrinsic_metadata = { "bugs": {"url": "https://github.com/niwasawa/simplemaps/issues"}, "name": "simplemaps", "author": "Naoki Iwasawa", "license": "MIT", # ... } package_source = { "url": "https://registry.npmjs.org/simplemaps/-/simplemaps-0.0.6.tgz", "date": "2016-12-23T07:21:29.733Z", "name": "simplemaps", "sha1": "e2b8222930196def764527f5c61048c5b28fe3c4", "sha256": "3ce94927bab5feafea5695d1fa4c2b8131413e53e249b32f9ac2ccff4d865a0b", "version": "0.0.6", "filename": "simplemaps-0.0.6.tgz", "blake2s256": "6769b4009f8162be2e745604b153443d4907a85781d31a724217a3e2d42a7462", } original_artifacts = [ { "filename": "simplemaps-0.0.6.tgz", "checksums": { "sha1": "e2b8222930196def764527f5c61048c5b28fe3c4", "sha256": "3ce94927bab5feafea5695d1fa4c2b8131413e53e249b32f9ac2ccff4d865a0b", "blake2s256": "6769b4009f8162be2e745604b153443d4907a85781d31a724217a3e2d42a7462", }, "url": "https://registry.npmjs.org/simplemaps/-/simplemaps-0.0.6.tgz", } ] row = { "id": b"\x00\x00\x04\xae\xed\t\xee\x08\x9cx\x12d\xc0M%d\xfdX\xfe\xb5", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2016, 12, 23, 7, 21, 29, tzinfo=datetime.timezone.utc ), "committer_date": datetime.datetime( 2016, 12, 23, 7, 21, 29, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"0.0.6", "metadata": {"package": extrinsic_metadata, "package_source": package_source,}, } origin_url = "https://www.npmjs.com/package/simplemaps" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:000004aeed09ee089c781264c04d2564fd58feb5" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2016, 12, 23, 7, 21, 29, tzinfo=datetime.timezone.utc, ), authority=NPM_AUTHORITY, fetcher=FETCHER, format="replicate-npm-package-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:000004aeed09ee089c781264c04d2564fd58feb5" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:000004aeed09ee089c781264c04d2564fd58feb5" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2016, 12, 23, 7, 21, 29, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:000004aeed09ee089c781264c04d2564fd58feb5" + ), ), ] ), ] 
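
The hunks in these test modules all apply the same mechanical change to the expected `RawExtrinsicMetadata` objects: entries that previously targeted the revision (`type=MetadataTargetType.REVISION`, `id=parse_swhid("swh:1:rev:...")`) now target the revision's directory (`type=MetadataTargetType.DIRECTORY`, `target=DIRECTORY_SWHID`), with the revision SWHID retained as context through the new `revision=` field and each row gaining a `"directory"` key. The following is a minimal sketch of the new shape, outside the patch itself; the SWHIDs are placeholders for illustration, not values from these tests.

```python
import datetime
import json

from swh.model.identifiers import parse_swhid
from swh.model.model import (
    MetadataAuthority,
    MetadataAuthorityType,
    MetadataFetcher,
    MetadataTargetType,
    RawExtrinsicMetadata,
)

# Same fetcher/authority shapes as the module-level fixtures in these tests.
FETCHER = MetadataFetcher(
    name="migrate-extrinsic-metadata-from-revisions", version="0.0.1",
)
SWH_AUTHORITY = MetadataAuthority(
    type=MetadataAuthorityType.REGISTRY,
    url="https://softwareheritage.org/",
    metadata={},
)

# Placeholder identifiers, for illustration only.
DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + "aa" * 20)
REVISION_SWHID = parse_swhid("swh:1:rev:" + "bb" * 20)

metadata = RawExtrinsicMetadata(
    type=MetadataTargetType.DIRECTORY,  # was MetadataTargetType.REVISION
    target=DIRECTORY_SWHID,             # was id=<revision SWHID>
    revision=REVISION_SWHID,            # revision kept as context information
    discovery_date=datetime.datetime.now(tz=datetime.timezone.utc),
    authority=SWH_AUTHORITY,
    fetcher=FETCHER,
    format="original-artifacts-json",
    metadata=json.dumps([]).encode(),
)
```
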
def test_npm_2_scoped(): """Tests loading a revision generated by an old NPM loader that doesn't have a provider; and the package name is scoped (ie. in the format @org/name).""" extrinsic_metadata = { "bugs": {"url": "https://github.com/piximi/components/issues"}, "name": "@piximi/components", # ... } package_source = { "url": "https://registry.npmjs.org/@piximi/components/-/components-0.1.11.tgz", "date": "2019-06-07T19:56:04.753Z", "name": "@piximi/components", "sha1": "4ab74e563cb61bb5b2022601a5133a2dd19d19ec", "sha256": "69bb980bd6de3277b6bca86fd79c91f1c28db6910c8d03ecd05b32b78a35188f", "version": "0.1.11", "filename": "components-0.1.11.tgz", "blake2s256": "ce33181d5eff25b70ffdd6f1a18acd472a1707ede23cd2adc6af272dfc40dbfd", } original_artifacts = [ { "filename": "components-0.1.11.tgz", "checksums": { "sha1": "4ab74e563cb61bb5b2022601a5133a2dd19d19ec", "sha256": "69bb980bd6de3277b6bca86fd79c91f1c28db6910c8d03ecd05b32b78a35188f", "blake2s256": "ce33181d5eff25b70ffdd6f1a18acd472a1707ede23cd2adc6af272dfc40dbfd", }, "url": "https://registry.npmjs.org/@piximi/components/-/components-0.1.11.tgz", } ] row = { "id": b"\x00\x00 \x19\xc5wXt\xbc\xed\x00zR\x9b\xd3\xb7\x8b\xf6\x04W", + "directory": DIRECTORY_ID, "date": datetime.datetime(2019, 6, 7, 19, 56, 4, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2019, 6, 7, 19, 56, 4, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"0.1.11", "metadata": {"package": extrinsic_metadata, "package_source": package_source,}, } origin_url = "https://www.npmjs.com/package/@piximi/components" storage = Mock() def origin_get(urls): assert urls == [origin_url] return [Origin(url=origin_url)] storage.origin_get.side_effect = origin_get deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.origin_get([origin_url]), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:00002019c5775874bced007a529bd3b78bf60457" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 6, 7, 19, 56, 4, tzinfo=datetime.timezone.utc, ), authority=NPM_AUTHORITY, fetcher=FETCHER, format="replicate-npm-package-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:00002019c5775874bced007a529bd3b78bf60457" + ), ), ] ), call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=parse_swhid( - "swh:1:rev:00002019c5775874bced007a529bd3b78bf60457" - ), + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 6, 7, 19, 56, 4, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=parse_swhid( + "swh:1:rev:00002019c5775874bced007a529bd3b78bf60457" + ), ), ] ), ] diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_pypi.py b/swh/storage/tests/migrate_extrinsic_metadata/test_pypi.py index e0590151..2a39eb48 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_pypi.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_pypi.py @@ -1,557 +1,646 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more 
information # flake8: noqa # because of long lines import copy import datetime import json +import urllib.error import attr from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, Origin, OriginVisit, OriginVisitStatus, RawExtrinsicMetadata, Snapshot, SnapshotBranch, TargetType, ) from swh.storage import get_storage from swh.storage.interface import PagedResult from swh.storage.migrate_extrinsic_metadata import ( handle_row, + pypi_origin_from_filename, pypi_project_from_filename, ) FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) PYPI_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.FORGE, url="https://pypi.org/", ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", ) +DIRECTORY_ID = b"a" * 20 +DIRECTORY_SWHID = parse_swhid("swh:1:dir:" + DIRECTORY_ID.hex()) + def now(): return datetime.datetime.now(tz=datetime.timezone.utc) def test_pypi_project_from_filename(): files = [ ("django-agent-trust-0.1.8.tar.gz", "django-agent-trust"), ("python_test-1.0.1.zip", "python_test"), ("py-evm-0.2.0a9.tar.gz", "py-evm"), ("collective.texttospeech-1.0rc1.tar.gz", "collective.texttospeech"), ("flatland-fork-0.4.post1.dev40550160.zip", "flatland-fork"), ("fake-factory-0.5.6-proper.tar.gz", "fake-factory"), ("ariane_procos-0.1.2-b05.tar.gz", "ariane_procos"), ("Yelpy-0.2.2dev.tar.gz", "Yelpy"), ("geventhttpclient_c-1.0a-t1.tar.gz", "geventhttpclient_c"), ("codeforlife-portal-1.0.0.post.dev618.tar.gz", "codeforlife-portal"), ("ChecklistDSL-0.0.1.alpha.1.tar.gz", "ChecklistDSL"), ("transifex-1.1.0beta.tar.gz", "transifex"), ("thespian-2.5.10.tar.bz2", "thespian"), ("janis pipelines-0.5.3.tar.gz", "janis-pipelines"), ("pants-1.0.0-beta.2.tar.gz", "pants"), ("uforge_python_sdk-3.8.4-RC15.tar.gz", "uforge_python_sdk"), ("virtuoso-0.11.0.48.b5865c2b46fb.tar.gz", "virtuoso"), ("cloud_ftp-v1.0.0.tar.gz", "cloud_ftp"), ("frozenordereddict-1.0.0.tgz", "frozenordereddict"), ("pywebsite-0.1.2pre.tar.gz", "pywebsite"), ("Flask Unchained-0.2.0.tar.gz", "Flask-Unchained"), ("mongomotor-0.13.0.n.tar.gz", "mongomotor"), ("datahaven-rev8784.tar.gz", "datahaven"), ("geopandas-0.1.0.dev-120d5ee.tar.gz", "geopandas"), ("aimmo-v0.1.1-alpha.post.dev61.tar.gz", "aimmo"), ("django-migrations-plus-0.1.0.dev5.gdd1abd3.tar.gz", "django-migrations-plus"), ("function_shield.tar.gz", "function_shield"), ("Dtls-0.1.0.sdist_with_openssl.mingw-win32.tar.gz", "Dtls"), ("pytz-2005m.tar.gz", "pytz"), ("python-librsync-0.1-3.tar.gz", "python-librsync"), ("powny-1.4.0-alpha-20141205-1452-f5a2b03.tar.gz", "powny"), ("stp-3pc-batch-0.1.11.tar.gz", "stp-3pc-batch"), ("obedient.powny-3.0.0-alpha-20141027-2102-9e53ebd.tar.gz", "obedient.powny"), ("mojimoji-0.0.9_2.tar.gz", "mojimoji"), ("devpi-theme-16-2.0.0.tar.gz", "devpi-theme-16"), ("Orange3-WONDER-1-1.0.7.tar.gz", "Orange3-WONDER-1"), ("obj-34.tar.gz", "obj"), ("pytorch-ignite-nightly-20190825.tar.gz", "pytorch-ignite-nightly"), ("tlds-2019081900.tar.gz", "tlds"), ("dominator-12.1.2-alpha-20141027-1446-ad46e0f.tar.gz", "dominator"), ("waferslim-1.0.0-py3.1.zip", "waferslim"), ("Beaver-21.tar.gz", "Beaver"), ("aimmo-0.post.dev460.tar.gz", "aimmo"), ("ohai-1!0.tar.gz", "ohai"), ("nevolution-risk-139.tar.gz", "nevolution-risk"), ("collective.topicitemsevent-0.1dvl.tar.gz", "collective.topicitemsevent"), ("lesscpy-0.9g.tar.gz", "lesscpy"), ("SpiNNStorageHandlers-1!4.0.0a1.tar.gz", 
"SpiNNStorageHandlers"), ("limnoria-2013-03-27T16:32:26+0100.tar.gz", "limnoria"), ( "sPyNNakerExternalDevicesPlugin-1!4.0.0a2.tar.gz", "sPyNNakerExternalDevicesPlugin", ), ("django-bootstrap-italia_0.1.tar.gz", "django-bootstrap-italia"), ("sPyNNaker8-1!4.0.0a1.tar.gz", "sPyNNaker8"), ("betahaus.openmember-0.1adev-r1651.tar.gz", "betahaus.openmember"), ("mailer.0.8.0.zip", "mailer"), ("pytz-2005k.tar.bz2", "pytz"), ("aha.plugin.microne-0.62bdev.tar.gz", "aha.plugin.microne"), ("youtube_dl_server-alpha.3.tar.gz", "youtube_dl_server"), ("json-extensions-b76bc7d.tar.gz", "json-extensions"), ("LitReview-0.6989ev.tar.gz", "LitReview"), ("django_options-r5.tar.gz", "django_options"), ("ddlib-2013-11-07.tar.gz", "ddlib"), ("python-morfeusz-0.3000+py3k.tar.gz", "python-morfeusz"), ("gaepytz-2011h.zip", "gaepytz"), ("ftldat-r3.tar.gz", "ftldat"), ("tigretoolbox-0.0.0-py2.7-linux-x86_64.egg", None), ( "Greater than, equal, or less Library-0.1.tar.gz", "Greater-than-equal-or-less-Library", ), ("upstart--main-.-VLazy.object.at.0x104ba8b50-.tar.gz", "upstart"), + ("duckduckpy0.1.tar.gz", "duckduckpy"), + ("QUI for MPlayer snapshot_9-14-2011.zip", "QUI-for-MPlayer"), + ("Eddy's Memory Game-1.0.zip", "Eddy-s-Memory-Game"), + ("jekyll2nikola-0-0-1.tar.gz", "jekyll2nikola"), + ("ore.workflowed-0-6-2.tar.gz", "ore.workflowed"), + ("instancemanager-1.0rc-r34317.tar.gz", "instancemanager"), + ("OrzMC_W&L-1.0.0.tar.gz", "OrzMC-W-L"), ] for (filename, project) in files: assert pypi_project_from_filename(filename) == project +def test_pypi_origin_from_project_name(mocker): + origin_url = "https://pypi.org/project/ProjectName/" + + storage = get_storage("memory") + + revision_id = b"41" * 10 + snapshot_id = b"42" * 10 + storage.origin_add([Origin(url=origin_url)]) + storage.origin_visit_add( + [OriginVisit(origin=origin_url, visit=1, date=now(), type="pypi")] + ) + storage.origin_visit_status_add( + [ + OriginVisitStatus( + origin=origin_url, + visit=1, + date=now(), + status="partial", + snapshot=snapshot_id, + ) + ] + ) + storage.snapshot_add( + [ + Snapshot( + id=snapshot_id, + branches={ + b"foo": SnapshotBranch( + target_type=TargetType.REVISION, target=revision_id, + ) + }, + ) + ] + ) + + class response: + code = 200 + + def read(self): + return b'{"info": {"name": "ProjectName"}}' + + mock_urlopen = mocker.patch( + "swh.storage.migrate_extrinsic_metadata.urlopen", return_value=response(), + ) + + assert ( + pypi_origin_from_filename(storage, revision_id, "ProjectName-1.0.0.tar.gz") + == origin_url + ) + mock_urlopen.assert_not_called() + assert ( + pypi_origin_from_filename(storage, revision_id, "projectname-1.0.0.tar.gz") + == origin_url + ) + mock_urlopen.assert_called_once_with("https://pypi.org/pypi/projectname/json/") + + def test_pypi_1(): """Tests loading a revision generated by a new PyPI loader that has a provider.""" extrinsic_metadata = { "url": "https://files.pythonhosted.org/packages/70/89/a498245baf1bf3dde73d3da00b4b067a8aa7c7378ad83472078803ea3e43/m3-ui-2.2.73.tar.gz", "size": 3933168, "digests": { "md5": "a374ac3f655e97df5db5335e2142d344", "sha256": "1bc2756f7d0d2e15cf5880ca697682ff35e8b58116bf73eb9c78b3db358c5b7d", }, "has_sig": False, "filename": "m3-ui-2.2.73.tar.gz", "downloads": -1, "md5_digest": "a374ac3f655e97df5db5335e2142d344", "packagetype": "sdist", "upload_time": "2019-11-11T06:21:20", "comment_text": "", "python_version": "source", "requires_python": None, "upload_time_iso_8601": "2019-11-11T06:21:20.073082Z", } original_artifacts = [ { "length": 3933168, "filename": 
"m3-ui-2.2.73.tar.gz", "checksums": { "sha1": "9f4ec7ce64b7fea4b122e85d47ea31146c367b03", "sha256": "1bc2756f7d0d2e15cf5880ca697682ff35e8b58116bf73eb9c78b3db358c5b7d", }, } ] row = { "id": b"\x00\x00\x07a{S\xe7\xb1E\x8fi]\xd0}\xe4\xceU\xaf\x15\x17", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2019, 11, 11, 6, 21, 20, tzinfo=datetime.timezone.utc, ), "committer_date": datetime.datetime( 2019, 11, 11, 6, 21, 20, tzinfo=datetime.timezone.utc, ), "type": "tar", "message": b"2.2.73", "metadata": { "extrinsic": { "raw": extrinsic_metadata, "when": "2020-01-23T18:43:09.109407+00:00", "provider": "https://pypi.org/pypi/m3-ui/json", }, "intrinsic": { "raw": { "name": "m3-ui", "summary": "======", "version": "2.2.73", # ... "metadata_version": "1.1", }, "tool": "PKG-INFO", }, "original_artifact": original_artifacts, }, } origin_url = "https://pypi.org/project/m3-ui/" storage = get_storage("memory") storage.origin_add([Origin(url=origin_url)]) storage.metadata_authority_add( [ attr.evolve(PYPI_AUTHORITY, metadata={}), attr.evolve(SWH_AUTHORITY, metadata={}), ] ) storage.metadata_fetcher_add([FETCHER]) deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) revision_swhid = parse_swhid("swh:1:rev:000007617b53e7b1458f695dd07de4ce55af1517") assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=PYPI_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=PYPI_AUTHORITY, ) == PagedResult( results=[ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=revision_swhid, + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 1, 23, 18, 43, 9, 109407, tzinfo=datetime.timezone.utc, ), authority=PYPI_AUTHORITY, fetcher=FETCHER, format="pypi-project-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=origin_url, + revision=revision_swhid, ), ], next_page_token=None, ) assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=SWH_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=SWH_AUTHORITY, ) == PagedResult( results=[ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=revision_swhid, + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 1, 23, 18, 43, 9, 109407, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(original_artifacts).encode(), origin=origin_url, + revision=revision_swhid, ), ], next_page_token=None, ) -def test_pypi_2(): +def test_pypi_2(mocker): """Tests loading a revision generated by an old PyPI loader that does not have a provider, but has 'project' metadata.""" + mocker.patch( + "swh.storage.migrate_extrinsic_metadata.urlopen", + side_effect=urllib.error.HTTPError(None, 404, "Not Found", None, None), + ) + extrinsic_metadata = { "name": "jupyterhub-simx", "author": "Jupyter Development Team", "license": "BSD", "summary": "JupyterHub: A multi-user server for Jupyter notebooks", "version": "1.0.5", # ... 
} source_original_artifacts = [ { "url": "https://files.pythonhosted.org/packages/72/28/a8098763d78e2c4607cb67602c0d726a97ac38d4c1f531aac28f49de2e1a/jupyterhub-simx-1.0.5.tar.gz", "date": "2019-01-23T22:10:55", "sha1": "ede3eadd5a06e70912e3ba7cfccef789c4ad3168", "size": 2346538, "sha256": "0399d7f5f0d90c525d369f0507ad0e8ef8729c1c7fa63aadfc46a27514d14a46", "filename": "jupyterhub-simx-1.0.5.tar.gz", "sha1_git": "734301124712182eb30fc90e97cc18cef5432f02", "blake2s256": "bb4aa82ffb5891a05dcf6d4dce3ad56fd2c18e9abdba9d20972910649d869322", "archive_type": "tar", } ] dest_original_artifacts = [ { "url": "https://files.pythonhosted.org/packages/72/28/a8098763d78e2c4607cb67602c0d726a97ac38d4c1f531aac28f49de2e1a/jupyterhub-simx-1.0.5.tar.gz", "filename": "jupyterhub-simx-1.0.5.tar.gz", "archive_type": "tar", "length": 2346538, "checksums": { "sha1": "ede3eadd5a06e70912e3ba7cfccef789c4ad3168", "sha256": "0399d7f5f0d90c525d369f0507ad0e8ef8729c1c7fa63aadfc46a27514d14a46", "sha1_git": "734301124712182eb30fc90e97cc18cef5432f02", "blake2s256": "bb4aa82ffb5891a05dcf6d4dce3ad56fd2c18e9abdba9d20972910649d869322", }, } ] row = { "id": b"\x00\x00\x04\xd68,J\xd4\xc0Q\x92fbl6U\x1f\x0eQ\xca", + "directory": DIRECTORY_ID, "date": datetime.datetime( 2019, 1, 23, 22, 10, 55, tzinfo=datetime.timezone.utc ), "committer_date": datetime.datetime( 2019, 1, 23, 22, 10, 55, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"1.0.5", "metadata": { "project": extrinsic_metadata, "original_artifact": source_original_artifacts, }, } origin_url = "https://pypi.org/project/jupyterhub-simx/" storage = get_storage("memory") storage.origin_add([Origin(url=origin_url)]) storage.metadata_authority_add( [ attr.evolve(PYPI_AUTHORITY, metadata={}), attr.evolve(SWH_AUTHORITY, metadata={}), ] ) storage.metadata_fetcher_add([FETCHER]) deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) revision_swhid = parse_swhid("swh:1:rev:000004d6382c4ad4c0519266626c36551f0e51ca") assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=PYPI_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=PYPI_AUTHORITY, ) == PagedResult( results=[ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=revision_swhid, + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 1, 23, 22, 10, 55, tzinfo=datetime.timezone.utc, ), authority=PYPI_AUTHORITY, fetcher=FETCHER, format="pypi-project-json", metadata=json.dumps(extrinsic_metadata).encode(), origin=None, + revision=revision_swhid, ), ], next_page_token=None, ) assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=SWH_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=SWH_AUTHORITY, ) == PagedResult( results=[ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=revision_swhid, + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2019, 1, 23, 22, 10, 55, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=None, + revision=revision_swhid, ), ], next_page_token=None, ) -def test_pypi_3(): +def test_pypi_3(mocker): """Tests loading a revision generated by a very old PyPI loader that does not have a provider or has 'project' metadata.""" + mocker.patch( + "swh.storage.migrate_extrinsic_metadata.urlopen", + 
side_effect=urllib.error.HTTPError(None, 404, "Not Found", None, None), + ) + source_original_artifact = { "url": "https://files.pythonhosted.org/packages/34/4f/30087f22eaae8ad7077a28ce157342745a2977e264b8a8e4e7f804a8aa5e/PyPDFLite-0.1.32.tar.gz", "date": "2014-05-07T22:03:00", "sha1": "3289269f75b4111dd00eaea53e00330db9a1db12", "size": 46644, "sha256": "911497d655cf7ef6530c5b57773dad7da97e21cf4d608ad9ad1e38bd7bec7824", "filename": "PyPDFLite-0.1.32.tar.gz", "sha1_git": "1e5c38014731242cfa8594839bcba8a0c4e158c5", "blake2s256": "45792e57873f56d385c694e36c98a580cbba60d5ea91eb6fd0a2d1c71c1fb385", "archive_type": "tar", } dest_original_artifacts = [ { "url": "https://files.pythonhosted.org/packages/34/4f/30087f22eaae8ad7077a28ce157342745a2977e264b8a8e4e7f804a8aa5e/PyPDFLite-0.1.32.tar.gz", "filename": "PyPDFLite-0.1.32.tar.gz", "archive_type": "tar", "length": 46644, "checksums": { "sha1": "3289269f75b4111dd00eaea53e00330db9a1db12", "sha256": "911497d655cf7ef6530c5b57773dad7da97e21cf4d608ad9ad1e38bd7bec7824", "sha1_git": "1e5c38014731242cfa8594839bcba8a0c4e158c5", "blake2s256": "45792e57873f56d385c694e36c98a580cbba60d5ea91eb6fd0a2d1c71c1fb385", }, } ] row = { "id": b"N\xa9\x91|\xdfS\xcd\x13SJ\x04.N\xb3x{\x86\xc84\xd2", + "directory": DIRECTORY_ID, "date": datetime.datetime(2014, 5, 7, 22, 3, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2014, 5, 7, 22, 3, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"0.1.32", "metadata": {"original_artifact": source_original_artifact}, } origin_url = "https://pypi.org/project/PyPDFLite/" storage = get_storage("memory") storage.origin_add([Origin(url=origin_url)]) storage.metadata_authority_add( [ attr.evolve(PYPI_AUTHORITY, metadata={}), attr.evolve(SWH_AUTHORITY, metadata={}), ] ) storage.metadata_fetcher_add([FETCHER]) deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) revision_swhid = parse_swhid("swh:1:rev:4ea9917cdf53cd13534a042e4eb3787b86c834d2") assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=PYPI_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=PYPI_AUTHORITY, ) == PagedResult(results=[], next_page_token=None,) assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=SWH_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=SWH_AUTHORITY, ) == PagedResult( results=[ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=revision_swhid, + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2014, 5, 7, 22, 3, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=None, + revision=revision_swhid, ), ], next_page_token=None, ) def test_pypi_good_origin(): """Tests loading a revision whose origin we can find""" source_original_artifact = { "url": "https://files.pythonhosted.org/packages/34/4f/30087f22eaae8ad7077a28ce157342745a2977e264b8a8e4e7f804a8aa5e/PyPDFLite-0.1.32.tar.gz", "date": "2014-05-07T22:03:00", "sha1": "3289269f75b4111dd00eaea53e00330db9a1db12", "size": 46644, "sha256": "911497d655cf7ef6530c5b57773dad7da97e21cf4d608ad9ad1e38bd7bec7824", "filename": "PyPDFLite-0.1.32.tar.gz", "sha1_git": "1e5c38014731242cfa8594839bcba8a0c4e158c5", "blake2s256": "45792e57873f56d385c694e36c98a580cbba60d5ea91eb6fd0a2d1c71c1fb385", "archive_type": "tar", } dest_original_artifacts = [ { "url": 
"https://files.pythonhosted.org/packages/34/4f/30087f22eaae8ad7077a28ce157342745a2977e264b8a8e4e7f804a8aa5e/PyPDFLite-0.1.32.tar.gz", "filename": "PyPDFLite-0.1.32.tar.gz", "archive_type": "tar", "length": 46644, "checksums": { "sha1": "3289269f75b4111dd00eaea53e00330db9a1db12", "sha256": "911497d655cf7ef6530c5b57773dad7da97e21cf4d608ad9ad1e38bd7bec7824", "sha1_git": "1e5c38014731242cfa8594839bcba8a0c4e158c5", "blake2s256": "45792e57873f56d385c694e36c98a580cbba60d5ea91eb6fd0a2d1c71c1fb385", }, } ] revision_id = b"N\xa9\x91|\xdfS\xcd\x13SJ\x04.N\xb3x{\x86\xc84\xd2" row = { "id": revision_id, + "directory": DIRECTORY_ID, "date": datetime.datetime(2014, 5, 7, 22, 3, tzinfo=datetime.timezone.utc), "committer_date": datetime.datetime( 2014, 5, 7, 22, 3, tzinfo=datetime.timezone.utc ), "type": "tar", "message": b"0.1.32", "metadata": {"original_artifact": source_original_artifact}, } origin_url = "https://pypi.org/project/PyPDFLite/" storage = get_storage("memory") snapshot_id = b"42" * 10 storage.origin_add([Origin(url=origin_url)]) storage.origin_visit_add( [OriginVisit(origin=origin_url, visit=1, date=now(), type="pypi")] ) storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=1, date=now(), status="partial", snapshot=snapshot_id, ) ] ) storage.snapshot_add( [ Snapshot( id=snapshot_id, branches={ b"foo": SnapshotBranch( target_type=TargetType.REVISION, target=revision_id, ) }, ) ] ) storage.metadata_authority_add( [ attr.evolve(PYPI_AUTHORITY, metadata={}), attr.evolve(SWH_AUTHORITY, metadata={}), ] ) storage.metadata_fetcher_add([FETCHER]) deposit_cur = None handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) revision_swhid = parse_swhid("swh:1:rev:4ea9917cdf53cd13534a042e4eb3787b86c834d2") assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=PYPI_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=PYPI_AUTHORITY, ) == PagedResult(results=[], next_page_token=None,) assert storage.raw_extrinsic_metadata_get( - MetadataTargetType.REVISION, revision_swhid, authority=SWH_AUTHORITY, + MetadataTargetType.DIRECTORY, DIRECTORY_SWHID, authority=SWH_AUTHORITY, ) == PagedResult( results=[ RawExtrinsicMetadata( - type=MetadataTargetType.REVISION, - id=revision_swhid, + type=MetadataTargetType.DIRECTORY, + target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2014, 5, 7, 22, 3, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, + revision=revision_swhid, ), ], next_page_token=None, ) diff --git a/swh/storage/tests/storage_data.py b/swh/storage/tests/storage_data.py index 12429721..53c06a48 100644 --- a/swh/storage/tests/storage_data.py +++ b/swh/storage/tests/storage_data.py @@ -1,552 +1,552 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from typing import Tuple import attr from swh.model import from_disk from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.model.identifiers import parse_swhid from swh.model.model import ( Content, Directory, DirectoryEntry, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, ObjectType, Origin, OriginVisit, Person, RawExtrinsicMetadata, Release, Revision, RevisionType, 
SkippedContent, Snapshot, SnapshotBranch, TargetType, Timestamp, TimestampWithTimezone, ) class StorageData: """Data model objects to use within tests. """ content = Content( data=b"42\n", length=3, sha1=hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4689"), sha1_git=hash_to_bytes("d81cc0710eb6cf9efd5b920a8453e1e07157b6cd"), sha256=hash_to_bytes( "084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0" ), blake2s256=hash_to_bytes( "d5fe1939576527e42cfd76a9455a2432fe7f56669564577dd93c4280e76d661d" ), status="visible", ) content2 = Content( data=b"4242\n", length=5, sha1=hash_to_bytes("61c2b3a30496d329e21af70dd2d7e097046d07b7"), sha1_git=hash_to_bytes("36fade77193cb6d2bd826161a0979d64c28ab4fa"), sha256=hash_to_bytes( "859f0b154fdb2d630f45e1ecae4a862915435e663248bb8461d914696fc047cd" ), blake2s256=hash_to_bytes( "849c20fad132b7c2d62c15de310adfe87be94a379941bed295e8141c6219810d" ), status="visible", ) content3 = Content( data=b"424242\n", length=7, sha1=hash_to_bytes("3e21cc4942a4234c9e5edd8a9cacd1670fe59f13"), sha1_git=hash_to_bytes("c932c7649c6dfa4b82327d121215116909eb3bea"), sha256=hash_to_bytes( "92fb72daf8c6818288a35137b72155f507e5de8d892712ab96277aaed8cf8a36" ), blake2s256=hash_to_bytes( "76d0346f44e5a27f6bafdd9c2befd304aff83780f93121d801ab6a1d4769db11" ), status="visible", ctime=datetime.datetime(2019, 12, 1, tzinfo=datetime.timezone.utc), ) contents: Tuple[Content, ...] = (content, content2, content3) skipped_content = SkippedContent( length=1024 * 1024 * 200, sha1_git=hash_to_bytes("33e45d56f88993aae6a0198013efa80716fd8920"), sha1=hash_to_bytes("43e45d56f88993aae6a0198013efa80716fd8920"), sha256=hash_to_bytes( "7bbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a" ), blake2s256=hash_to_bytes( "ade18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b" ), reason="Content too long", status="absent", origin="file:///dev/zero", ) skipped_content2 = SkippedContent( length=1024 * 1024 * 300, sha1_git=hash_to_bytes("44e45d56f88993aae6a0198013efa80716fd8921"), sha1=hash_to_bytes("54e45d56f88993aae6a0198013efa80716fd8920"), sha256=hash_to_bytes( "8cbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a" ), blake2s256=hash_to_bytes( "9ce18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b" ), reason="Content too long", status="absent", ) skipped_contents: Tuple[SkippedContent, ...] 
= (skipped_content, skipped_content2) directory5 = Directory(entries=()) directory = Directory( id=hash_to_bytes("5256e856a0a0898966d6ba14feb4388b8b82d302"), entries=tuple( [ DirectoryEntry( name=b"foo", type="file", target=content.sha1_git, perms=from_disk.DentryPerms.content, ), DirectoryEntry( name=b"bar\xc3", type="dir", target=directory5.id, perms=from_disk.DentryPerms.directory, ), ], ), ) directory2 = Directory( id=hash_to_bytes("8505808532953da7d2581741f01b29c04b1cb9ab"), entries=tuple( [ DirectoryEntry( name=b"oof", type="file", target=content2.sha1_git, perms=from_disk.DentryPerms.content, ) ], ), ) directory3 = Directory( id=hash_to_bytes("4ea8c6b2f54445e5dd1a9d5bb2afd875d66f3150"), entries=tuple( [ DirectoryEntry( name=b"foo", type="file", target=content.sha1_git, perms=from_disk.DentryPerms.content, ), DirectoryEntry( name=b"subdir", type="dir", target=directory.id, perms=from_disk.DentryPerms.directory, ), DirectoryEntry( name=b"hello", type="file", target=content2.sha1_git, perms=from_disk.DentryPerms.content, ), ], ), ) directory4 = Directory( id=hash_to_bytes("377aa5fcd944fbabf502dbfda55cd14d33c8c3c6"), entries=tuple( [ DirectoryEntry( name=b"subdir1", type="dir", target=directory3.id, perms=from_disk.DentryPerms.directory, ) ], ), ) directories: Tuple[Directory, ...] = ( directory2, directory, directory3, directory4, directory5, ) revision = Revision( id=hash_to_bytes("01a7114f36fddd5ef2511b2cadda237a68adbb12"), message=b"hello", author=Person( name=b"Nicolas Dandrimont", email=b"nicolas@example.com", fullname=b"Nicolas Dandrimont ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0), offset=120, negative_utc=False, ), committer=Person( name=b"St\xc3fano Zacchiroli", email=b"stefano@example.com", fullname=b"St\xc3fano Zacchiroli ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1123456789, microseconds=0), offset=120, negative_utc=False, ), parents=(), type=RevisionType.GIT, directory=directory.id, metadata={ "checksums": {"sha1": "tarball-sha1", "sha256": "tarball-sha256",}, "signed-off-by": "some-dude", }, extra_headers=( (b"gpgsig", b"test123"), (b"mergetag", b"foo\\bar"), (b"mergetag", b"\x22\xaf\x89\x80\x01\x00"), ), synthetic=True, ) revision2 = Revision( id=hash_to_bytes("df7a6f6a99671fb7f7343641aff983a314ef6161"), message=b"hello again", author=Person( name=b"Roberto Dicosmo", email=b"roberto@example.com", fullname=b"Roberto Dicosmo ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), offset=-720, negative_utc=False, ), committer=Person( name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1123456789, microseconds=220000,), offset=0, negative_utc=False, ), parents=tuple([revision.id]), type=RevisionType.GIT, directory=directory2.id, metadata=None, extra_headers=(), synthetic=False, ) revision3 = Revision( id=hash_to_bytes("2cbd7bb22c653bbb23a29657852a50a01b591d46"), message=b"a simple revision with no parents this time", author=Person( name=b"Roberto Dicosmo", email=b"roberto@example.com", fullname=b"Roberto Dicosmo ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), offset=-720, negative_utc=False, ), committer=Person( name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1127351742, microseconds=220000,), offset=0, negative_utc=False, ), parents=tuple([revision.id, 
revision2.id]), type=RevisionType.GIT, directory=directory2.id, metadata=None, extra_headers=(), synthetic=True, ) revision4 = Revision( id=hash_to_bytes("88cd5126fc958ed70089d5340441a1c2477bcc20"), message=b"parent of self.revision2", author=Person( name=b"me", email=b"me@soft.heri", fullname=b"me ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), offset=-720, negative_utc=False, ), committer=Person( name=b"committer-dude", email=b"committer@dude.com", fullname=b"committer-dude ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1244567843, microseconds=220000,), offset=-720, negative_utc=False, ), parents=tuple([revision3.id]), type=RevisionType.GIT, directory=directory.id, metadata=None, extra_headers=(), synthetic=False, ) revisions: Tuple[Revision, ...] = (revision, revision2, revision3, revision4) origins: Tuple[Origin, ...] = ( Origin(url="https://github.com/user1/repo1"), Origin(url="https://github.com/user2/repo1"), Origin(url="https://github.com/user3/repo1"), Origin(url="https://gitlab.com/user1/repo1"), Origin(url="https://gitlab.com/user2/repo1"), Origin(url="https://forge.softwareheritage.org/source/repo1"), ) origin, origin2 = origins[:2] metadata_authority = MetadataAuthority( type=MetadataAuthorityType.DEPOSIT_CLIENT, url="http://hal.inria.example.com/", metadata={"location": "France"}, ) metadata_authority2 = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="http://wikidata.example.com/", metadata={}, ) authorities: Tuple[MetadataAuthority, ...] = ( metadata_authority, metadata_authority2, ) metadata_fetcher = MetadataFetcher( name="swh-deposit", version="0.0.1", metadata={"sword_version": "2"}, ) metadata_fetcher2 = MetadataFetcher( name="swh-example", version="0.0.1", metadata={}, ) fetchers: Tuple[MetadataFetcher, ...] = (metadata_fetcher, metadata_fetcher2) date_visit1 = datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) date_visit2 = datetime.datetime(2017, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) date_visit3 = datetime.datetime(2018, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) type_visit1 = "git" type_visit2 = "hg" type_visit3 = "deb" origin_visit = OriginVisit( origin=origin.url, visit=1, date=date_visit1, type=type_visit1, ) origin_visit2 = OriginVisit( origin=origin.url, visit=2, date=date_visit2, type=type_visit1, ) origin_visit3 = OriginVisit( origin=origin2.url, visit=1, date=date_visit1, type=type_visit2, ) origin_visits: Tuple[OriginVisit, ...] 
= ( origin_visit, origin_visit2, origin_visit3, ) release = Release( id=hash_to_bytes("f7f222093a18ec60d781070abec4a630c850b837"), name=b"v0.0.1", author=Person( name=b"olasd", email=b"nic@olasd.fr", fullname=b"olasd ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0), offset=42, negative_utc=False, ), target=revision.id, target_type=ObjectType.REVISION, message=b"synthetic release", synthetic=True, ) release2 = Release( id=hash_to_bytes("6902bd4c82b7d19a421d224aedab2b74197e420d"), name=b"v0.0.2", author=Person( name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1634366813, microseconds=0), offset=-120, negative_utc=False, ), target=revision2.id, target_type=ObjectType.REVISION, message=b"v0.0.2\nMisc performance improvements + bug fixes", synthetic=False, ) release3 = Release( id=hash_to_bytes("3e9050196aa288264f2a9d279d6abab8b158448b"), name=b"v0.0.2", author=Person( name=b"tony", email=b"tony@ardumont.fr", fullname=b"tony ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1634366813, microseconds=0), offset=-120, negative_utc=False, ), target=revision3.id, target_type=ObjectType.REVISION, message=b"yet another synthetic release", synthetic=True, ) releases: Tuple[Release, ...] = (release, release2, release3) snapshot = Snapshot( id=hash_to_bytes("9b922e6d8d5b803c1582aabe5525b7b91150788e"), branches={ b"master": SnapshotBranch( target=revision.id, target_type=TargetType.REVISION, ), }, ) empty_snapshot = Snapshot( id=hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), branches={}, ) complete_snapshot = Snapshot( id=hash_to_bytes("a56ce2d81c190023bb99a3a36279307522cb85f6"), branches={ b"directory": SnapshotBranch( target=directory.id, target_type=TargetType.DIRECTORY, ), b"directory2": SnapshotBranch( target=directory2.id, target_type=TargetType.DIRECTORY, ), b"content": SnapshotBranch( target=content.sha1_git, target_type=TargetType.CONTENT, ), b"alias": SnapshotBranch(target=b"revision", target_type=TargetType.ALIAS,), b"revision": SnapshotBranch( target=revision.id, target_type=TargetType.REVISION, ), b"release": SnapshotBranch( target=release.id, target_type=TargetType.RELEASE, ), b"snapshot": SnapshotBranch( target=empty_snapshot.id, target_type=TargetType.SNAPSHOT, ), b"dangling": None, }, ) snapshots: Tuple[Snapshot, ...] 
= (snapshot, empty_snapshot, complete_snapshot) content_metadata1 = RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, - id=parse_swhid(f"swh:1:cnt:{hash_to_hex(content.sha1_git)}"), + target=parse_swhid(f"swh:1:cnt:{hash_to_hex(content.sha1_git)}"), origin=origin.url, discovery_date=datetime.datetime( 2015, 1, 1, 21, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority, metadata=None), fetcher=attr.evolve(metadata_fetcher, metadata=None), format="json", metadata=b'{"foo": "bar"}', ) content_metadata2 = RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, - id=parse_swhid(f"swh:1:cnt:{hash_to_hex(content.sha1_git)}"), + target=parse_swhid(f"swh:1:cnt:{hash_to_hex(content.sha1_git)}"), origin=origin2.url, discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority, metadata=None), fetcher=attr.evolve(metadata_fetcher, metadata=None), format="yaml", metadata=b"foo: bar", ) content_metadata3 = RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, - id=parse_swhid(f"swh:1:cnt:{hash_to_hex(content.sha1_git)}"), + target=parse_swhid(f"swh:1:cnt:{hash_to_hex(content.sha1_git)}"), discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority2, metadata=None), fetcher=attr.evolve(metadata_fetcher2, metadata=None), format="yaml", metadata=b"foo: bar", origin=origin.url, visit=42, snapshot=parse_swhid(f"swh:1:snp:{hash_to_hex(snapshot.id)}"), release=parse_swhid(f"swh:1:rel:{hash_to_hex(release.id)}"), revision=parse_swhid(f"swh:1:rev:{hash_to_hex(revision.id)}"), directory=parse_swhid(f"swh:1:dir:{hash_to_hex(directory.id)}"), path=b"/foo/bar", ) content_metadata: Tuple[RawExtrinsicMetadata, ...] = ( content_metadata1, content_metadata2, content_metadata3, ) origin_metadata1 = RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, - id=origin.url, + target=origin.url, discovery_date=datetime.datetime( 2015, 1, 1, 21, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority, metadata=None), fetcher=attr.evolve(metadata_fetcher, metadata=None), format="json", metadata=b'{"foo": "bar"}', ) origin_metadata2 = RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, - id=origin.url, + target=origin.url, discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority, metadata=None), fetcher=attr.evolve(metadata_fetcher, metadata=None), format="yaml", metadata=b"foo: bar", ) origin_metadata3 = RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, - id=origin.url, + target=origin.url, discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority2, metadata=None), fetcher=attr.evolve(metadata_fetcher2, metadata=None), format="yaml", metadata=b"foo: bar", ) origin_metadata: Tuple[RawExtrinsicMetadata, ...] 
= ( origin_metadata1, origin_metadata2, origin_metadata3, ) diff --git a/swh/storage/tests/storage_tests.py b/swh/storage/tests/storage_tests.py index 911d6d98..702a5ac2 100644 --- a/swh/storage/tests/storage_tests.py +++ b/swh/storage/tests/storage_tests.py @@ -1,3927 +1,3927 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import datetime from datetime import timedelta import inspect import itertools import math import random from typing import Any, ClassVar, Dict, Iterator, Optional import attr from hypothesis import HealthCheck, given, settings, strategies import pytest from swh.model import from_disk from swh.model.hashutil import hash_to_bytes from swh.model.hypothesis_strategies import objects from swh.model.identifiers import SWHID from swh.model.model import ( Content, Directory, MetadataTargetType, Origin, OriginVisit, OriginVisitStatus, Person, Revision, SkippedContent, Snapshot, TargetType, ) from swh.storage import get_storage from swh.storage.common import origin_url_to_sha1 as sha1 from swh.storage.exc import HashCollision, StorageArgumentException from swh.storage.interface import ListOrder, PagedResult, StorageInterface from swh.storage.utils import content_hex_hashes, now, round_to_milliseconds def transform_entries( storage: StorageInterface, dir_: Directory, *, prefix: bytes = b"" ) -> Iterator[Dict[str, Any]]: """Iterate through a directory's entries, and yields the items 'directory_ls' is expected to return; including content metadata for file entries.""" for ent in dir_.entries: if ent.type == "dir": yield { "dir_id": dir_.id, "type": ent.type, "target": ent.target, "name": prefix + ent.name, "perms": ent.perms, "status": None, "sha1": None, "sha1_git": None, "sha256": None, "length": None, } elif ent.type == "file": contents = storage.content_find({"sha1_git": ent.target}) assert contents ent_dict = contents[0].to_dict() for key in ["ctime", "blake2s256"]: ent_dict.pop(key, None) ent_dict.update( { "dir_id": dir_.id, "type": ent.type, "target": ent.target, "name": prefix + ent.name, "perms": ent.perms, } ) yield ent_dict def assert_contents_ok( expected_contents, actual_contents, keys_to_check={"sha1", "data"} ): """Assert that a given list of contents matches on a given set of keys. """ for k in keys_to_check: expected_list = set([c.get(k) for c in expected_contents]) actual_list = set([c.get(k) for c in actual_contents]) assert actual_list == expected_list, k class LazyContent(Content): def with_data(self): return Content.from_dict({**self.to_dict(), "data": b"42\n"}) class TestStorage: """Main class for Storage testing. This class is used as-is to test local storage (see TestLocalStorage below) and remote storage (see TestRemoteStorage in test_remote_storage.py. We need to have the two classes inherit from this base class separately to avoid nosetests running the tests from the base class twice. 
""" maxDiff = None # type: ClassVar[Optional[int]] def test_types(self, swh_storage_backend_config): """Checks all methods of StorageInterface are implemented by this backend, and that they have the same signature.""" # Create an instance of the protocol (which cannot be instantiated # directly, so this creates a subclass, then instantiates it) interface = type("_", (StorageInterface,), {})() storage = get_storage(**swh_storage_backend_config) assert "content_add" in dir(interface) missing_methods = [] for meth_name in dir(interface): if meth_name.startswith("_"): continue interface_meth = getattr(interface, meth_name) try: concrete_meth = getattr(storage, meth_name) except AttributeError: if not getattr(interface_meth, "deprecated_endpoint", False): # The backend is missing a (non-deprecated) endpoint missing_methods.append(meth_name) continue expected_signature = inspect.signature(interface_meth) actual_signature = inspect.signature(concrete_meth) assert expected_signature == actual_signature, meth_name assert missing_methods == [] # If all the assertions above succeed, then this one should too. # But there's no harm in double-checking. # And we could replace the assertions above by this one, but unlike # the assertions above, it doesn't explain what is missing. assert isinstance(storage, StorageInterface) def test_check_config(self, swh_storage): assert swh_storage.check_config(check_write=True) assert swh_storage.check_config(check_write=False) def test_content_add(self, swh_storage, sample_data): cont = sample_data.content insertion_start_time = now() actual_result = swh_storage.content_add([cont]) insertion_end_time = now() assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } assert swh_storage.content_get_data(cont.sha1) == cont.data expected_cont = attr.evolve(cont, data=None) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: assert insertion_start_time <= obj.ctime assert obj.ctime <= insertion_end_time assert obj == expected_cont swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["content"] == 1 def test_content_add_from_lazy_content(self, swh_storage, sample_data): cont = sample_data.content lazy_content = LazyContent.from_dict(cont.to_dict()) insertion_start_time = now() actual_result = swh_storage.content_add([lazy_content]) insertion_end_time = now() assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } # the fact that we retrieve the content object from the storage with # the correct 'data' field ensures it has been 'called' assert swh_storage.content_get_data(cont.sha1) == cont.data expected_cont = attr.evolve(lazy_content, data=None, ctime=None) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: assert insertion_start_time <= obj.ctime assert obj.ctime <= insertion_end_time assert attr.evolve(obj, ctime=None).to_dict() == expected_cont.to_dict() swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["content"] == 1 def test_content_get_data_missing(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] swh_storage.content_add([cont]) # Query a single missing content actual_content_data = swh_storage.content_get_data(cont2.sha1) assert actual_content_data is None # Check content_get does not abort after finding a missing content actual_content_data = 
swh_storage.content_get_data(cont.sha1) assert actual_content_data == cont.data actual_content_data = swh_storage.content_get_data(cont2.sha1) assert actual_content_data is None def test_content_add_different_input(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] actual_result = swh_storage.content_add([cont, cont2]) assert actual_result == { "content:add": 2, "content:add:bytes": cont.length + cont2.length, } def test_content_add_twice(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] actual_result = swh_storage.content_add([cont]) assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } assert len(swh_storage.journal_writer.journal.objects) == 1 actual_result = swh_storage.content_add([cont, cont2]) assert actual_result == { "content:add": 1, "content:add:bytes": cont2.length, } assert 2 <= len(swh_storage.journal_writer.journal.objects) <= 3 assert len(swh_storage.content_find(cont.to_dict())) == 1 assert len(swh_storage.content_find(cont2.to_dict())) == 1 def test_content_add_collision(self, swh_storage, sample_data): cont1 = sample_data.content # create (corrupted) content with same sha1{,_git} but != sha256 sha256_array = bytearray(cont1.sha256) sha256_array[0] += 1 cont1b = attr.evolve(cont1, sha256=bytes(sha256_array)) with pytest.raises(HashCollision) as cm: swh_storage.content_add([cont1, cont1b]) exc = cm.value actual_algo = exc.algo assert actual_algo in ["sha1", "sha1_git"] actual_id = exc.hash_id assert actual_id == getattr(cont1, actual_algo).hex() collisions = exc.args[2] assert len(collisions) == 2 assert collisions == [ content_hex_hashes(cont1.hashes()), content_hex_hashes(cont1b.hashes()), ] assert exc.colliding_content_hashes() == [ cont1.hashes(), cont1b.hashes(), ] def test_content_add_duplicate(self, swh_storage, sample_data): cont = sample_data.content swh_storage.content_add([cont, cont]) assert swh_storage.content_get_data(cont.sha1) == cont.data def test_content_update(self, swh_storage, sample_data): cont1 = sample_data.content if hasattr(swh_storage, "journal_writer"): swh_storage.journal_writer.journal = None # TODO, not supported swh_storage.content_add([cont1]) # alter the sha1_git for example cont1b = attr.evolve( cont1, sha1_git=hash_to_bytes("3a60a5275d0333bf13468e8b3dcab90f4046e654") ) swh_storage.content_update([cont1b.to_dict()], keys=["sha1_git"]) actual_contents = swh_storage.content_get([cont1.sha1]) expected_content = attr.evolve(cont1b, data=None) assert actual_contents == [expected_content] def test_content_add_metadata(self, swh_storage, sample_data): cont = attr.evolve(sample_data.content, data=None, ctime=now()) actual_result = swh_storage.content_add_metadata([cont]) assert actual_result == { "content:add": 1, } expected_cont = cont assert swh_storage.content_get([cont.sha1]) == [expected_cont] contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: obj = attr.evolve(obj, ctime=None) assert obj == cont def test_content_add_metadata_different_input(self, swh_storage, sample_data): contents = sample_data.contents[:2] cont = attr.evolve(contents[0], data=None, ctime=now()) cont2 = attr.evolve(contents[1], data=None, ctime=now()) actual_result = swh_storage.content_add_metadata([cont, cont2]) assert actual_result == { "content:add": 2, } def test_content_add_metadata_collision(self, swh_storage, sample_data): cont1 = attr.evolve(sample_data.content, data=None, ctime=now()) # 
create (corrupted) content with same sha1{,_git} but != sha256 sha1_git_array = bytearray(cont1.sha256) sha1_git_array[0] += 1 cont1b = attr.evolve(cont1, sha256=bytes(sha1_git_array)) with pytest.raises(HashCollision) as cm: swh_storage.content_add_metadata([cont1, cont1b]) exc = cm.value actual_algo = exc.algo assert actual_algo in ["sha1", "sha1_git", "blake2s256"] actual_id = exc.hash_id assert actual_id == getattr(cont1, actual_algo).hex() collisions = exc.args[2] assert len(collisions) == 2 assert collisions == [ content_hex_hashes(cont1.hashes()), content_hex_hashes(cont1b.hashes()), ] assert exc.colliding_content_hashes() == [ cont1.hashes(), cont1b.hashes(), ] def test_skipped_content_add(self, swh_storage, sample_data): contents = sample_data.skipped_contents[:2] cont = contents[0] cont2 = attr.evolve(contents[1], blake2s256=None) contents_dict = [c.to_dict() for c in [cont, cont2]] missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [cont.hashes(), cont2.hashes()] actual_result = swh_storage.skipped_content_add([cont, cont, cont2]) assert 2 <= actual_result.pop("skipped_content:add") <= 3 assert actual_result == {} missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [] def test_skipped_content_add_missing_hashes(self, swh_storage, sample_data): cont, cont2 = [ attr.evolve(c, sha1_git=None) for c in sample_data.skipped_contents[:2] ] contents_dict = [c.to_dict() for c in [cont, cont2]] missing = list(swh_storage.skipped_content_missing(contents_dict)) assert len(missing) == 2 actual_result = swh_storage.skipped_content_add([cont, cont, cont2]) assert 2 <= actual_result.pop("skipped_content:add") <= 3 assert actual_result == {} missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [] def test_skipped_content_missing_partial_hash(self, swh_storage, sample_data): cont = sample_data.skipped_content cont2 = attr.evolve(cont, sha1_git=None) contents_dict = [c.to_dict() for c in [cont, cont2]] missing = list(swh_storage.skipped_content_missing(contents_dict)) assert len(missing) == 2 actual_result = swh_storage.skipped_content_add([cont]) assert actual_result.pop("skipped_content:add") == 1 assert actual_result == {} missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [cont2.hashes()] @pytest.mark.property_based @settings(deadline=None) # this test is very slow @given( strategies.sets( elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]), min_size=0, ) ) def test_content_missing(self, swh_storage, sample_data, algos): algos |= {"sha1"} content, missing_content = [sample_data.content2, sample_data.skipped_content] swh_storage.content_add([content]) test_contents = [content.to_dict()] missing_per_hash = defaultdict(list) for i in range(256): test_content = missing_content.to_dict() for hash in algos: test_content[hash] = bytes([i]) + test_content[hash][1:] missing_per_hash[hash].append(test_content[hash]) test_contents.append(test_content) assert set(swh_storage.content_missing(test_contents)) == set( missing_per_hash["sha1"] ) for hash in algos: assert set( swh_storage.content_missing(test_contents, key_hash=hash) ) == set(missing_per_hash[hash]) @pytest.mark.property_based @given( strategies.sets( elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]), min_size=0, ) ) def test_content_missing_unknown_algo(self, swh_storage, sample_data, algos): algos |= {"sha1"} content, missing_content = [sample_data.content2, 
sample_data.skipped_content] swh_storage.content_add([content]) test_contents = [content.to_dict()] missing_per_hash = defaultdict(list) for i in range(16): test_content = missing_content.to_dict() for hash in algos: test_content[hash] = bytes([i]) + test_content[hash][1:] missing_per_hash[hash].append(test_content[hash]) test_content["nonexisting_algo"] = b"\x00" test_contents.append(test_content) assert set(swh_storage.content_missing(test_contents)) == set( missing_per_hash["sha1"] ) for hash in algos: assert set( swh_storage.content_missing(test_contents, key_hash=hash) ) == set(missing_per_hash[hash]) def test_content_missing_per_sha1(self, swh_storage, sample_data): # given cont = sample_data.content cont2 = sample_data.content2 missing_cont = sample_data.skipped_content missing_cont2 = sample_data.skipped_content2 swh_storage.content_add([cont, cont2]) # when gen = swh_storage.content_missing_per_sha1( [cont.sha1, missing_cont.sha1, cont2.sha1, missing_cont2.sha1] ) # then assert list(gen) == [missing_cont.sha1, missing_cont2.sha1] def test_content_missing_per_sha1_git(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] missing_cont = sample_data.skipped_content swh_storage.content_add([cont, cont2]) contents = [cont.sha1_git, cont2.sha1_git, missing_cont.sha1_git] missing_contents = swh_storage.content_missing_per_sha1_git(contents) assert list(missing_contents) == [missing_cont.sha1_git] def test_content_get_partition(self, swh_storage, swh_contents): """content_get_partition paginates results if limit exceeded""" expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_contents = [] for i in range(16): actual_result = swh_storage.content_get_partition(i, 16) assert actual_result.next_page_token is None actual_contents.extend(actual_result.results) assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents assert content.ctime is None def test_content_get_partition_full(self, swh_storage, swh_contents): """content_get_partition for a single partition returns all available contents """ expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_result = swh_storage.content_get_partition(0, 1) assert actual_result.next_page_token is None actual_contents = actual_result.results assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents def test_content_get_partition_empty(self, swh_storage, swh_contents): """content_get_partition when at least one of the partitions is empty""" expected_contents = { cont.sha1 for cont in swh_contents if cont.status != "absent" } # nb_partitions = smallest power of 2 such that at least one of # the partitions is empty nb_partitions = 1 << math.floor(math.log2(len(swh_contents)) + 1) seen_sha1s = [] for i in range(nb_partitions): actual_result = swh_storage.content_get_partition( i, nb_partitions, limit=len(swh_contents) + 1 ) for content in actual_result.results: seen_sha1s.append(content.sha1) # Limit is higher than the max number of results assert actual_result.next_page_token is None assert set(seen_sha1s) == expected_contents def test_content_get_partition_limit_none(self, swh_storage): """content_get_partition call with wrong limit input should fail""" with pytest.raises(StorageArgumentException, match="limit should not be None"): swh_storage.content_get_partition(1, 16, limit=None) def 
test_content_get_partition_pagination_generate(self, swh_storage, swh_contents): """content_get_partition returns contents within range provided""" expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] # retrieve contents actual_contents = [] for i in range(4): page_token = None while True: actual_result = swh_storage.content_get_partition( i, 4, limit=3, page_token=page_token ) actual_contents.extend(actual_result.results) page_token = actual_result.next_page_token if page_token is None: break assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents def test_content_get(self, swh_storage, sample_data): cont1, cont2 = sample_data.contents[:2] swh_storage.content_add([cont1, cont2]) actual_contents = swh_storage.content_get([cont1.sha1, cont2.sha1]) # we only retrieve the metadata so no data nor ctime within expected_contents = [attr.evolve(c, data=None) for c in [cont1, cont2]] assert actual_contents == expected_contents for content in actual_contents: assert content.ctime is None def test_content_get_missing_sha1(self, swh_storage, sample_data): cont1, cont2 = sample_data.contents[:2] assert cont1.sha1 != cont2.sha1 missing_cont = sample_data.skipped_content swh_storage.content_add([cont1, cont2]) actual_contents = swh_storage.content_get( [cont1.sha1, cont2.sha1, missing_cont.sha1] ) expected_contents = [ attr.evolve(c, data=None) if c else None for c in [cont1, cont2, None] ] assert actual_contents == expected_contents def test_content_get_random(self, swh_storage, sample_data): cont, cont2, cont3 = sample_data.contents[:3] swh_storage.content_add([cont, cont2, cont3]) assert swh_storage.content_get_random() in { cont.sha1_git, cont2.sha1_git, cont3.sha1_git, } def test_directory_add(self, swh_storage, sample_data): content = sample_data.content directory = sample_data.directories[1] assert directory.entries[0].target == content.sha1_git swh_storage.content_add([content]) init_missing = list(swh_storage.directory_missing([directory.id])) assert [directory.id] == init_missing actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 1} assert ("directory", directory) in list( swh_storage.journal_writer.journal.objects ) actual_data = list(swh_storage.directory_ls(directory.id)) expected_data = list(transform_entries(swh_storage, directory)) for data in actual_data: assert data in expected_data after_missing = list(swh_storage.directory_missing([directory.id])) assert after_missing == [] swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["directory"] == 1 def test_directory_add_twice(self, swh_storage, sample_data): directory = sample_data.directories[1] actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("directory", directory) ] actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 0} assert list(swh_storage.journal_writer.journal.objects) == [ ("directory", directory) ] def test_directory_ls_recursive(self, swh_storage, sample_data): # create consistent dataset regarding the directories we want to list content, content2 = sample_data.contents[:2] swh_storage.content_add([content, content2]) dir1, dir2, dir3 = sample_data.directories[:3] dir_ids = [d.id for d in [dir1, dir2, dir3]] init_missing = list(swh_storage.directory_missing(dir_ids)) assert init_missing == dir_ids 
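# Illustrative sketch (not part of the test suite): the directory tests around
# here exercise a list-then-add pattern against the StorageInterface. The helper
# below only sketches that client-side pattern; `storage` is assumed to be any
# StorageInterface implementation and `directories` an iterable of swh.model
# Directory objects (such as sample_data.directories) -- both names are
# placeholders, not part of this module.
def ensure_directories(storage, directories):
    by_id = {d.id: d for d in directories}
    # directory_missing yields the ids the archive does not know yet, so only
    # those are sent to directory_add.
    missing = set(storage.directory_missing(list(by_id)))
    if missing:
        storage.directory_add([by_id[i] for i in missing])
    # directory_ls yields one dict per entry (name, type, target, perms, ...);
    # recursive=True also descends into subdirectories known to the archive.
    return {
        dir_id: list(storage.directory_ls(dir_id, recursive=True))
        for dir_id in by_id
    }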
actual_result = swh_storage.directory_add([dir1, dir2, dir3]) assert actual_result == {"directory:add": 3} # List directory containing one file actual_data = list(swh_storage.directory_ls(dir1.id, recursive=True)) expected_data = list(transform_entries(swh_storage, dir1)) for data in actual_data: assert data in expected_data # List directory containing a file and an unknown subdirectory actual_data = list(swh_storage.directory_ls(dir2.id, recursive=True)) expected_data = list(transform_entries(swh_storage, dir2)) for data in actual_data: assert data in expected_data # List directory containing both a known and unknown subdirectory, entries # should be both those of the directory and of the known subdir (up to contents) actual_data = list(swh_storage.directory_ls(dir3.id, recursive=True)) expected_data = list( itertools.chain( transform_entries(swh_storage, dir3), transform_entries(swh_storage, dir2, prefix=b"subdir/"), ) ) for data in actual_data: assert data in expected_data def test_directory_ls_non_recursive(self, swh_storage, sample_data): # create consistent dataset regarding the directories we want to list content, content2 = sample_data.contents[:2] swh_storage.content_add([content, content2]) dir1, dir2, dir3, _, dir5 = sample_data.directories[:5] dir_ids = [d.id for d in [dir1, dir2, dir3, dir5]] init_missing = list(swh_storage.directory_missing(dir_ids)) assert init_missing == dir_ids actual_result = swh_storage.directory_add([dir1, dir2, dir3, dir5]) assert actual_result == {"directory:add": 4} # List directory containing a file and an unknown subdirectory actual_data = list(swh_storage.directory_ls(dir1.id)) expected_data = list(transform_entries(swh_storage, dir1)) for data in actual_data: assert data in expected_data # List directory containing a single file actual_data = list(swh_storage.directory_ls(dir2.id)) expected_data = list(transform_entries(swh_storage, dir2)) for data in actual_data: assert data in expected_data # List directory containing a known subdirectory, entries should # only be those of the parent directory, not of the subdir actual_data = list(swh_storage.directory_ls(dir3.id)) expected_data = list(transform_entries(swh_storage, dir3)) for data in actual_data: assert data in expected_data def test_directory_ls_missing_content(self, swh_storage, sample_data): swh_storage.directory_add([sample_data.directory2]) assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [ { "dir_id": sample_data.directory2.id, "length": None, "name": b"oof", "perms": 33188, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "target": sample_data.directory2.entries[0].target, "type": "file", }, ] def test_directory_ls_skipped_content(self, swh_storage, sample_data): swh_storage.directory_add([sample_data.directory2]) cont = SkippedContent( sha1_git=sample_data.directory2.entries[0].target, sha1=b"c" * 20, sha256=None, blake2s256=None, length=42, status="absent", reason="You need a premium subscription to access this content", ) swh_storage.skipped_content_add([cont]) assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [ { "dir_id": sample_data.directory2.id, "length": 42, "name": b"oof", "perms": 33188, "sha1": b"c" * 20, "sha1_git": sample_data.directory2.entries[0].target, "sha256": None, "status": "absent", "target": sample_data.directory2.entries[0].target, "type": "file", }, ] def test_directory_entry_get_by_path(self, swh_storage, sample_data): cont, content2 = sample_data.contents[:2] dir1, dir2, dir3, dir4, dir5 = 
sample_data.directories[:5] # given dir_ids = [d.id for d in [dir1, dir2, dir3, dir4, dir5]] init_missing = list(swh_storage.directory_missing(dir_ids)) assert init_missing == dir_ids actual_result = swh_storage.directory_add([dir3, dir4]) assert actual_result == {"directory:add": 2} expected_entries = [ { "dir_id": dir3.id, "name": b"foo", "type": "file", "target": cont.sha1_git, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.content, "length": None, }, { "dir_id": dir3.id, "name": b"subdir", "type": "dir", "target": dir2.id, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.directory, "length": None, }, { "dir_id": dir3.id, "name": b"hello", "type": "file", "target": content2.sha1_git, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.content, "length": None, }, ] # when (all must be found here) for entry, expected_entry in zip(dir3.entries, expected_entries): actual_entry = swh_storage.directory_entry_get_by_path( dir3.id, [entry.name] ) assert actual_entry == expected_entry # same, but deeper for entry, expected_entry in zip(dir3.entries, expected_entries): actual_entry = swh_storage.directory_entry_get_by_path( dir4.id, [b"subdir1", entry.name] ) expected_entry = expected_entry.copy() expected_entry["name"] = b"subdir1/" + expected_entry["name"] assert actual_entry == expected_entry # when (nothing should be found here since `dir` is not persisted.) for entry in dir2.entries: actual_entry = swh_storage.directory_entry_get_by_path( dir2.id, [entry.name] ) assert actual_entry is None def test_directory_get_random(self, swh_storage, sample_data): dir1, dir2, dir3 = sample_data.directories[:3] swh_storage.directory_add([dir1, dir2, dir3]) assert swh_storage.directory_get_random() in { dir1.id, dir2.id, dir3.id, } def test_revision_add(self, swh_storage, sample_data): revision = sample_data.revision init_missing = swh_storage.revision_missing([revision.id]) assert list(init_missing) == [revision.id] actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 1} end_missing = swh_storage.revision_missing([revision.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("revision", revision) ] # already there so nothing added actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 0} swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["revision"] == 1 def test_revision_add_twice(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("revision", revision) ] actual_result = swh_storage.revision_add([revision, revision2]) assert actual_result == {"revision:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("revision", revision), ("revision", revision2), ] def test_revision_add_name_clash(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] revision1 = attr.evolve( revision, author=Person( fullname=b"John Doe ", name=b"John Doe", email=b"john.doe@example.com", ), ) revision2 = attr.evolve( revision2, author=Person( fullname=b"John Doe ", name=b"John Doe ", email=b"john.doe@example.com ", ), ) actual_result = swh_storage.revision_add([revision1, revision2]) assert actual_result == {"revision:add": 2} 
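# Illustrative sketch (not part of the test suite): the revision_log tests that
# follow rebuild model objects from the plain dicts returned by this version of
# the API. The helper below sketches that client-side pattern; `storage` is
# assumed to implement StorageInterface and `head_id` to be the id of a
# revision already present in the archive (both names are placeholders).
from swh.model.model import Revision  # already imported at the top of this module

def walk_history(storage, head_id, limit=None):
    # revision_log yields the ancestry of the given revision ids as dicts;
    # unknown ids simply yield nothing (see test_revision_log_unknown_revision).
    for rev_dict in storage.revision_log([head_id], limit):
        yield Revision.from_dict(rev_dict)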
def test_revision_get_order(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] add_result = swh_storage.revision_add([revision, revision2]) assert add_result == {"revision:add": 2} # order 1 actual_revisions = swh_storage.revision_get([revision.id, revision2.id]) assert actual_revisions == [revision, revision2] # order 2 actual_revisions2 = swh_storage.revision_get([revision2.id, revision.id]) assert actual_revisions2 == [revision2, revision] def test_revision_log(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # rev4 -is-child-of-> rev3 -> rev1, (rev2 -> rev1) swh_storage.revision_add([revision1, revision2, revision3, revision4]) # when results = list(swh_storage.revision_log([revision4.id])) # for comparison purposes actual_results = [Revision.from_dict(r) for r in results] assert len(actual_results) == 4 # rev4 -child-> rev3 -> rev1, (rev2 -> rev1) assert actual_results == [revision4, revision3, revision1, revision2] def test_revision_log_with_limit(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # revision4 -is-child-of-> revision3 swh_storage.revision_add([revision3, revision4]) results = list(swh_storage.revision_log([revision4.id], 1)) actual_results = [Revision.from_dict(r) for r in results] assert len(actual_results) == 1 assert actual_results[0] == revision4 def test_revision_log_unknown_revision(self, swh_storage, sample_data): revision = sample_data.revision rev_log = list(swh_storage.revision_log([revision.id])) assert rev_log == [] def test_revision_shortlog(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # rev4 -is-child-of-> rev3 -> (rev1, rev2); rev2 -> rev1 swh_storage.revision_add([revision1, revision2, revision3, revision4]) results = list(swh_storage.revision_shortlog([revision4.id])) actual_results = [[id, tuple(parents)] for (id, parents) in results] assert len(actual_results) == 4 assert actual_results == [ [revision4.id, revision4.parents], [revision3.id, revision3.parents], [revision1.id, revision1.parents], [revision2.id, revision2.parents], ] def test_revision_shortlog_with_limit(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # revision4 -is-child-of-> revision3 swh_storage.revision_add([revision1, revision2, revision3, revision4]) results = list(swh_storage.revision_shortlog([revision4.id], 1)) actual_results = [[id, tuple(parents)] for (id, parents) in results] assert len(actual_results) == 1 assert list(actual_results[0]) == [revision4.id, revision4.parents] def test_revision_get(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] swh_storage.revision_add([revision]) actual_revisions = swh_storage.revision_get([revision.id, revision2.id]) assert len(actual_revisions) == 2 assert actual_revisions == [revision, None] def test_revision_get_no_parents(self, swh_storage, sample_data): revision = sample_data.revision swh_storage.revision_add([revision]) actual_revision = swh_storage.revision_get([revision.id])[0] assert revision.parents == () assert actual_revision.parents == () # no parents on this one def test_revision_get_random(self, swh_storage, sample_data): revision1, revision2, revision3 = sample_data.revisions[:3] swh_storage.revision_add([revision1, revision2, revision3]) assert swh_storage.revision_get_random() in { revision1.id, revision2.id, revision3.id, } def 
test_release_add(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] init_missing = swh_storage.release_missing([release.id, release2.id]) assert list(init_missing) == [release.id, release2.id] actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 2} end_missing = swh_storage.release_missing([release.id, release2.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release), ("release", release2), ] # already present so nothing added actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 0} swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["release"] == 2 def test_release_add_no_author_date(self, swh_storage, sample_data): full_release = sample_data.release release = attr.evolve(full_release, author=None, date=None) actual_result = swh_storage.release_add([release]) assert actual_result == {"release:add": 1} end_missing = swh_storage.release_missing([release.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release) ] def test_release_add_twice(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] actual_result = swh_storage.release_add([release]) assert actual_result == {"release:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release) ] actual_result = swh_storage.release_add([release, release2, release, release2]) assert actual_result == {"release:add": 1} assert set(swh_storage.journal_writer.journal.objects) == set( [("release", release), ("release", release2),] ) def test_release_add_name_clash(self, swh_storage, sample_data): release, release2 = [ attr.evolve( c, author=Person( fullname=b"John Doe ", name=b"John Doe", email=b"john.doe@example.com", ), ) for c in sample_data.releases[:2] ] actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 2} def test_release_get(self, swh_storage, sample_data): release, release2, release3 = sample_data.releases[:3] # given swh_storage.release_add([release, release2]) # when actual_releases = swh_storage.release_get([release.id, release2.id]) # then assert actual_releases == [release, release2] unknown_releases = swh_storage.release_get([release3.id]) assert unknown_releases[0] is None def test_release_get_order(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] add_result = swh_storage.release_add([release, release2]) assert add_result == {"release:add": 2} # order 1 actual_releases = swh_storage.release_get([release.id, release2.id]) assert actual_releases == [release, release2] # order 2 actual_releases2 = swh_storage.release_get([release2.id, release.id]) assert actual_releases2 == [release2, release] def test_release_get_random(self, swh_storage, sample_data): release, release2, release3 = sample_data.releases[:3] swh_storage.release_add([release, release2, release3]) assert swh_storage.release_get_random() in { release.id, release2.id, release3.id, } def test_origin_add(self, swh_storage, sample_data): origins = list(sample_data.origins[:2]) origin_urls = [o.url for o in origins] assert swh_storage.origin_get(origin_urls) == [None, None] stats = swh_storage.origin_add(origins) assert stats == {"origin:add": 2} actual_origins = swh_storage.origin_get(origin_urls) assert actual_origins == origins assert set(swh_storage.journal_writer.journal.objects) == set( 
[("origin", origins[0]), ("origin", origins[1]),] ) swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["origin"] == 2 def test_origin_add_twice(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] add1 = swh_storage.origin_add([origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add1 == {"origin:add": 2} add2 = swh_storage.origin_add([origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add2 == {"origin:add": 0} def test_origin_add_twice_at_once(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] add1 = swh_storage.origin_add([origin, origin2, origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add1 == {"origin:add": 2} add2 = swh_storage.origin_add([origin, origin2, origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add2 == {"origin:add": 0} def test_origin_get(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] assert swh_storage.origin_get([origin.url]) == [None] swh_storage.origin_add([origin]) actual_origins = swh_storage.origin_get([origin.url]) assert actual_origins == [origin] actual_origins = swh_storage.origin_get([origin.url, "not://exists"]) assert actual_origins == [origin, None] def _generate_random_visits(self, nb_visits=100, start=0, end=7): """Generate random visits within the last 2 months (to avoid computations) """ visits = [] today = now() for weeks in range(nb_visits, 0, -1): hours = random.randint(0, 24) minutes = random.randint(0, 60) seconds = random.randint(0, 60) days = random.randint(0, 28) weeks = random.randint(start, end) date_visit = today - timedelta( weeks=weeks, hours=hours, minutes=minutes, seconds=seconds, days=days ) visits.append(date_visit) return visits def test_origin_visit_get__unknown_origin(self, swh_storage): actual_page = swh_storage.origin_visit_get("foo") assert actual_page.next_page_token is None assert actual_page.results == [] assert actual_page == PagedResult() def test_origin_visit_get__validation_failure(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) with pytest.raises( StorageArgumentException, match="page_token must be a string" ): swh_storage.origin_visit_get(origin.url, page_token=10) # not bytes with pytest.raises( StorageArgumentException, match="order must be a ListOrder value" ): swh_storage.origin_visit_get(origin.url, order="foobar") # wrong order def test_origin_visit_get_all(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) ov1, ov2, ov3 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) # order asc, no token, no limit actual_page = swh_storage.origin_visit_get(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [ov1, ov2, ov3] # order asc, no token, limit actual_page = swh_storage.origin_visit_get(origin.url, limit=2) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == 
[ov1, ov2] # order asc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ov3] # order asc, no token, limit actual_page = swh_storage.origin_visit_get(origin.url, limit=1) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov1] # order asc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov3] # order asc, token, limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=2 ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov3] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov2] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [ov3] # order desc, no token, no limit actual_page = swh_storage.origin_visit_get(origin.url, order=ListOrder.DESC) assert actual_page.next_page_token is None assert actual_page.results == [ov3, ov2, ov1] # order desc, no token, limit actual_page = swh_storage.origin_visit_get( origin.url, limit=2, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov3, ov2] # order desc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov1] # order desc, no token, limit actual_page = swh_storage.origin_visit_get( origin.url, limit=1, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov3] # order desc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov1] # order desc, token, limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov2] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov1] def test_origin_visit_status_get__unknown_cases(self, swh_storage, sample_data): origin = sample_data.origin actual_page = swh_storage.origin_visit_status_get("foobar", 1) assert actual_page.next_page_token is None assert actual_page.results == [] actual_page = swh_storage.origin_visit_status_get(origin.url, 1) assert actual_page.next_page_token is None assert actual_page.results == [] origin = sample_data.origin swh_storage.origin_add([origin]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), ] )[0] actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit + 10) assert actual_page.next_page_token is None assert actual_page.results == [] def test_origin_visit_status_get_all(self, 
swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) date_visit3 = round_to_milliseconds(now()) date_visit1 = date_visit3 - datetime.timedelta(hours=2) date_visit2 = date_visit3 - datetime.timedelta(hours=1) assert date_visit1 < date_visit2 < date_visit3 ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=date_visit1, type=sample_data.type_visit1, ), ] )[0] ovs1 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit1, status="created", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit2, status="partial", snapshot=None, ) ovs3 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit3, status="full", snapshot=sample_data.snapshot.id, metadata={}, ) swh_storage.origin_visit_status_add([ovs2, ovs3]) # order asc, no token, no limit actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit) assert actual_page.next_page_token is None assert actual_page.results == [ovs1, ovs2, ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1, ovs2] # order asc, token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs3] # order asc, token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, limit=2 ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1, ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3] # order desc, no token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3, ovs2, ovs1] # order desc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs3, ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs1] # order desc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, order=ListOrder.DESC, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs3] # order desc, token, no limit actual_page = 
swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs1] # order desc, token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC, limit=1, ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs1] def test_origin_visit_status_get_random(self, swh_storage, sample_data): origins = sample_data.origins[:2] swh_storage.origin_add(origins) # Add some random visits within the selection range visits = self._generate_random_visits() visit_type = "git" # Add visits to those origins for origin in origins: for date_visit in visits: visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)] )[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="full", snapshot=None, ) ] ) swh_storage.refresh_stat_counters() stats = swh_storage.stat_counters() assert stats["origin"] == len(origins) assert stats["origin_visit"] == len(origins) * len(visits) random_ov, random_ovs = swh_storage.origin_visit_status_get_random(visit_type) assert random_ov and random_ovs assert random_ov.origin is not None assert random_ov.origin == random_ovs.origin assert random_ov.origin in [o.url for o in origins] def test_origin_visit_status_get_random_nothing_found( self, swh_storage, sample_data ): origins = sample_data.origins swh_storage.origin_add(origins) visit_type = "hg" # Add some visits outside of the random generation selection so nothing # will be found by the random selection visits = self._generate_random_visits(nb_visits=3, start=13, end=24) for origin in origins: for date_visit in visits: visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)] )[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="full", snapshot=None, ) ] ) random_origin_visit = swh_storage.origin_visit_status_get_random(visit_type) assert random_origin_visit is None def test_origin_get_by_sha1(self, swh_storage, sample_data): origin = sample_data.origin assert swh_storage.origin_get([origin.url])[0] is None swh_storage.origin_add([origin]) origins = list(swh_storage.origin_get_by_sha1([sha1(origin.url)])) assert len(origins) == 1 assert origins[0]["url"] == origin.url def test_origin_get_by_sha1_not_found(self, swh_storage, sample_data): unknown_origin = sample_data.origin assert swh_storage.origin_get([unknown_origin.url])[0] is None origins = list(swh_storage.origin_get_by_sha1([sha1(unknown_origin.url)])) assert len(origins) == 1 assert origins[0] is None def test_origin_search_single_result(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] actual_page = swh_storage.origin_search(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [] actual_page = swh_storage.origin_search(origin.url, regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [] swh_storage.origin_add([origin]) actual_page = swh_storage.origin_search(origin.url) assert actual_page.next_page_token is 
None assert actual_page.results == [origin] actual_page = swh_storage.origin_search(f".{origin.url[1:-1]}.", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin] swh_storage.origin_add([origin2]) actual_page = swh_storage.origin_search(origin2.url) assert actual_page.next_page_token is None assert actual_page.results == [origin2] actual_page = swh_storage.origin_search(f".{origin2.url[1:-1]}.", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_no_regexp(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search("/") assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search("/", page_token=None, limit=1) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( "/", page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_regexp_substring(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search("/", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search( "/", page_token=None, limit=1, regexp=True ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( "/", page_token=next_page_token, limit=1, regexp=True ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_regexp_fullstring(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search(".*/.*", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search( ".*/.*", page_token=None, limit=1, regexp=True ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( ".*/.*", page_token=next_page_token, limit=1, regexp=True ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_visit_add(self, swh_storage, sample_data): origin1 = sample_data.origins[1] swh_storage.origin_add([origin1]) date_visit = now() date_visit2 = date_visit + datetime.timedelta(minutes=1) date_visit = round_to_milliseconds(date_visit) date_visit2 = round_to_milliseconds(date_visit2) visit1 = OriginVisit( origin=origin1.url, date=date_visit, type=sample_data.type_visit1, ) visit2 = OriginVisit( origin=origin1.url, date=date_visit2, type=sample_data.type_visit2, ) # add once ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) # then again (will be ignored as they already exist) origin_visit1, origin_visit2 = swh_storage.origin_visit_add([ov1, ov2]) assert ov1 == origin_visit1 assert ov2 == origin_visit2 ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=date_visit, status="created", snapshot=None, ) ovs2 = 
OriginVisitStatus( origin=origin1.url, visit=ov2.visit, date=date_visit2, status="created", snapshot=None, ) actual_visits = swh_storage.origin_visit_get(origin1.url).results expected_visits = [ov1, ov2] assert len(expected_visits) == len(actual_visits) for visit in expected_visits: assert visit in actual_visits actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = list( [("origin", origin1)] + [("origin_visit", visit) for visit in expected_visits] * 2 + [("origin_visit_status", ovs) for ovs in [ovs1, ovs2]] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_add_validation(self, swh_storage, sample_data): """Unknown origin when adding visits should raise""" visit = attr.evolve(sample_data.origin_visit, origin="something-unknonw") with pytest.raises(StorageArgumentException, match="Unknown origin"): swh_storage.origin_visit_add([visit]) objects = list(swh_storage.journal_writer.journal.objects) assert not objects def test_origin_visit_status_add_validation(self, swh_storage): """Wrong origin_visit_status input should raise storage argument error""" date_visit = now() visit_status1 = OriginVisitStatus( origin="unknown-origin-url", visit=10, date=date_visit, status="full", snapshot=None, ) with pytest.raises(StorageArgumentException, match="Unknown origin"): swh_storage.origin_visit_status_add([visit_status1]) objects = list(swh_storage.journal_writer.journal.objects) assert not objects def test_origin_visit_status_add(self, swh_storage, sample_data): """Correct origin visit statuses should add a new visit status """ snapshot = sample_data.snapshot origin1 = sample_data.origins[1] origin2 = Origin(url="new-origin") swh_storage.origin_add([origin1, origin2]) ov1, ov2 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), OriginVisit( origin=origin2.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit1, status="created", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin2.url, visit=ov2.visit, date=sample_data.date_visit2, status="created", snapshot=None, ) date_visit_now = round_to_milliseconds(now()) visit_status1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit_now, status="full", snapshot=snapshot.id, ) date_visit_now = round_to_milliseconds(now()) visit_status2 = OriginVisitStatus( origin=ov2.origin, visit=ov2.visit, date=date_visit_now, status="ongoing", snapshot=None, metadata={"intrinsic": "something"}, ) swh_storage.origin_visit_status_add([visit_status1, visit_status2]) visit = swh_storage.origin_visit_get_latest(origin1.url, require_snapshot=True) visit_status = swh_storage.origin_visit_status_get_latest( origin1.url, visit.visit, require_snapshot=True ) assert visit_status == visit_status1 visit = swh_storage.origin_visit_get_latest(origin2.url, require_snapshot=False) visit_status = swh_storage.origin_visit_status_get_latest( origin2.url, visit.visit, require_snapshot=False ) assert origin2.url != origin1.url assert visit_status == visit_status2 actual_objects = list(swh_storage.journal_writer.journal.objects) expected_origins = [origin1, origin2] expected_visits = [ov1, ov2] expected_visit_statuses = [ovs1, ovs2, visit_status1, visit_status2] expected_objects = ( [("origin", o) for o in expected_origins] + [("origin_visit", v) for v in expected_visits] + [("origin_visit_status", ovs) for ovs in 
expected_visit_statuses] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_status_add_twice(self, swh_storage, sample_data): """Correct origin visit statuses should add a new visit status """ snapshot = sample_data.snapshot origin1 = sample_data.origins[1] swh_storage.origin_add([origin1]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), ] )[0] ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit1, status="created", snapshot=None, ) date_visit_now = round_to_milliseconds(now()) visit_status1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit_now, status="full", snapshot=snapshot.id, ) swh_storage.origin_visit_status_add([visit_status1]) # second call will ignore existing entries (will send to storage though) swh_storage.origin_visit_status_add([visit_status1]) visit_status = swh_storage.origin_visit_status_get_latest(ov1.origin, ov1.visit) assert visit_status == visit_status1 actual_objects = list(swh_storage.journal_writer.journal.objects) expected_origins = [origin1] expected_visits = [ov1] expected_visit_statuses = [ovs1, visit_status1, visit_status1] # write twice in the journal expected_objects = ( [("origin", o) for o in expected_origins] + [("origin_visit", v) for v in expected_visits] + [("origin_visit_status", ovs) for ovs in expected_visit_statuses] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_find_by_date(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit1, ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit3, type=sample_data.type_visit2, ) visit3 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit3, ) ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) ovs1 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin.url, visit=ov2.visit, date=sample_data.date_visit3, status="ongoing", snapshot=None, ) ovs3 = OriginVisitStatus( origin=origin.url, visit=ov3.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3]) # Simple case actual_visit = swh_storage.origin_visit_find_by_date( origin.url, sample_data.date_visit3 ) assert actual_visit == ov2 # There are two visits at the same date, the latest must be returned actual_visit = swh_storage.origin_visit_find_by_date( origin.url, sample_data.date_visit2 ) assert actual_visit == ov3 def test_origin_visit_find_by_date__unknown_origin(self, swh_storage, sample_data): actual_visit = swh_storage.origin_visit_find_by_date( "foo", sample_data.date_visit2 ) assert actual_visit is None def test_origin_visit_get_by(self, swh_storage, sample_data): snapshot = sample_data.snapshot origins = sample_data.origins[:2] swh_storage.origin_add(origins) origin_url, origin_url2 = [o.url for o in origins] visit = OriginVisit( origin=origin_url, date=sample_data.date_visit2, type=sample_data.type_visit2, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) # 
Add some other {origin, visit} entries visit2 = OriginVisit( origin=origin_url, date=sample_data.date_visit3, type=sample_data.type_visit3, ) visit3 = OriginVisit( origin=origin_url2, date=sample_data.date_visit3, type=sample_data.type_visit3, ) swh_storage.origin_visit_add([visit2, visit3]) # when visit1_metadata = { "contents": 42, "directories": 22, } swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=origin_visit1.visit, date=now(), status="full", snapshot=snapshot.id, metadata=visit1_metadata, ) ] ) actual_visit = swh_storage.origin_visit_get_by(origin_url, origin_visit1.visit) assert actual_visit == origin_visit1 def test_origin_visit_get_by__no_result(self, swh_storage, sample_data): actual_visit = swh_storage.origin_visit_get_by("unknown", 10) # unknown origin assert actual_visit is None origin = sample_data.origin swh_storage.origin_add([origin]) actual_visit = swh_storage.origin_visit_get_by(origin.url, 999) # unknown visit assert actual_visit is None def test_origin_visit_get_latest_edge_cases(self, swh_storage, sample_data): # unknown origin so no result assert swh_storage.origin_visit_get_latest("unknown-origin") is None # unknown type so no result origin = sample_data.origin swh_storage.origin_add([origin]) assert swh_storage.origin_visit_get_latest(origin.url, type="unknown") is None # unknown allowed statuses should raise with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"): swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["unknown"] ) def test_origin_visit_get_latest_filter_type(self, swh_storage, sample_data): """Filtering origin visit get latest with filter type should be ok """ origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type="hg", ) date_now = round_to_milliseconds(now()) visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",) assert sample_data.date_visit1 < sample_data.date_visit2 assert sample_data.date_visit2 < date_now ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) # Check type filter is ok actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="git") assert actual_visit == ov1 actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="hg") assert actual_visit == ov3 actual_visit_unknown_type = swh_storage.origin_visit_get_latest( origin.url, type="npm", # no visit matching that type ) assert actual_visit_unknown_type is None def test_origin_visit_get_latest(self, swh_storage, sample_data): empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type="hg", ) date_now = round_to_milliseconds(now()) visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",) assert visit1.date < visit2.date assert visit2.date < visit3.date ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) # no filters, latest visit is the last one (whose date is most recent) actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # 3 visits, none has snapshot so nothing is returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit is None # visit are created 
with "created" status, so nothing will get returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["partial"] ) assert actual_visit is None # visit are created with "created" status, so most recent again actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["created"] ) assert actual_visit == ov3 # Add snapshot to visit1; require_snapshot=True makes it return first visit swh_storage.snapshot_add([complete_snapshot]) visit_status_with_snapshot = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=round_to_milliseconds(now()), status="ongoing", snapshot=complete_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status_with_snapshot]) # only the first visit has a snapshot now actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit == ov1 # only the first visit has a status ongoing now actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["ongoing"] ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, require_snapshot=True ) assert actual_visit_status == visit_status_with_snapshot # ... and require_snapshot=False (defaults) still returns latest visit (3rd) actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=False ) assert actual_visit == ov3 # no specific filter, this returns as before the latest visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # Status filter: all three visits are status=ongoing, so no visit # returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit is None visit_status1_full = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=round_to_milliseconds(now()), status="full", snapshot=complete_snapshot.id, ) # Mark the first visit as completed and check status filter again swh_storage.origin_visit_status_add([visit_status1_full]) # only the first visit has the full status actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, allowed_statuses=["full"] ) assert actual_visit_status == visit_status1_full # no specific filter, this returns as before the latest visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # Add snapshot to visit2 and check that the new snapshot is returned swh_storage.snapshot_add([empty_snapshot]) visit_status2_full = OriginVisitStatus( origin=origin.url, visit=ov2.visit, date=round_to_milliseconds(now()), status="ongoing", snapshot=empty_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status2_full]) actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) # 2nd visit is most recent with a snapshot assert actual_visit == ov2 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov2.visit, require_snapshot=True ) assert actual_visit_status == visit_status2_full # no specific filter, this returns as before the latest visit, 3rd one actual_origin = swh_storage.origin_visit_get_latest(origin.url) assert actual_origin == ov3 # full status is still the first visit actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit == ov1 # Add snapshot to visit3 (same date as visit2) 
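# Illustrative sketch (not part of the test suite): the lookup these
# origin_visit_get_latest assertions exercise, i.e. "latest fully archived
# snapshot of an origin". `storage` is assumed to implement StorageInterface
# and `origin_url` to be an origin URL (both names are placeholders).
def latest_full_snapshot_id(storage, origin_url):
    visit = storage.origin_visit_get_latest(
        origin_url, allowed_statuses=["full"], require_snapshot=True
    )
    if visit is None:
        # Unknown origin, or no visit reached status "full" with a snapshot yet.
        return None
    status = storage.origin_visit_status_get_latest(
        origin_url, visit.visit, allowed_statuses=["full"], require_snapshot=True
    )
    return status.snapshot if status else None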
visit_status3_with_snapshot = OriginVisitStatus( origin=origin.url, visit=ov3.visit, date=round_to_milliseconds(now()), status="ongoing", snapshot=complete_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status3_with_snapshot]) # full status is still the first visit actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"], require_snapshot=True, ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, visit=actual_visit.visit, allowed_statuses=["full"], require_snapshot=True, ) assert actual_visit_status == visit_status1_full # most recent is still the 3rd visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # 3rd visit has a snapshot now, so it's elected actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit == ov3 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov3.visit, require_snapshot=True ) assert actual_visit_status == visit_status3_with_snapshot def test_origin_visit_get_latest__same_date(self, swh_storage, sample_data): empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="hg", ) ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) # ties should be broken by using the visit id actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov2 def test_origin_visit_get_latest__not_last(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1, visit2 = sample_data.origin_visits[:2] assert visit1.origin == origin.url swh_storage.origin_visit_add([visit1]) ov1 = swh_storage.origin_visit_get_latest(origin.url) # Add snapshot to visit1, latest snapshot = visit 1 snapshot complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=visit2.date, status="partial", snapshot=None, ) ] ) assert visit1.date < visit2.date # no snapshot associated to the visit, so None visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["partial"], require_snapshot=True, ) assert visit is None date_now = now() assert visit2.date < date_now swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_now, status="full", snapshot=complete_snapshot.id, ) ] ) swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=now(), type=visit1.type,)] ) visit = swh_storage.origin_visit_get_latest(origin.url, require_snapshot=True) assert visit is not None def test_origin_visit_status_get_latest__validation(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) # unknown allowed statuses should raise with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"): swh_storage.origin_visit_status_get_latest( origin.url, visit1.visit, allowed_statuses=["unknown"] ) def test_origin_visit_status_get_latest(self, swh_storage, sample_data): snapshot = sample_data.snapshots[2] origin1 = sample_data.origin swh_storage.origin_add([origin1]) # to have some reference visits ov1, ov2 = 
swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), OriginVisit( origin=origin1.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) swh_storage.snapshot_add([snapshot]) date_now = round_to_milliseconds(now()) assert sample_data.date_visit1 < sample_data.date_visit2 assert sample_data.date_visit2 < date_now ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit1, status="partial", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) ovs3 = OriginVisitStatus( origin=origin1.url, visit=ov2.visit, date=sample_data.date_visit2 + datetime.timedelta(minutes=1), # to not be ignored status="ongoing", snapshot=None, ) ovs4 = OriginVisitStatus( origin=origin1.url, visit=ov2.visit, date=date_now, status="full", snapshot=snapshot.id, metadata={"something": "wicked"}, ) swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3, ovs4]) # unknown origin so no result actual_origin_visit = swh_storage.origin_visit_status_get_latest( "unknown-origin", ov1.visit ) assert actual_origin_visit is None # unknown visit so no result actual_origin_visit = swh_storage.origin_visit_status_get_latest( ov1.origin, ov1.visit + 10 ) assert actual_origin_visit is None # Two visits, both with no snapshot, take the most recent actual_origin_visit2 = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit ) assert isinstance(actual_origin_visit2, OriginVisitStatus) assert actual_origin_visit2 == ovs2 assert ovs2.origin == origin1.url assert ovs2.visit == ov1.visit actual_origin_visit = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit, require_snapshot=True ) # there is no visit with snapshot yet for that visit assert actual_origin_visit is None actual_origin_visit2 = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit, allowed_statuses=["partial", "ongoing"] ) # visit status with partial status visit elected assert actual_origin_visit2 == ovs2 assert actual_origin_visit2.status == "ongoing" actual_origin_visit4 = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, require_snapshot=True ) assert actual_origin_visit4 == ovs4 assert actual_origin_visit4.snapshot == snapshot.id actual_origin_visit = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, require_snapshot=True, allowed_statuses=["ongoing"] ) # nothing matches so nothing assert actual_origin_visit is None # there is no visit with status full actual_origin_visit3 = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, allowed_statuses=["ongoing"] ) assert actual_origin_visit3 == ovs3 def test_person_fullname_unicity(self, swh_storage, sample_data): revision, rev2 = sample_data.revisions[0:2] # create a revision with same committer fullname but wo name and email revision2 = attr.evolve( rev2, committer=Person( fullname=revision.committer.fullname, name=None, email=None ), ) swh_storage.revision_add([revision, revision2]) # when getting added revisions revisions = swh_storage.revision_get([revision.id, revision2.id]) # then check committers are the same assert revisions[0].committer == revisions[1].committer def test_snapshot_add_get_empty(self, swh_storage, sample_data): empty_snapshot = sample_data.snapshots[1] empty_snapshot_dict = empty_snapshot.to_dict() origin = sample_data.origin swh_storage.origin_add([origin]) ov1 = swh_storage.origin_visit_add( [ 
OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) ] )[0] actual_result = swh_storage.snapshot_add([empty_snapshot]) assert actual_result == {"snapshot:add": 1} date_now = now() swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_now, status="full", snapshot=empty_snapshot.id, ) ] ) by_id = swh_storage.snapshot_get(empty_snapshot.id) assert by_id == {**empty_snapshot_dict, "next_branch": None} ovs1 = OriginVisitStatus.from_dict( { "origin": origin.url, "date": sample_data.date_visit1, "visit": ov1.visit, "status": "created", "snapshot": None, "metadata": None, } ) ovs2 = OriginVisitStatus.from_dict( { "origin": origin.url, "date": date_now, "visit": ov1.visit, "status": "full", "metadata": None, "snapshot": empty_snapshot.id, } ) actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("origin", origin), ("origin_visit", ov1), ("origin_visit_status", ovs1,), ("snapshot", empty_snapshot), ("origin_visit_status", ovs2,), ] for obj in expected_objects: assert obj in actual_objects def test_snapshot_add_get_complete(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] complete_snapshot_dict = complete_snapshot.to_dict() origin = sample_data.origin swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] actual_result = swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=complete_snapshot.id, ) ] ) assert actual_result == {"snapshot:add": 1} by_id = swh_storage.snapshot_get(complete_snapshot.id) assert by_id == {**complete_snapshot_dict, "next_branch": None} def test_snapshot_add_many(self, swh_storage, sample_data): snapshot, _, complete_snapshot = sample_data.snapshots[:3] actual_result = swh_storage.snapshot_add([snapshot, complete_snapshot]) assert actual_result == {"snapshot:add": 2} assert swh_storage.snapshot_get(complete_snapshot.id) == { **complete_snapshot.to_dict(), "next_branch": None, } assert swh_storage.snapshot_get(snapshot.id) == { **snapshot.to_dict(), "next_branch": None, } swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["snapshot"] == 2 def test_snapshot_add_many_incremental(self, swh_storage, sample_data): snapshot, _, complete_snapshot = sample_data.snapshots[:3] actual_result = swh_storage.snapshot_add([complete_snapshot]) assert actual_result == {"snapshot:add": 1} actual_result2 = swh_storage.snapshot_add([snapshot, complete_snapshot]) assert actual_result2 == {"snapshot:add": 1} assert swh_storage.snapshot_get(complete_snapshot.id) == { **complete_snapshot.to_dict(), "next_branch": None, } assert swh_storage.snapshot_get(snapshot.id) == { **snapshot.to_dict(), "next_branch": None, } def test_snapshot_add_twice(self, swh_storage, sample_data): snapshot, empty_snapshot = sample_data.snapshots[:2] actual_result = swh_storage.snapshot_add([empty_snapshot]) assert actual_result == {"snapshot:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("snapshot", empty_snapshot) ] actual_result = swh_storage.snapshot_add([snapshot]) assert actual_result == {"snapshot:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("snapshot", empty_snapshot), ("snapshot", snapshot), ] def 
test_snapshot_add_count_branches(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] actual_result = swh_storage.snapshot_add([complete_snapshot]) assert actual_result == {"snapshot:add": 1} snp_size = swh_storage.snapshot_count_branches(complete_snapshot.id) expected_snp_size = { "alias": 1, "content": 1, "directory": 2, "release": 1, "revision": 1, "snapshot": 1, None: 1, } assert snp_size == expected_snp_size def test_snapshot_add_get_paginated(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) snp_id = complete_snapshot.id branches = complete_snapshot.branches branch_names = list(sorted(branches)) # Test branch_from snapshot = swh_storage.snapshot_get_branches(snp_id, branches_from=b"release") rel_idx = branch_names.index(b"release") expected_snapshot = { "id": snp_id, "branches": {name: branches[name] for name in branch_names[rel_idx:]}, "next_branch": None, } assert snapshot == expected_snapshot # Test branches_count snapshot = swh_storage.snapshot_get_branches(snp_id, branches_count=1) expected_snapshot = { "id": snp_id, "branches": {branch_names[0]: branches[branch_names[0]],}, "next_branch": b"content", } assert snapshot == expected_snapshot # test branch_from + branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, branches_from=b"directory", branches_count=3 ) dir_idx = branch_names.index(b"directory") expected_snapshot = { "id": snp_id, "branches": { name: branches[name] for name in branch_names[dir_idx : dir_idx + 3] }, "next_branch": branch_names[dir_idx + 3], } assert snapshot == expected_snapshot def test_snapshot_add_get_filtered(self, swh_storage, sample_data): origin = sample_data.origin complete_snapshot = sample_data.snapshots[2] swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=complete_snapshot.id, ) ] ) snp_id = complete_snapshot.id branches = complete_snapshot.branches snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["release", "revision"] ) expected_snapshot = { "id": snp_id, "branches": { name: tgt for name, tgt in branches.items() if tgt and tgt.target_type in [TargetType.RELEASE, TargetType.REVISION] }, "next_branch": None, } assert snapshot == expected_snapshot snapshot = swh_storage.snapshot_get_branches(snp_id, target_types=["alias"]) expected_snapshot = { "id": snp_id, "branches": { name: tgt for name, tgt in branches.items() if tgt and tgt.target_type == TargetType.ALIAS }, "next_branch": None, } assert snapshot == expected_snapshot def test_snapshot_add_get_filtered_and_paginated(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) snp_id = complete_snapshot.id branches = complete_snapshot.branches branch_names = list(sorted(branches)) # Test branch_from snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["directory", "release"], branches_from=b"directory2" ) expected_snapshot = { "id": snp_id, "branches": {name: branches[name] for name in (b"directory2", b"release")}, "next_branch": None, } assert snapshot == expected_snapshot # Test branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, 
target_types=["directory", "release"], branches_count=1 ) expected_snapshot = { "id": snp_id, "branches": {b"directory": branches[b"directory"]}, "next_branch": b"directory2", } assert snapshot == expected_snapshot # Test branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["directory", "release"], branches_count=2 ) expected_snapshot = { "id": snp_id, "branches": { name: branches[name] for name in (b"directory", b"directory2") }, "next_branch": b"release", } assert snapshot == expected_snapshot # test branch_from + branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["directory", "release"], branches_from=b"directory2", branches_count=1, ) dir_idx = branch_names.index(b"directory2") expected_snapshot = { "id": snp_id, "branches": {branch_names[dir_idx]: branches[branch_names[dir_idx]],}, "next_branch": b"release", } assert snapshot == expected_snapshot def test_snapshot_add_get_branch_by_type(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] snapshot = complete_snapshot.to_dict() alias1 = b"alias1" alias2 = b"alias2" target1 = random.choice(list(snapshot["branches"].keys())) target2 = random.choice(list(snapshot["branches"].keys())) snapshot["branches"][alias2] = { "target": target2, "target_type": "alias", } snapshot["branches"][alias1] = { "target": target1, "target_type": "alias", } new_snapshot = Snapshot.from_dict(snapshot) swh_storage.snapshot_add([new_snapshot]) branches = swh_storage.snapshot_get_branches( new_snapshot.id, target_types=["alias"], branches_from=alias1, branches_count=1, )["branches"] assert len(branches) == 1 assert alias1 in branches def test_snapshot_add_get(self, swh_storage, sample_data): snapshot = sample_data.snapshot origin = sample_data.origin swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) ov1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) expected_snapshot = {**snapshot.to_dict(), "next_branch": None} by_id = swh_storage.snapshot_get(snapshot.id) assert by_id == expected_snapshot actual_visit = swh_storage.origin_visit_get_by(origin.url, ov1.visit) assert actual_visit == ov1 visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, require_snapshot=True ) assert visit_status.snapshot == snapshot.id def test_snapshot_get_random(self, swh_storage, sample_data): snapshot, empty_snapshot, complete_snapshot = sample_data.snapshots[:3] swh_storage.snapshot_add([snapshot, empty_snapshot, complete_snapshot]) assert swh_storage.snapshot_get_random() in { snapshot.id, empty_snapshot.id, complete_snapshot.id, } def test_snapshot_missing(self, swh_storage, sample_data): snapshot, missing_snapshot = sample_data.snapshots[:2] snapshots = [snapshot.id, missing_snapshot.id] swh_storage.snapshot_add([snapshot]) missing_snapshots = swh_storage.snapshot_missing(snapshots) assert list(missing_snapshots) == [missing_snapshot.id] def test_stat_counters(self, swh_storage, sample_data): origin = sample_data.origin snapshot = sample_data.snapshot revision = sample_data.revision release = sample_data.release directory = sample_data.directory content = sample_data.content expected_keys = ["content", "directory", "origin", "revision"] # Initially, all counters are 0 swh_storage.refresh_stat_counters() 
counters = swh_storage.stat_counters() assert set(expected_keys) <= set(counters) for key in expected_keys: assert counters[key] == 0 # Add a content. Only the content counter should increase. swh_storage.content_add([content]) swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert set(expected_keys) <= set(counters) for key in expected_keys: if key != "content": assert counters[key] == 0 assert counters["content"] == 1 # Add other objects. Check their counter increased as well. swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) swh_storage.directory_add([directory]) swh_storage.revision_add([revision]) swh_storage.release_add([release]) swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert counters["content"] == 1 assert counters["directory"] == 1 assert counters["snapshot"] == 1 assert counters["origin"] == 1 assert counters["origin_visit"] == 1 assert counters["revision"] == 1 assert counters["release"] == 1 assert counters["snapshot"] == 1 if "person" in counters: assert counters["person"] == 3 def test_content_find_ctime(self, swh_storage, sample_data): origin_content = sample_data.content ctime = round_to_milliseconds(now()) content = attr.evolve(origin_content, data=None, ctime=ctime) swh_storage.content_add_metadata([content]) actually_present = swh_storage.content_find({"sha1": content.sha1}) assert actually_present[0] == content assert actually_present[0].ctime is not None assert actually_present[0].ctime.tzinfo is not None def test_content_find_with_present_content(self, swh_storage, sample_data): content = sample_data.content expected_content = attr.evolve(content, data=None) # 1. with something to find swh_storage.content_add([content]) actually_present = swh_storage.content_find({"sha1": content.sha1}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 2. with something to find actually_present = swh_storage.content_find({"sha1_git": content.sha1_git}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 3. with something to find actually_present = swh_storage.content_find({"sha256": content.sha256}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 4. with something to find actually_present = swh_storage.content_find(content.hashes()) assert 1 == len(actually_present) assert actually_present[0] == expected_content def test_content_find_with_non_present_content(self, swh_storage, sample_data): missing_content = sample_data.skipped_content # 1. with something that does not exist actually_present = swh_storage.content_find({"sha1": missing_content.sha1}) assert actually_present == [] # 2. with something that does not exist actually_present = swh_storage.content_find( {"sha1_git": missing_content.sha1_git} ) assert actually_present == [] # 3. 
with something that does not exist actually_present = swh_storage.content_find({"sha256": missing_content.sha256}) assert actually_present == [] def test_content_find_with_duplicate_input(self, swh_storage, sample_data): content = sample_data.content # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(content.sha1) sha1_array[0] += 1 sha1git_array = bytearray(content.sha1_git) sha1git_array[0] += 1 duplicated_content = attr.evolve( content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array) ) # Inject the data swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find( { "blake2s256": duplicated_content.blake2s256, "sha256": duplicated_content.sha256, } ) expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] def test_content_find_with_duplicate_sha256(self, swh_storage, sample_data): content = sample_data.content hashes = {} # Create fake data with colliding sha256 for hashalgo in ("sha1", "sha1_git", "blake2s256"): value = bytearray(getattr(content, hashalgo)) value[0] += 1 hashes[hashalgo] = bytes(value) duplicated_content = attr.evolve( content, sha1=hashes["sha1"], sha1_git=hashes["sha1_git"], blake2s256=hashes["blake2s256"], ) swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find({"sha256": duplicated_content.sha256}) assert len(actual_result) == 2 expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] # Find with both sha256 and blake2s256 actual_result = swh_storage.content_find( { "sha256": duplicated_content.sha256, "blake2s256": duplicated_content.blake2s256, } ) assert len(actual_result) == 1 assert actual_result == [expected_duplicated_content] def test_content_find_with_duplicate_blake2s256(self, swh_storage, sample_data): content = sample_data.content # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(content.sha1) sha1_array[0] += 1 sha1git_array = bytearray(content.sha1_git) sha1git_array[0] += 1 sha256_array = bytearray(content.sha256) sha256_array[0] += 1 duplicated_content = attr.evolve( content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array), sha256=bytes(sha256_array), ) swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find( {"blake2s256": duplicated_content.blake2s256} ) expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] # Find with both sha256 and blake2s256 actual_result = swh_storage.content_find( { "sha256": duplicated_content.sha256, "blake2s256": duplicated_content.blake2s256, } ) assert actual_result == [expected_duplicated_content] def test_content_find_bad_input(self, swh_storage): # 1. with no hash to lookup with pytest.raises(StorageArgumentException): swh_storage.content_find({}) # need at least one hash # 2. 
with bad hash with pytest.raises(StorageArgumentException): swh_storage.content_find({"unknown-sha1": "something"}) # not the right key def test_object_find_by_sha1_git(self, swh_storage, sample_data): content = sample_data.content directory = sample_data.directory revision = sample_data.revision release = sample_data.release sha1_gits = [b"00000000000000000000"] expected = { b"00000000000000000000": [], } swh_storage.content_add([content]) sha1_gits.append(content.sha1_git) expected[content.sha1_git] = [ {"sha1_git": content.sha1_git, "type": "content",} ] swh_storage.directory_add([directory]) sha1_gits.append(directory.id) expected[directory.id] = [{"sha1_git": directory.id, "type": "directory",}] swh_storage.revision_add([revision]) sha1_gits.append(revision.id) expected[revision.id] = [{"sha1_git": revision.id, "type": "revision",}] swh_storage.release_add([release]) sha1_gits.append(release.id) expected[release.id] = [{"sha1_git": release.id, "type": "release",}] ret = swh_storage.object_find_by_sha1_git(sha1_gits) assert expected == ret def test_metadata_fetcher_add_get(self, swh_storage, sample_data): fetcher = sample_data.metadata_fetcher actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert actual_fetcher is None # does not exist swh_storage.metadata_fetcher_add([fetcher]) res = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert res == fetcher actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_fetcher", fetcher), ] for obj in expected_objects: assert obj in actual_objects def test_metadata_fetcher_add_zero(self, swh_storage, sample_data): fetcher = sample_data.metadata_fetcher actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert actual_fetcher is None # does not exist swh_storage.metadata_fetcher_add([]) def test_metadata_authority_add_get(self, swh_storage, sample_data): authority = sample_data.metadata_authority actual_authority = swh_storage.metadata_authority_get( authority.type, authority.url ) assert actual_authority is None # does not exist swh_storage.metadata_authority_add([authority]) res = swh_storage.metadata_authority_get(authority.type, authority.url) assert res == authority actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_authority", authority), ] for obj in expected_objects: assert obj in actual_objects def test_metadata_authority_add_zero(self, swh_storage, sample_data): authority = sample_data.metadata_authority actual_authority = swh_storage.metadata_authority_get( authority.type, authority.url ) assert actual_authority is None # does not exist swh_storage.metadata_authority_add([]) def test_content_metadata_add(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata = sample_data.content_metadata[:2] content_swhid = SWHID( object_type="content", object_id=hash_to_bytes(content.sha1_git) ) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add(content_metadata) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == list( content_metadata ) actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ 
("metadata_authority", authority), ("metadata_fetcher", fetcher), ] + [("raw_extrinsic_metadata", item) for item in content_metadata] for obj in expected_objects: assert obj in actual_objects def test_content_metadata_add_duplicate(self, swh_storage, sample_data): """Duplicates should be silently updated.""" content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID( object_type="content", object_id=hash_to_bytes(content.sha1_git) ) new_content_metadata2 = attr.evolve( content_metadata2, format="new-format", metadata=b"new-metadata", ) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) swh_storage.raw_extrinsic_metadata_add([new_content_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority ) assert result.next_page_token is None expected_results1 = (content_metadata, new_content_metadata2) expected_results2 = (content_metadata, content_metadata2) assert tuple(sorted(result.results, key=lambda x: x.discovery_date,)) in ( expected_results1, # cassandra expected_results2, # postgresql ) def test_content_metadata_get(self, swh_storage, sample_data): content, content2 = sample_data.contents[:2] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( content1_metadata1, content1_metadata2, content1_metadata3, ) = sample_data.content_metadata[:3] content1_swhid = SWHID(object_type="content", object_id=content.sha1_git) content2_swhid = SWHID(object_type="content", object_id=content2.sha1_git) - content2_metadata = attr.evolve(content1_metadata2, id=content2_swhid) + content2_metadata = attr.evolve(content1_metadata2, target=content2_swhid) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [ content1_metadata1, content1_metadata2, content1_metadata3, content2_metadata, ] ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content1_swhid, authority ) assert result.next_page_token is None assert [content1_metadata1, content1_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content1_swhid, authority2 ) assert result.next_page_token is None assert [content1_metadata3] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content2_swhid, authority ) assert result.next_page_token is None assert [content2_metadata] == list(result.results,) def test_content_metadata_get_after(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID(object_type="content", object_id=content.sha1_git) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, after=content_metadata.discovery_date - timedelta(seconds=1), ) assert result.next_page_token is 
None assert [content_metadata, content_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, after=content_metadata.discovery_date, ) assert result.next_page_token is None assert result.results == [content_metadata2] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, after=content_metadata2.discovery_date, ) assert result.next_page_token is None assert result.results == [] def test_content_metadata_get_paginate(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID(object_type="content", object_id=content.sha1_git) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [content_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [content_metadata2] def test_content_metadata_get_paginate_same_date(self, swh_storage, sample_data): content = sample_data.content fetcher1, fetcher2 = sample_data.fetchers[:2] authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID(object_type="content", object_id=content.sha1_git) swh_storage.metadata_fetcher_add([fetcher1, fetcher2]) swh_storage.metadata_authority_add([authority]) new_content_metadata2 = attr.evolve( content_metadata2, discovery_date=content_metadata2.discovery_date, fetcher=attr.evolve(fetcher2, metadata=None), ) swh_storage.raw_extrinsic_metadata_add( [content_metadata, new_content_metadata2] ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [content_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [new_content_metadata2] def test_content_metadata_get__invalid_id(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) with pytest.raises(StorageArgumentException, match="SWHID"): swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, origin.url, authority ) def test_origin_metadata_add(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert 
swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date)) == [ origin_metadata, origin_metadata2, ] actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_authority", authority), ("metadata_fetcher", fetcher), ("raw_extrinsic_metadata", origin_metadata), ("raw_extrinsic_metadata", origin_metadata2), ] for obj in expected_objects: assert obj in actual_objects def test_origin_metadata_add_duplicate(self, swh_storage, sample_data): """Duplicates should be silently updated.""" origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} new_origin_metadata2 = attr.evolve( origin_metadata2, format="new-format", metadata=b"new-metadata", ) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) swh_storage.raw_extrinsic_metadata_add([new_origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) assert result.next_page_token is None # which of the two behavior happens is backend-specific. expected_results1 = (origin_metadata, new_origin_metadata2) expected_results2 = (origin_metadata, origin_metadata2) assert tuple(sorted(result.results, key=lambda x: x.discovery_date,)) in ( expected_results1, # cassandra expected_results2, # postgresql ) def test_origin_metadata_get(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( origin1_metadata1, origin1_metadata2, origin1_metadata3, ) = sample_data.origin_metadata[:3] assert swh_storage.origin_add([origin, origin2]) == {"origin:add": 2} - origin2_metadata = attr.evolve(origin1_metadata2, id=origin2.url) + origin2_metadata = attr.evolve(origin1_metadata2, target=origin2.url) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [origin1_metadata1, origin1_metadata2, origin1_metadata3, origin2_metadata] ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) assert result.next_page_token is None assert [origin1_metadata1, origin1_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority2 ) assert result.next_page_token is None assert [origin1_metadata3] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin2.url, authority ) assert result.next_page_token is None assert [origin2_metadata] == list(result.results,) def test_origin_metadata_get_after(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = 
sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, after=origin_metadata.discovery_date - timedelta(seconds=1), ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == [ origin_metadata, origin_metadata2, ] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, after=origin_metadata.discovery_date, ) assert result.next_page_token is None assert result.results == [origin_metadata2] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, after=origin_metadata2.discovery_date, ) assert result.next_page_token is None assert result.results == [] def test_origin_metadata_get_paginate(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [origin_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [origin_metadata2] def test_origin_metadata_get_paginate_same_date(self, swh_storage, sample_data): origin = sample_data.origin fetcher1, fetcher2 = sample_data.fetchers[:2] authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher1, fetcher2]) swh_storage.metadata_authority_add([authority]) new_origin_metadata2 = attr.evolve( origin_metadata2, discovery_date=origin_metadata2.discovery_date, fetcher=attr.evolve(fetcher2, metadata=None), ) swh_storage.raw_extrinsic_metadata_add([origin_metadata, new_origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [origin_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [new_origin_metadata2] def test_origin_metadata_add_missing_authority(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) with pytest.raises(StorageArgumentException, match="authority"): swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) def 
test_origin_metadata_add_missing_fetcher(self, swh_storage, sample_data): origin = sample_data.origin authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_authority_add([authority]) with pytest.raises(StorageArgumentException, match="fetcher"): swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) def test_origin_metadata_get__invalid_id_type(self, swh_storage, sample_data): origin = sample_data.origin authority = sample_data.metadata_authority fetcher = sample_data.metadata_fetcher origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] content_metadata = sample_data.content_metadata[0] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) with pytest.raises(StorageArgumentException, match="SWHID"): swh_storage.raw_extrinsic_metadata_get( - MetadataTargetType.ORIGIN, content_metadata.id, authority, + MetadataTargetType.ORIGIN, content_metadata.target, authority, ) class TestStorageGeneratedData: def test_generate_content_get_data(self, swh_storage, swh_contents): contents_with_data = [c for c in swh_contents if c.status != "absent"] # retrieve contents for content in contents_with_data: actual_content_data = swh_storage.content_get_data(content.sha1) assert actual_content_data is not None assert actual_content_data == content.data def test_generate_content_get(self, swh_storage, swh_contents): expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_contents = swh_storage.content_get([c.sha1 for c in expected_contents]) assert len(actual_contents) == len(expected_contents) assert actual_contents == expected_contents @pytest.mark.parametrize("limit", [1, 7, 10, 100, 1000]) def test_origin_list(self, swh_storage, swh_origins, limit): returned_origins = [] page_token = None i = 0 while True: actual_page = swh_storage.origin_list(page_token=page_token, limit=limit) assert len(actual_page.results) <= limit returned_origins.extend(actual_page.results) i += 1 page_token = actual_page.next_page_token if page_token is None: assert i * limit >= len(swh_origins) break else: assert len(actual_page.results) == limit assert sorted(returned_origins) == sorted(swh_origins) def test_origin_count(self, swh_storage, sample_data): swh_storage.origin_add(sample_data.origins) assert swh_storage.origin_count("github") == 3 assert swh_storage.origin_count("gitlab") == 2 assert swh_storage.origin_count(".*user.*", regexp=True) == 5 assert swh_storage.origin_count(".*user.*", regexp=False) == 0 assert swh_storage.origin_count(".*user1.*", regexp=True) == 2 assert swh_storage.origin_count(".*user1.*", regexp=False) == 0 def test_origin_count_with_visit_no_visits(self, swh_storage, sample_data): swh_storage.origin_add(sample_data.origins) # none of them have visits, so with_visit=True => 0 assert swh_storage.origin_count("github", with_visit=True) == 0 assert swh_storage.origin_count("gitlab", with_visit=True) == 0 assert swh_storage.origin_count(".*user.*", regexp=True, with_visit=True) == 0 assert swh_storage.origin_count(".*user.*", regexp=False, with_visit=True) == 0 assert swh_storage.origin_count(".*user1.*", regexp=True, with_visit=True) == 0 assert swh_storage.origin_count(".*user1.*", regexp=False, with_visit=True) == 0 
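    # A short recap of the origin_count() behaviour exercised by the
    # surrounding tests, inferred from their assertions rather than from API
    # documentation:
    #
    #   origin_count("github")                   -> plain substring match
    #   origin_count(".*user.*", regexp=True)    -> regular-expression match
    #   origin_count("github", with_visit=True)  -> only counts origins with
    #                                               at least one visit that
    #                                               points to a snapshot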
def test_origin_count_with_visit_with_visits_no_snapshot( self, swh_storage, sample_data ): swh_storage.origin_add(sample_data.origins) origin_url = "https://github.com/user1/repo1" visit = OriginVisit(origin=origin_url, date=now(), type="git",) swh_storage.origin_visit_add([visit]) assert swh_storage.origin_count("github", with_visit=False) == 3 # it has a visit, but no snapshot, so with_visit=True => 0 assert swh_storage.origin_count("github", with_visit=True) == 0 assert swh_storage.origin_count("gitlab", with_visit=False) == 2 # these gitlab origins have no visit assert swh_storage.origin_count("gitlab", with_visit=True) == 0 assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=False) == 1 ) assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 0 ) assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 0 def test_origin_count_with_visit_with_visits_and_snapshot( self, swh_storage, sample_data ): snapshot = sample_data.snapshot swh_storage.origin_add(sample_data.origins) swh_storage.snapshot_add([snapshot]) origin_url = "https://github.com/user1/repo1" visit = OriginVisit(origin=origin_url, date=now(), type="git",) visit = swh_storage.origin_visit_add([visit])[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=visit.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) assert swh_storage.origin_count("github", with_visit=False) == 3 # github/user1 has a visit and a snapshot, so with_visit=True => 1 assert swh_storage.origin_count("github", with_visit=True) == 1 assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=False) == 1 ) assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 1 ) assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 1 @settings(suppress_health_check=[HealthCheck.too_slow]) @given(strategies.lists(objects(split_content=True), max_size=2)) def test_add_arbitrary(self, swh_storage, objects): for (obj_type, obj) in objects: if obj.object_type == "origin_visit": swh_storage.origin_add([Origin(url=obj.origin)]) visit = OriginVisit(origin=obj.origin, date=obj.date, type=obj.type,) swh_storage.origin_visit_add([visit]) else: method = getattr(swh_storage, obj_type + "_add") try: method([obj]) except HashCollision: pass diff --git a/swh/storage/tests/test_retry.py b/swh/storage/tests/test_retry.py index d290b5be..d58aac44 100644 --- a/swh/storage/tests/test_retry.py +++ b/swh/storage/tests/test_retry.py @@ -1,837 +1,837 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from unittest.mock import call import attr import psycopg2 import pytest from swh.model.model import MetadataTargetType from swh.storage.exc import HashCollision, StorageArgumentException from swh.storage.utils import now @pytest.fixture def monkeypatch_sleep(monkeypatch, swh_storage): """In test context, we don't want to wait, make test faster """ from swh.storage.retry import RetryingProxyStorage for method_name, method in RetryingProxyStorage.__dict__.items(): if "_add" in method_name or "_update" in method_name: monkeypatch.setattr(method.retry, "sleep", lambda x: None) return monkeypatch @pytest.fixture def fake_hash_collision(sample_data): return HashCollision("sha1", "38762cf7f55934b34d179ae6a4c80cadccbb7f0a", []) 
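# A hedged sketch, not used by the tests in this module, of how the same
# retry + in-memory pipeline configured by the fixture below could be built
# directly. It assumes get_storage() accepts the same keys as the fixture
# dict, which is how the pytest plugin instantiates the storage under test.
def make_retrying_memory_storage():
    """Return an in-memory storage wrapped by the retrying proxy."""
    from swh.storage import get_storage

    return get_storage(cls="pipeline", steps=[{"cls": "retry"}, {"cls": "memory"}])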
@pytest.fixture def swh_storage_backend_config(): yield { "cls": "pipeline", "steps": [{"cls": "retry"}, {"cls": "memory"},], } def test_retrying_proxy_storage_content_add(swh_storage, sample_data): """Standard content_add works as before """ sample_content = sample_data.content content = swh_storage.content_get_data(sample_content.sha1) assert content is None s = swh_storage.content_add([sample_content]) assert s == { "content:add": 1, "content:add:bytes": sample_content.length, } content = swh_storage.content_get_data(sample_content.sha1) assert content == sample_content.data def test_retrying_proxy_storage_content_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision, ): """Multiple retries for hash collision and psycopg2 error but finally ok """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.content_add") mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("content already inserted"), # ok then! {"content:add": 1}, ] sample_content = sample_data.content content = swh_storage.content_get_data(sample_content.sha1) assert content is None s = swh_storage.content_add([sample_content]) assert s == {"content:add": 1} mock_memory.assert_has_calls( [call([sample_content]), call([sample_content]), call([sample_content]),] ) def test_retrying_proxy_swh_storage_content_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.content_add") mock_memory.side_effect = StorageArgumentException("Refuse to add content always!") sample_content = sample_data.content content = swh_storage.content_get_data(sample_content.sha1) assert content is None with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.content_add([sample_content]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_content_add_metadata(swh_storage, sample_data): """Standard content_add_metadata works as before """ sample_content = sample_data.content content = attr.evolve(sample_content, data=None) pk = content.sha1 content_metadata = swh_storage.content_get([pk]) assert content_metadata == [None] s = swh_storage.content_add_metadata([attr.evolve(content, ctime=now())]) assert s == { "content:add": 1, } content_metadata = swh_storage.content_get([pk]) assert len(content_metadata) == 1 assert content_metadata[0].sha1 == pk def test_retrying_proxy_storage_content_add_metadata_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision ): """Multiple retries for hash collision and psycopg2 error but finally ok """ mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.content_add_metadata" ) mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("content_metadata already inserted"), # ok then! {"content:add": 1}, ] sample_content = sample_data.content content = attr.evolve(sample_content, data=None) s = swh_storage.content_add_metadata([content]) assert s == {"content:add": 1} mock_memory.assert_has_calls( [call([content]), call([content]), call([content]),] ) def test_retrying_proxy_swh_storage_content_add_metadata_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.content_add_metadata" ) mock_memory.side_effect = StorageArgumentException( "Refuse to add content_metadata!" 
) sample_content = sample_data.content content = attr.evolve(sample_content, data=None) with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.content_add_metadata([content]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_skipped_content_add(swh_storage, sample_data): """Standard skipped_content_add works as before """ sample_content = sample_data.skipped_content sample_content_dict = sample_content.to_dict() skipped_contents = list(swh_storage.skipped_content_missing([sample_content_dict])) assert len(skipped_contents) == 1 s = swh_storage.skipped_content_add([sample_content]) assert s == { "skipped_content:add": 1, } skipped_content = list(swh_storage.skipped_content_missing([sample_content_dict])) assert len(skipped_content) == 0 def test_retrying_proxy_storage_skipped_content_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision ): """Multiple retries for hash collision and psycopg2 error but finally ok """ mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.skipped_content_add" ) mock_memory.side_effect = [ # 1st & 2nd try goes ko fake_hash_collision, psycopg2.IntegrityError("skipped_content already inserted"), # ok then! {"skipped_content:add": 1}, ] sample_content = sample_data.skipped_content s = swh_storage.skipped_content_add([sample_content]) assert s == {"skipped_content:add": 1} mock_memory.assert_has_calls( [call([sample_content]), call([sample_content]), call([sample_content]),] ) def test_retrying_proxy_swh_storage_skipped_content_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.skipped_content_add" ) mock_memory.side_effect = StorageArgumentException( "Refuse to add content_metadata!" ) sample_content = sample_data.skipped_content sample_content_dict = sample_content.to_dict() skipped_contents = list(swh_storage.skipped_content_missing([sample_content_dict])) assert len(skipped_contents) == 1 with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.skipped_content_add([sample_content]) skipped_contents = list(swh_storage.skipped_content_missing([sample_content_dict])) assert len(skipped_contents) == 1 assert mock_memory.call_count == 1 def test_retrying_proxy_swh_storage_origin_visit_add(swh_storage, sample_data): """Standard origin_visit_add works as before """ origin = sample_data.origin visit = sample_data.origin_visit assert visit.origin == origin.url swh_storage.origin_add([origin]) origins = swh_storage.origin_visit_get(origin.url).results assert not origins origin_visit = swh_storage.origin_visit_add([visit])[0] assert origin_visit.origin == origin.url assert isinstance(origin_visit.visit, int) actual_visit = swh_storage.origin_visit_get(origin.url).results[0] assert actual_visit == visit def test_retrying_proxy_swh_storage_origin_visit_add_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision ): """Multiple retries for hash collision and psycopg2 error but finally ok """ origin = sample_data.origin visit = sample_data.origin_visit assert visit.origin == origin.url swh_storage.origin_add([origin]) mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.origin_visit_add") mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("origin already inserted"), # ok then! 
[visit], ] origins = swh_storage.origin_visit_get(origin.url).results assert not origins r = swh_storage.origin_visit_add([visit]) assert r == [visit] mock_memory.assert_has_calls( [call([visit]), call([visit]), call([visit]),] ) def test_retrying_proxy_swh_storage_origin_visit_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.origin_visit_add") mock_memory.side_effect = StorageArgumentException("Refuse to add origin always!") origin = sample_data.origin visit = sample_data.origin_visit assert visit.origin == origin.url origins = swh_storage.origin_visit_get(origin.url).results assert not origins with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.origin_visit_add([visit]) mock_memory.assert_has_calls( [call([visit]),] ) def test_retrying_proxy_storage_metadata_fetcher_add(swh_storage, sample_data): """Standard metadata_fetcher_add works as before """ fetcher = sample_data.metadata_fetcher metadata_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert not metadata_fetcher swh_storage.metadata_fetcher_add([fetcher]) actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert actual_fetcher == fetcher def test_retrying_proxy_storage_metadata_fetcher_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision, ): """Multiple retries for hash collision and psycopg2 error but finally ok """ fetcher = sample_data.metadata_fetcher mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.metadata_fetcher_add" ) mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("metadata_fetcher already inserted"), # ok then! [fetcher], ] actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert not actual_fetcher swh_storage.metadata_fetcher_add([fetcher]) mock_memory.assert_has_calls( [call([fetcher]), call([fetcher]), call([fetcher]),] ) def test_retrying_proxy_swh_storage_metadata_fetcher_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.metadata_fetcher_add" ) mock_memory.side_effect = StorageArgumentException( "Refuse to add metadata_fetcher always!" 
) fetcher = sample_data.metadata_fetcher actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert not actual_fetcher with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.metadata_fetcher_add([fetcher]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_metadata_authority_add(swh_storage, sample_data): """Standard metadata_authority_add works as before """ authority = sample_data.metadata_authority assert not swh_storage.metadata_authority_get(authority.type, authority.url) swh_storage.metadata_authority_add([authority]) actual_authority = swh_storage.metadata_authority_get(authority.type, authority.url) assert actual_authority == authority def test_retrying_proxy_storage_metadata_authority_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision, ): """Multiple retries for hash collision and psycopg2 error but finally ok """ authority = sample_data.metadata_authority mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.metadata_authority_add" ) mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("foo bar"), # ok then! None, ] assert not swh_storage.metadata_authority_get(authority.type, authority.url) swh_storage.metadata_authority_add([authority]) mock_memory.assert_has_calls( [call([authority]), call([authority]), call([authority])] ) def test_retrying_proxy_swh_storage_metadata_authority_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.metadata_authority_add" ) mock_memory.side_effect = StorageArgumentException( "Refuse to add authority_id always!" ) authority = sample_data.metadata_authority swh_storage.metadata_authority_get(authority.type, authority.url) with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.metadata_authority_add([authority]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_raw_extrinsic_metadata_add(swh_storage, sample_data): """Standard raw_extrinsic_metadata_add works as before """ origin = sample_data.origin ori_meta = sample_data.origin_metadata1 - assert origin.url == ori_meta.id + assert origin.url == ori_meta.target swh_storage.origin_add([origin]) swh_storage.metadata_authority_add([sample_data.metadata_authority]) swh_storage.metadata_fetcher_add([sample_data.metadata_fetcher]) origin_metadata = swh_storage.raw_extrinsic_metadata_get( - MetadataTargetType.ORIGIN, ori_meta.id, ori_meta.authority + MetadataTargetType.ORIGIN, ori_meta.target, ori_meta.authority ) assert origin_metadata.next_page_token is None assert not origin_metadata.results swh_storage.raw_extrinsic_metadata_add([ori_meta]) origin_metadata = swh_storage.raw_extrinsic_metadata_get( - MetadataTargetType.ORIGIN, ori_meta.id, ori_meta.authority + MetadataTargetType.ORIGIN, ori_meta.target, ori_meta.authority ) assert origin_metadata def test_retrying_proxy_storage_raw_extrinsic_metadata_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision, ): """Multiple retries for hash collision and psycopg2 error but finally ok """ origin = sample_data.origin ori_meta = sample_data.origin_metadata1 - assert origin.url == ori_meta.id + assert origin.url == ori_meta.target swh_storage.origin_add([origin]) swh_storage.metadata_authority_add([sample_data.metadata_authority]) 
swh_storage.metadata_fetcher_add([sample_data.metadata_fetcher]) mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.raw_extrinsic_metadata_add" ) mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("foo bar"), # ok then! None, ] # No exception raised as insertion finally came through swh_storage.raw_extrinsic_metadata_add([ori_meta]) mock_memory.assert_has_calls( [ # 3 calls, as long as error raised call([ori_meta]), call([ori_meta]), call([ori_meta]), ] ) def test_retrying_proxy_swh_storage_raw_extrinsic_metadata_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch( "swh.storage.in_memory.InMemoryStorage.raw_extrinsic_metadata_add" ) mock_memory.side_effect = StorageArgumentException("Refuse to add always!") origin = sample_data.origin ori_meta = sample_data.origin_metadata1 - assert origin.url == ori_meta.id + assert origin.url == ori_meta.target swh_storage.origin_add([origin]) with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.raw_extrinsic_metadata_add([ori_meta]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_directory_add(swh_storage, sample_data): """Standard directory_add works as before """ sample_dir = sample_data.directory s = swh_storage.directory_add([sample_dir]) assert s == { "directory:add": 1, } directory_id = swh_storage.directory_get_random() # only 1 assert directory_id == sample_dir.id def test_retrying_proxy_storage_directory_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision ): """Multiple retries for hash collision and psycopg2 error but finally ok """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.directory_add") mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("directory already inserted"), # ok then! {"directory:add": 1}, ] sample_dir = sample_data.directories[1] s = swh_storage.directory_add([sample_dir]) assert s == { "directory:add": 1, } mock_memory.assert_has_calls( [call([sample_dir]), call([sample_dir]), call([sample_dir]),] ) def test_retrying_proxy_swh_storage_directory_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.directory_add") mock_memory.side_effect = StorageArgumentException( "Refuse to add directory always!" ) sample_dir = sample_data.directory with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.directory_add([sample_dir]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_revision_add(swh_storage, sample_data): """Standard revision_add works as before """ sample_rev = sample_data.revision revision = swh_storage.revision_get([sample_rev.id])[0] assert revision is None s = swh_storage.revision_add([sample_rev]) assert s == { "revision:add": 1, } revision = swh_storage.revision_get([sample_rev.id])[0] assert revision == sample_rev def test_retrying_proxy_storage_revision_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision ): """Multiple retries for hash collision and psycopg2 error but finally ok """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.revision_add") mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("revision already inserted"), # ok then! 
{"revision:add": 1}, ] sample_rev = sample_data.revision revision = swh_storage.revision_get([sample_rev.id])[0] assert revision is None s = swh_storage.revision_add([sample_rev]) assert s == { "revision:add": 1, } mock_memory.assert_has_calls( [call([sample_rev]), call([sample_rev]), call([sample_rev]),] ) def test_retrying_proxy_swh_storage_revision_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.revision_add") mock_memory.side_effect = StorageArgumentException("Refuse to add revision always!") sample_rev = sample_data.revision revision = swh_storage.revision_get([sample_rev.id])[0] assert revision is None with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.revision_add([sample_rev]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_release_add(swh_storage, sample_data): """Standard release_add works as before """ sample_rel = sample_data.release release = swh_storage.release_get([sample_rel.id])[0] assert release is None s = swh_storage.release_add([sample_rel]) assert s == { "release:add": 1, } release = swh_storage.release_get([sample_rel.id])[0] assert release == sample_rel def test_retrying_proxy_storage_release_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision ): """Multiple retries for hash collision and psycopg2 error but finally ok """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.release_add") mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("release already inserted"), # ok then! {"release:add": 1}, ] sample_rel = sample_data.release release = swh_storage.release_get([sample_rel.id])[0] assert release is None s = swh_storage.release_add([sample_rel]) assert s == { "release:add": 1, } mock_memory.assert_has_calls( [call([sample_rel]), call([sample_rel]), call([sample_rel]),] ) def test_retrying_proxy_swh_storage_release_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.release_add") mock_memory.side_effect = StorageArgumentException("Refuse to add release always!") sample_rel = sample_data.release release = swh_storage.release_get([sample_rel.id])[0] assert release is None with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.release_add([sample_rel]) assert mock_memory.call_count == 1 def test_retrying_proxy_storage_snapshot_add(swh_storage, sample_data): """Standard snapshot_add works as before """ sample_snap = sample_data.snapshot snapshot = swh_storage.snapshot_get(sample_snap.id) assert not snapshot s = swh_storage.snapshot_add([sample_snap]) assert s == { "snapshot:add": 1, } snapshot = swh_storage.snapshot_get(sample_snap.id) assert snapshot["id"] == sample_snap.id def test_retrying_proxy_storage_snapshot_add_with_retry( monkeypatch_sleep, swh_storage, sample_data, mocker, fake_hash_collision ): """Multiple retries for hash collision and psycopg2 error but finally ok """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.snapshot_add") mock_memory.side_effect = [ # first try goes ko fake_hash_collision, # second try goes ko psycopg2.IntegrityError("snapshot already inserted"), # ok then! 
{"snapshot:add": 1}, ] sample_snap = sample_data.snapshot snapshot = swh_storage.snapshot_get(sample_snap.id) assert not snapshot s = swh_storage.snapshot_add([sample_snap]) assert s == { "snapshot:add": 1, } mock_memory.assert_has_calls( [call([sample_snap]), call([sample_snap]), call([sample_snap]),] ) def test_retrying_proxy_swh_storage_snapshot_add_failure( swh_storage, sample_data, mocker ): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.snapshot_add") mock_memory.side_effect = StorageArgumentException("Refuse to add snapshot always!") sample_snap = sample_data.snapshot snapshot = swh_storage.snapshot_get(sample_snap.id) assert not snapshot with pytest.raises(StorageArgumentException, match="Refuse to add"): swh_storage.snapshot_add([sample_snap]) assert mock_memory.call_count == 1 def test_retrying_proxy_swh_storage_keyboardinterrupt(swh_storage, sample_data, mocker): """Unfiltered errors are raising without retry """ mock_memory = mocker.patch("swh.storage.in_memory.InMemoryStorage.content_add") mock_memory.side_effect = KeyboardInterrupt() sample_content = sample_data.content content = swh_storage.content_get_data(sample_content.sha1) assert content is None with pytest.raises(KeyboardInterrupt): swh_storage.content_add([sample_content]) assert mock_memory.call_count == 1 diff --git a/swh/storage/tests/test_server.py b/swh/storage/tests/test_server.py index b2d0392d..d4089cf8 100644 --- a/swh/storage/tests/test_server.py +++ b/swh/storage/tests/test_server.py @@ -1,73 +1,96 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +import os +from typing import Any, Dict + import pytest import yaml -from swh.storage.api.server import load_and_check_config +from swh.core.config import load_from_envvar +from swh.storage.api.server import ( + StorageServerApp, + load_and_check_config, + make_app_from_configfile, +) def prepare_config_file(tmpdir, content, name="config.yml"): """Prepare configuration file in `$tmpdir/name` with content `content`. Args: tmpdir (LocalPath): root directory content (str/dict): Content of the file either as string or as a dict. If a dict, converts the dict into a yaml string. name (str): configuration filename Returns path (str) of the configuration file prepared. 
""" config_path = tmpdir / name if isinstance(content, dict): # convert if needed content = yaml.dump(content) config_path.write_text(content, encoding="utf-8") # pytest on python3.5 does not support LocalPath manipulation, so # convert path to string return str(config_path) -def test_load_and_check_config_no_configuration(): +@pytest.mark.parametrize("storage_class", [None, ""]) +def test_load_and_check_config_no_configuration(storage_class): """Inexistent configuration files raises""" - with pytest.raises(EnvironmentError) as e: - load_and_check_config(None) + with pytest.raises(EnvironmentError, match="Configuration file must be defined"): + load_and_check_config(storage_class) - assert e.value.args[0] == "Configuration file must be defined" +def test_load_and_check_config_inexistent_file(): config_path = "/some/inexistent/config.yml" - with pytest.raises(FileNotFoundError) as e: + expected_error = f"Configuration file {config_path} does not exist" + with pytest.raises(FileNotFoundError, match=expected_error): load_and_check_config(config_path) - assert e.value.args[0] == "Configuration file %s does not exist" % (config_path,) - def test_load_and_check_config_wrong_configuration(tmpdir): """Wrong configuration raises""" config_path = prepare_config_file(tmpdir, "something: useless") - with pytest.raises(KeyError) as e: + with pytest.raises(KeyError, match="Missing 'storage' configuration"): load_and_check_config(config_path) - assert e.value.args[0] == "Missing '%storage' configuration" - def test_load_and_check_config_local_config_fine(tmpdir): - """'Remote configuration is fine""" - config = { - "storage": {"cls": "local", "args": {"db": "db", "objstorage": "something",}} - } + """'local' complete configuration is fine""" + config = {"storage": {"cls": "local", "db": "db", "objstorage": "something",}} config_path = prepare_config_file(tmpdir, config) - cfg = load_and_check_config(config_path, type="local") + cfg = load_and_check_config(config_path) assert cfg == config -def test_load_and_check_config_remote_config_fine(tmpdir): - """'Remote configuration is fine""" - config = {"storage": {"cls": "remote", "args": {}}} - config_path = prepare_config_file(tmpdir, config) - cfg = load_and_check_config(config_path, type="any") +@pytest.fixture +def swh_storage_server_config( + swh_storage_backend_config: Dict[str, Any] +) -> Dict[str, Any]: + return {"storage": swh_storage_backend_config} - assert cfg == config + +@pytest.fixture +def swh_storage_config(monkeypatch, swh_storage_server_config, tmp_path): + conf_path = os.path.join(str(tmp_path), "storage.yml") + with open(conf_path, "w") as f: + f.write(yaml.dump(swh_storage_server_config)) + monkeypatch.setenv("SWH_CONFIG_FILENAME", conf_path) + return conf_path + + +def test_server_make_app_from_config_file(swh_storage_config): + app = make_app_from_configfile() + expected_cfg = load_from_envvar() + + assert app is not None + assert isinstance(app, StorageServerApp) + assert app.config["storage"] == expected_cfg["storage"] + + app2 = make_app_from_configfile() + assert app is app2 diff --git a/swh/storage/validate.py b/swh/storage/validate.py index 2e7faddc..be6bcbe8 100644 --- a/swh/storage/validate.py +++ b/swh/storage/validate.py @@ -1,71 +1,71 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import Dict, Iterable, List from 
 from swh.model.hashutil import MultiHash, hash_to_bytes, hash_to_hex
 from swh.model.model import Content, Directory, Release, Revision, Snapshot
 from swh.storage import get_storage
 from swh.storage.exc import StorageArgumentException
 from swh.storage.interface import StorageInterface
 
 
 class ValidatingProxyStorage:
     """Proxy for storage classes, which checks inserted objects have
     a correct hash.
 
     Sample configuration use case for filtering storage:
 
     .. code-block: yaml
 
         storage:
             cls: validate
             storage:
                 cls: remote
                 url: http://storage.internal.staging.swh.network:5002/
 
     """
 
     def __init__(self, storage):
         self.storage: StorageInterface = get_storage(**storage)
 
     def __getattr__(self, key):
         if key == "storage":
             raise AttributeError(key)
         return getattr(self.storage, key)
 
     def _check_hashes(self, objects: Iterable):
         for obj in objects:
-            id_ = hash_to_bytes(obj.__class__.compute_hash(obj.to_dict()))
+            id_ = hash_to_bytes(obj.compute_hash())
             if id_ != obj.id:
                 raise StorageArgumentException(
                     f"Object has id {hash_to_hex(obj.id)}, "
                     f"but it should be {hash_to_hex(id_)}: {obj}"
                 )
 
     def content_add(self, content: List[Content]) -> Dict:
         for cont in content:
             hashes = MultiHash.from_data(cont.data).digest()
             if hashes != cont.hashes():
                 raise StorageArgumentException(
                     f"Object has hashes {cont.hashes()}, but they should be {hashes}"
                 )
         return self.storage.content_add(content)
 
     def directory_add(self, directories: List[Directory]) -> Dict:
         self._check_hashes(directories)
         return self.storage.directory_add(directories)
 
     def revision_add(self, revisions: List[Revision]) -> Dict:
         self._check_hashes(revisions)
         return self.storage.revision_add(revisions)
 
     def release_add(self, releases: List[Release]) -> Dict:
         self._check_hashes(releases)
         return self.storage.release_add(releases)
 
     def snapshot_add(self, snapshots: List[Snapshot]) -> Dict:
         self._check_hashes(snapshots)
         return self.storage.snapshot_add(snapshots)
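
For readers who want to wire this proxy up outside of a YAML file, here is a minimal sketch of the equivalent programmatic construction. It assumes that `validate` and `memory` are class names registered with `swh.storage.get_storage` (the factory this package ships); in a real deployment the inner configuration would point at a `remote` or `local` backend instead.

```python
# Minimal sketch: build a validating proxy in front of another storage,
# mirroring the YAML sample in ValidatingProxyStorage's docstring.
# Assumes "validate" and "memory" are registered storage classes.
from swh.storage import get_storage

storage = get_storage(
    cls="validate",
    storage={"cls": "memory"},  # in production, e.g. a "remote" backend with its url
)

# Any directory/revision/release/snapshot whose declared `id` disagrees with the
# hash recomputed by obj.compute_hash() is rejected with StorageArgumentException
# before it reaches the wrapped backend; every other method is delegated
# unchanged through __getattr__.
```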