diff --git a/requirements-swh.txt b/requirements-swh.txt index fea0dfb2..e8ef7740 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1,4 +1,4 @@ swh.core[db,http] >= 0.14.0 swh.counters >= v0.8.0 -swh.model >= 4.0.0 +swh.model >= 4.3.0 swh.objstorage >= 0.2.2 diff --git a/swh/storage/backfill.py b/swh/storage/backfill.py index 1ea0f4dd..54908b48 100644 --- a/swh/storage/backfill.py +++ b/swh/storage/backfill.py @@ -1,663 +1,657 @@ # Copyright (C) 2017-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Storage backfiller. The backfiller's goal is to send back part or all of the objects from a storage to the journal topics. The current implementation consists of the JournalBackfiller class, which simply reads the objects from the storage and sends every object identifier back to the journal. """ import logging from typing import Any, Callable, Dict, Optional from swh.core.db import BaseDb from swh.model.model import ( BaseModel, Directory, DirectoryEntry, ExtID, RawExtrinsicMetadata, Release, Revision, Snapshot, SnapshotBranch, TargetType, ) from swh.model.swhids import ExtendedObjectType from swh.storage.postgresql.converters import ( db_to_extid, db_to_raw_extrinsic_metadata, db_to_release, db_to_revision, ) from swh.storage.replay import OBJECT_CONVERTERS from swh.storage.writer import JournalWriter logger = logging.getLogger(__name__) PARTITION_KEY = { "content": "sha1", "skipped_content": "sha1", "directory": "id", "extid": "target", "metadata_authority": "type, url", "metadata_fetcher": "name, version", "raw_extrinsic_metadata": "target", "revision": "revision.id", "release": "release.id", "snapshot": "id", "origin": "id", "origin_visit": "origin_visit.origin", "origin_visit_status": "origin_visit_status.origin", } COLUMNS = { "content": [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "status", "ctime", ], "skipped_content": [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", "reason", ], "directory": ["id", "dir_entries", "file_entries", "rev_entries", "raw_manifest"], "extid": ["extid_type", "extid", "extid_version", "target_type", "target"], "metadata_authority": ["type", "url"], "metadata_fetcher": ["name", "version"], "origin": ["url"], "origin_visit": ["visit", "type", ("origin.url", "origin"), "date",], "origin_visit_status": [ ("origin_visit_status.visit", "visit"), ("origin.url", "origin"), ("origin_visit_status.date", "date"), "type", "snapshot", "status", "metadata", ], "raw_extrinsic_metadata": [ "raw_extrinsic_metadata.type", "raw_extrinsic_metadata.target", "metadata_authority.type", "metadata_authority.url", "metadata_fetcher.name", "metadata_fetcher.version", "discovery_date", "format", "raw_extrinsic_metadata.metadata", "origin", "visit", "snapshot", "release", "revision", "path", "directory", ], "revision": [ ("revision.id", "id"), "date", - "date_offset", - "date_neg_utc_offset", "date_offset_bytes", "committer_date", - "committer_date_offset", - "committer_date_neg_utc_offset", "committer_date_offset_bytes", "type", "directory", "message", "synthetic", "metadata", "extra_headers", ( "array(select parent_id::bytea from revision_history rh " "where rh.id = revision.id order by rh.parent_rank asc)", "parents", ), "raw_manifest", ("a.id", "author_id"), ("a.name", "author_name"), ("a.email", "author_email"), ("a.fullname", "author_fullname"), ("c.id",
"committer_id"), ("c.name", "committer_name"), ("c.email", "committer_email"), ("c.fullname", "committer_fullname"), ], "release": [ ("release.id", "id"), "date", - "date_offset", - "date_neg_utc_offset", "date_offset_bytes", "comment", ("release.name", "name"), "synthetic", "target", "target_type", ("a.id", "author_id"), ("a.name", "author_name"), ("a.email", "author_email"), ("a.fullname", "author_fullname"), "raw_manifest", ], "snapshot": ["id", "object_id"], } JOINS = { "release": ["person a on release.author=a.id"], "revision": [ "person a on revision.author=a.id", "person c on revision.committer=c.id", ], "origin_visit": ["origin on origin_visit.origin=origin.id"], "origin_visit_status": ["origin on origin_visit_status.origin=origin.id",], "raw_extrinsic_metadata": [ "metadata_authority on " "raw_extrinsic_metadata.authority_id=metadata_authority.id", "metadata_fetcher on raw_extrinsic_metadata.fetcher_id=metadata_fetcher.id", ], } EXTRA_WHERE = { # hack to force the right index usage on table extid "extid": "target_type in ('revision', 'release', 'content', 'directory')" } def directory_converter(db: BaseDb, directory_d: Dict[str, Any]) -> Directory: """Convert directory from the flat representation to swh model compatible objects. """ columns = ["target", "name", "perms"] query_template = """ select %(columns)s from directory_entry_%(type)s where id in %%s """ types = ["file", "dir", "rev"] entries = [] with db.cursor() as cur: for type in types: ids = directory_d.pop("%s_entries" % type) if not ids: continue query = query_template % { "columns": ",".join(columns), "type": type, } cur.execute(query, (tuple(ids),)) for row in cur: entry_d = dict(zip(columns, row)) entry = DirectoryEntry( name=entry_d["name"], type=type, target=entry_d["target"], perms=entry_d["perms"], ) entries.append(entry) return Directory(id=directory_d["id"], entries=tuple(entries),) def raw_extrinsic_metadata_converter( db: BaseDb, metadata: Dict[str, Any] ) -> RawExtrinsicMetadata: """Convert a raw extrinsic metadata from the flat representation to swh model compatible objects. """ return db_to_raw_extrinsic_metadata(metadata) def extid_converter(db: BaseDb, extid: Dict[str, Any]) -> ExtID: """Convert an extid from the flat representation to swh model compatible objects. """ return db_to_extid(extid) def revision_converter(db: BaseDb, revision_d: Dict[str, Any]) -> Revision: """Convert revision from the flat representation to swh model compatible objects. """ revision = db_to_revision(revision_d) assert revision is not None, revision_d["id"] return revision def release_converter(db: BaseDb, release_d: Dict[str, Any]) -> Release: """Convert release from the flat representation to swh model compatible objects. """ release = db_to_release(release_d) assert release is not None, release_d["id"] return release def snapshot_converter(db: BaseDb, snapshot_d: Dict[str, Any]) -> Snapshot: """Convert snapshot from the flat representation to swh model compatible objects. 
""" columns = ["name", "target", "target_type"] query = """ select %s from snapshot_branches sbs inner join snapshot_branch sb on sb.object_id=sbs.branch_id where sbs.snapshot_id=%%s """ % ", ".join( columns ) with db.cursor() as cur: cur.execute(query, (snapshot_d["object_id"],)) branches = {} for name, *row in cur: branch_d = dict(zip(columns[1:], row)) if branch_d["target"] is not None and branch_d["target_type"] is not None: branch: Optional[SnapshotBranch] = SnapshotBranch( target=branch_d["target"], target_type=TargetType(branch_d["target_type"]), ) else: branch = None branches[name] = branch return Snapshot(id=snapshot_d["id"], branches=branches,) CONVERTERS: Dict[str, Callable[[BaseDb, Dict[str, Any]], BaseModel]] = { "directory": directory_converter, "extid": extid_converter, "raw_extrinsic_metadata": raw_extrinsic_metadata_converter, "revision": revision_converter, "release": release_converter, "snapshot": snapshot_converter, } def object_to_offset(object_id, numbits): """Compute the index of the range containing object id, when dividing space into 2^numbits. Args: object_id (str): The hex representation of object_id numbits (int): Number of bits in which we divide input space Returns: The index of the range containing object id """ q, r = divmod(numbits, 8) length = q + (r != 0) shift_bits = 8 - r if r else 0 truncated_id = object_id[: length * 2] if len(truncated_id) < length * 2: truncated_id += "0" * (length * 2 - len(truncated_id)) truncated_id_bytes = bytes.fromhex(truncated_id) return int.from_bytes(truncated_id_bytes, byteorder="big") >> shift_bits def byte_ranges(numbits, start_object=None, end_object=None): """Generate start/end pairs of bytes spanning numbits bits and constrained by optional start_object and end_object. Args: numbits (int): Number of bits in which we divide input space start_object (str): Hex object id contained in the first range returned end_object (str): Hex object id contained in the last range returned Yields: 2^numbits pairs of bytes """ q, r = divmod(numbits, 8) length = q + (r != 0) shift_bits = 8 - r if r else 0 def to_bytes(i): return int.to_bytes(i << shift_bits, length=length, byteorder="big") start_offset = 0 end_offset = 1 << numbits if start_object is not None: start_offset = object_to_offset(start_object, numbits) if end_object is not None: end_offset = object_to_offset(end_object, numbits) + 1 for start in range(start_offset, end_offset): end = start + 1 if start == 0: yield None, to_bytes(end) elif end == 1 << numbits: yield to_bytes(start), None else: yield to_bytes(start), to_bytes(end) def raw_extrinsic_metadata_target_ranges(start_object=None, end_object=None): """Generate ranges of values for the `target` attribute of `raw_extrinsic_metadata` objects. This generates one range for all values before the first SWHID (which would correspond to raw origin URLs), then a number of hex-based ranges for each known type of SWHID (2**12 ranges for directories, 2**8 ranges for all other types). Finally, it generates one extra range for values above all possible SWHIDs. 
""" if start_object is None: start_object = "" swhid_target_types = sorted(type.value for type in ExtendedObjectType) first_swhid = f"swh:1:{swhid_target_types[0]}:" # Generate a range for url targets, if the starting object is before SWHIDs if start_object < first_swhid: yield start_object, ( first_swhid if end_object is None or end_object >= first_swhid else end_object ) if end_object is not None and end_object <= first_swhid: return # Prime the following loop, which uses the upper bound of the previous range # as lower bound, to account for potential targets between two valid types # of SWHIDs (even though they would eventually be rejected by the # RawExtrinsicMetadata parser, they /might/ exist...) end_swhid = first_swhid # Generate ranges for swhid targets for target_type in swhid_target_types: finished = False base_swhid = f"swh:1:{target_type}:" last_swhid = base_swhid + ("f" * 40) if start_object > last_swhid: continue # Generate 2**8 or 2**12 ranges for _, end in byte_ranges(12 if target_type == "dir" else 8): # Reuse previous uppper bound start_swhid = end_swhid # Use last_swhid for this object type if on the last byte range end_swhid = (base_swhid + end.hex()) if end is not None else last_swhid # Ignore out of bounds ranges if start_object >= end_swhid: continue # Potentially clamp start of range to the first object requested start_swhid = max(start_swhid, start_object) # Handle ending the loop early if the last requested object id is in # the current range if end_object is not None and end_swhid >= end_object: end_swhid = end_object finished = True yield start_swhid, end_swhid if finished: return # Generate one final range for potential raw origin URLs after the last # valid SWHID start_swhid = max(start_object, end_swhid) yield start_swhid, end_object def integer_ranges(start, end, block_size=1000): for start in range(start, end, block_size): if start == 0: yield None, block_size elif start + block_size > end: yield start, end else: yield start, start + block_size RANGE_GENERATORS = { "content": lambda start, end: byte_ranges(24, start, end), "skipped_content": lambda start, end: [(None, None)], "directory": lambda start, end: byte_ranges(24, start, end), "extid": lambda start, end: byte_ranges(24, start, end), "revision": lambda start, end: byte_ranges(24, start, end), "release": lambda start, end: byte_ranges(16, start, end), "raw_extrinsic_metadata": raw_extrinsic_metadata_target_ranges, "snapshot": lambda start, end: byte_ranges(16, start, end), "origin": integer_ranges, "origin_visit": integer_ranges, "origin_visit_status": integer_ranges, } def compute_query(obj_type, start, end): columns = COLUMNS.get(obj_type) join_specs = JOINS.get(obj_type, []) join_clause = "\n".join("left join %s" % clause for clause in join_specs) additional_where = EXTRA_WHERE.get(obj_type) where = [] where_args = [] if start: where.append("%(keys)s >= %%s") where_args.append(start) if end: where.append("%(keys)s < %%s") where_args.append(end) if additional_where: where.append(additional_where) where_clause = "" if where: where_clause = ("where " + " and ".join(where)) % { "keys": "(%s)" % PARTITION_KEY[obj_type] } column_specs = [] column_aliases = [] for column in columns: if isinstance(column, str): column_specs.append(column) column_aliases.append(column) else: column_specs.append("%s as %s" % column) column_aliases.append(column[1]) query = """ select %(columns)s from %(table)s %(join)s %(where)s """ % { "columns": ",".join(column_specs), "table": obj_type, "join": join_clause, "where": 
where_clause, } return query, where_args, column_aliases def fetch(db, obj_type, start, end): """Fetch all obj_type objects from db. This opens one connection, streams objects and, when done, closes the connection. Args: db (BaseDb): Db connection object obj_type (str): Object type start (Union[bytes|Tuple]): Range start identifier end (Union[bytes|Tuple]): Range end identifier Raises: ValueError if obj_type is not supported Yields: Objects in the given range """ query, where_args, column_aliases = compute_query(obj_type, start, end) converter = CONVERTERS.get(obj_type) with db.cursor() as cursor: logger.debug("Fetching data for table %s", obj_type) logger.debug("query: %s %s", query, where_args) cursor.execute(query, where_args) for row in cursor: record = dict(zip(column_aliases, row)) if converter: record = converter(db, record) else: record = OBJECT_CONVERTERS[obj_type](record) logger.debug("record: %s", record) yield record def _format_range_bound(bound): if isinstance(bound, bytes): return bound.hex() else: return str(bound) MANDATORY_KEYS = ["storage", "journal_writer"] class JournalBackfiller: """Class in charge of reading the storage's objects and sending them back to the journal's topics. This is designed to be run periodically. """ def __init__(self, config=None): self.config = config self.check_config(config) def check_config(self, config): missing_keys = [] for key in MANDATORY_KEYS: if not config.get(key): missing_keys.append(key) if missing_keys: raise ValueError( "Configuration error: The following keys must be" " provided: %s" % (",".join(missing_keys),) ) if "cls" not in config["storage"] or config["storage"]["cls"] not in ( "local", "postgresql", ): raise ValueError( "swh storage backfiller must be configured to use a local" " (PostgreSQL) storage" ) def parse_arguments(self, object_type, start_object, end_object): """Parse arguments Raises: ValueError for unsupported object type ValueError if object ids are not parseable Returns: Parsed start and end object ids """ if object_type not in COLUMNS: raise ValueError( "Object type %s is not supported. " "The only possible values are %s" % (object_type, ", ".join(sorted(COLUMNS.keys()))) ) if object_type in ["origin", "origin_visit", "origin_visit_status"]: if start_object: start_object = int(start_object) else: start_object = 0 if end_object: end_object = int(end_object) else: end_object = 100 * 1000 * 1000 # hard-coded limit return start_object, end_object def run(self, object_type, start_object, end_object, dry_run=False): """Reads the storage's subscribed object types and sends them to the journal's reading topic. """
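# (Editor's illustration, not part of the original patch.) A minimal
# configuration and invocation sketch; the broker, topic prefix and DSN
# values below are placeholders, and the journal_writer keys are those
# accepted by swh.journal's Kafka writer:
#
#     backfiller = JournalBackfiller({
#         "storage": {"cls": "postgresql", "db": "service=swh-storage"},
#         "journal_writer": {
#             "brokers": ["localhost:9092"],      # placeholder broker
#             "prefix": "swh.journal.objects",    # placeholder topic prefix
#             "client_id": "swh.storage.backfiller",
#         },
#     })
#     # Replay all revisions whose id starts with byte 0x00; dry_run only
#     # exercises the read/convert path without writing to Kafka.
#     backfiller.run("revision", start_object="00", end_object="01", dry_run=True)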
""" start_object, end_object = self.parse_arguments( object_type, start_object, end_object ) db = BaseDb.connect(self.config["storage"]["db"]) writer = JournalWriter({"cls": "kafka", **self.config["journal_writer"]}) assert writer.journal is not None for range_start, range_end in RANGE_GENERATORS[object_type]( start_object, end_object ): logger.info( "Processing %s range %s to %s", object_type, _format_range_bound(range_start), _format_range_bound(range_end), ) objects = fetch(db, object_type, start=range_start, end=range_end) if not dry_run: writer.write_additions(object_type, objects) else: # only consume the objects iterator to check for any potential # decoding/encoding errors for obj in objects: pass if __name__ == "__main__": print('Please use the "swh-journal backfiller run" command') diff --git a/swh/storage/cassandra/schema.py b/swh/storage/cassandra/schema.py index d12b5e23..c9e08a45 100644 --- a/swh/storage/cassandra/schema.py +++ b/swh/storage/cassandra/schema.py @@ -1,342 +1,340 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os _use_scylla = bool(os.environ.get("SWH_USE_SCYLLADB", "")) UDF_LANGUAGE = "lua" if _use_scylla else "java" if UDF_LANGUAGE == "java": # For Cassandra CREATE_TABLES_QUERIES = [ """ CREATE OR REPLACE FUNCTION ascii_bins_count_sfunc ( state tuple>, -- (nb_none, map) bin_name ascii ) CALLED ON NULL INPUT RETURNS tuple> LANGUAGE java AS $$ if (bin_name == null) { state.setInt(0, state.getInt(0) + 1); } else { Map counters = state.getMap( 1, String.class, Integer.class); Integer nb = counters.get(bin_name); if (nb == null) { nb = 0; } counters.put(bin_name, nb + 1); state.setMap(1, counters, String.class, Integer.class); } return state; $$;""", """ CREATE OR REPLACE AGGREGATE ascii_bins_count ( ascii ) SFUNC ascii_bins_count_sfunc STYPE tuple> INITCOND (0, {}) ;""", ] elif UDF_LANGUAGE == "lua": # For ScyllaDB # TODO: this is not implementable yet, because ScyllaDB does not support # user-defined aggregates. https://github.com/scylladb/scylla/issues/7201 CREATE_TABLES_QUERIES = [] else: assert False, f"{UDF_LANGUAGE} must be 'lua' or 'java'" CREATE_TABLES_QUERIES = [ *CREATE_TABLES_QUERIES, """ CREATE TYPE IF NOT EXISTS microtimestamp ( seconds bigint, microseconds int, );""", """ CREATE TYPE IF NOT EXISTS microtimestamp_with_timezone ( timestamp frozen, - offset smallint, - negative_utc boolean, offset_bytes blob, );""", """ CREATE TYPE IF NOT EXISTS person ( fullname blob, name blob, email blob );""", """ CREATE TABLE IF NOT EXISTS content ( sha1 blob, sha1_git blob, sha256 blob, blake2s256 blob, length bigint, ctime timestamp, -- creation time, i.e. time of (first) injection into the storage status ascii, PRIMARY KEY ((sha256), sha1, sha1_git, blake2s256) );""", """ CREATE TABLE IF NOT EXISTS skipped_content ( sha1 blob, sha1_git blob, sha256 blob, blake2s256 blob, length bigint, ctime timestamp, -- creation time, i.e. 
time of (first) injection into the storage status ascii, reason text, origin text, PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256)) );""", """ CREATE TABLE IF NOT EXISTS revision ( id blob PRIMARY KEY, date microtimestamp_with_timezone, committer_date microtimestamp_with_timezone, type ascii, directory blob, -- source code "root" directory message blob, author person, committer person, synthetic boolean, -- true iff revision has been created by Software Heritage metadata text, -- extra metadata as JSON(tarball checksums, etc...) extra_headers frozen<list<frozen<tuple<blob, blob>>>>, -- extra commit information as (tuple(key, value), ...) raw_manifest blob, -- NULL if the object can be rebuilt from other cells and revision_parent. );""", """ CREATE TABLE IF NOT EXISTS revision_parent ( id blob, parent_rank int, -- parent position in merge commits, 0-based parent_id blob, PRIMARY KEY ((id), parent_rank) );""", """ CREATE TABLE IF NOT EXISTS release ( id blob PRIMARY KEY, target_type ascii, target blob, date microtimestamp_with_timezone, name blob, message blob, author person, synthetic boolean, -- true iff release has been created by Software Heritage raw_manifest blob, -- NULL if the object can be rebuilt from other cells );""", """ CREATE TABLE IF NOT EXISTS directory ( id blob PRIMARY KEY, raw_manifest blob -- NULL if the object can be rebuilt from (sorted) entries );""", """ CREATE TABLE IF NOT EXISTS directory_entry ( directory_id blob, name blob, -- path name, relative to containing dir target blob, perms int, -- unix-like permissions type ascii, -- target type PRIMARY KEY ((directory_id), name) );""", """ CREATE TABLE IF NOT EXISTS snapshot ( id blob PRIMARY KEY, );""", """ -- For a given snapshot_id, branches are sorted by their name, -- allowing easy pagination. CREATE TABLE IF NOT EXISTS snapshot_branch ( snapshot_id blob, name blob, target_type ascii, target blob, PRIMARY KEY ((snapshot_id), name) );""", """ CREATE TABLE IF NOT EXISTS origin_visit ( origin text, visit bigint, date timestamp, type text, PRIMARY KEY ((origin), visit) );""", """ CREATE TABLE IF NOT EXISTS origin_visit_status ( origin text, visit bigint, date timestamp, type text, status ascii, metadata text, snapshot blob, PRIMARY KEY ((origin), visit, date) ) WITH CLUSTERING ORDER BY (visit DESC, date DESC) ;""", # 'WITH CLUSTERING ORDER BY' is optional with Cassandra 4, but ScyllaDB needs it """ CREATE TABLE IF NOT EXISTS origin ( sha1 blob PRIMARY KEY, url text, next_visit_id int, -- We need integer visit ids for compatibility with the pgsql -- storage, so we're using lightweight transactions with this trick: -- https://stackoverflow.com/a/29391877/539465 );""", """ CREATE TABLE IF NOT EXISTS metadata_authority ( url text, type ascii, PRIMARY KEY ((url), type) );""", """ CREATE TABLE IF NOT EXISTS metadata_fetcher ( name ascii, version ascii, PRIMARY KEY ((name), version) );""", """ CREATE TABLE IF NOT EXISTS raw_extrinsic_metadata ( id blob, type text, target text, -- metadata source authority_type text, authority_url text, discovery_date timestamp, fetcher_name ascii, fetcher_version ascii, -- metadata itself format ascii, metadata blob, -- context origin text, visit bigint, snapshot text, release text, revision text, path blob, directory text, PRIMARY KEY ((target), authority_type, authority_url, discovery_date, id) -- An explanation is in order for this primary key: -- -- Intuitively, the primary key should only be 'id', because two metadata -- entries are the same iff the id is the same; and 'id' is used for -- deduplication.
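-- (Editor's illustration, not part of the original patch.) In practice
-- this key makes the main read path a single-partition clustering slice,
-- e.g. (hypothetical query sketch, using only the columns defined above):
--
--   SELECT discovery_date, id, format, metadata
--   FROM raw_extrinsic_metadata
--   WHERE target = ? AND authority_type = ? AND authority_url = ?
--     AND discovery_date >= ?;
--
-- with pagination resuming from the last (discovery_date, id) pair seen.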
-- -- However, we also want to query by -- (target, authority_type, authority_url, discovery_date) -- The naive solution to this would be an extra table, to use as index; -- but it means 1. extra code to keep them in sync 2. overhead when writing -- 3. overhead + random reads (instead of linear) when reading. -- -- Therefore, we use a single table for both, by adding the column -- we want to query with before the id. -- It solves both a) the query/order issues and b) the uniqueness issue because: -- -- a) adding the id at the end of the primary key does not change the rows' order: -- for two different rows, id1 != id2, so -- (target1, ..., date1) < (target2, ..., date2) -- <=> (target1, ..., date1, id1) < (target2, ..., date2, id2) -- -- b) the id is a hash of all the columns, so: -- rows are the same -- <=> id1 == id2 -- <=> (target1, ..., date1, id1) == (target2, ..., date2, id2) );""", """ CREATE TABLE IF NOT EXISTS raw_extrinsic_metadata_by_id ( id blob, target text, authority_type text, authority_url text, PRIMARY KEY ((id)) );""", """ CREATE TABLE IF NOT EXISTS extid ( extid_type ascii, extid blob, extid_version smallint, target_type ascii, target blob, PRIMARY KEY ((extid_type, extid), extid_version, target_type, target) );""", """ CREATE TABLE IF NOT EXISTS extid_by_target ( target_type ascii, target blob, target_token bigint, -- value of token(pk) on the "primary" table PRIMARY KEY ((target_type, target), target_token) );""", ] CONTENT_INDEX_TEMPLATE = """ -- Secondary table, used for looking up "content" from a single hash CREATE TABLE IF NOT EXISTS content_by_{main_algo} ( {main_algo} blob, target_token bigint, -- value of token(pk) on the "primary" table PRIMARY KEY (({main_algo}), target_token) ); CREATE TABLE IF NOT EXISTS skipped_content_by_{main_algo} ( {main_algo} blob, target_token bigint, -- value of token(pk) on the "primary" table PRIMARY KEY (({main_algo}), target_token) ); """ TABLES = [ "skipped_content", "content", "revision", "revision_parent", "release", "directory", "directory_entry", "snapshot", "snapshot_branch", "origin_visit", "origin", "raw_extrinsic_metadata", "origin_visit_status", "metadata_authority", "metadata_fetcher", "extid", "extid_by_target", ] HASH_ALGORITHMS = ["sha1", "sha1_git", "sha256", "blake2s256"] for main_algo in HASH_ALGORITHMS: CREATE_TABLES_QUERIES.extend( CONTENT_INDEX_TEMPLATE.format( main_algo=main_algo, other_algos=", ".join( [algo for algo in HASH_ALGORITHMS if algo != main_algo] ), ).split("\n\n") ) TABLES.append("content_by_%s" % main_algo) TABLES.append("skipped_content_by_%s" % main_algo) diff --git a/swh/storage/postgresql/converters.py b/swh/storage/postgresql/converters.py index 36c75c31..e9e6eddb 100644 --- a/swh/storage/postgresql/converters.py +++ b/swh/storage/postgresql/converters.py @@ -1,367 +1,338 @@ # Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import math from typing import Any, Dict, Optional import warnings from swh.core.utils import encode_with_unescape from swh.model.model import ( ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, ObjectType, Origin, Person, RawExtrinsicMetadata, Release, Revision, RevisionType, Timestamp, TimestampWithTimezone, ) from swh.model.swhids import CoreSWHID, ExtendedSWHID from swh.model.swhids import ObjectType as SwhidObjectType from ..utils import 
map_optional DEFAULT_AUTHOR = { "fullname": None, "name": None, "email": None, } DEFAULT_DATE = { "timestamp": None, "offset": 0, "neg_utc_offset": None, "offset_bytes": None, } def author_to_db(author: Optional[Person]) -> Dict[str, Any]: """Convert a swh-model author to its DB representation. Args: author: a :mod:`swh.model` compatible author Returns: dict: a dictionary with three keys: fullname, name and email """ if author is None: return DEFAULT_AUTHOR return author.to_dict() def db_to_author( fullname: Optional[bytes], name: Optional[bytes], email: Optional[bytes] ) -> Optional[Person]: """Convert the DB representation of an author to a swh-model author. Args: fullname (bytes): the author's fullname name (bytes): the author's name email (bytes): the author's email Returns: a Person object, or None if 'fullname' is None. """ if fullname is None: return None return Person(fullname=fullname, name=name, email=email,) def db_to_git_headers(db_git_headers): ret = [] for key, value in db_git_headers: ret.append([key.encode("utf-8"), encode_with_unescape(value)]) return ret def db_to_date( - date: Optional[datetime.datetime], - offset: int, - neg_utc_offset: Optional[bool], - offset_bytes: Optional[bytes], + date: Optional[datetime.datetime], offset_bytes: bytes, ) -> Optional[TimestampWithTimezone]: """Convert the DB representation of a date to a swh-model compatible date. Args: date: a date pulled out of the database - offset: an integer number of minutes representing an UTC offset - neg_utc_offset: whether an utc offset is negative + offset_bytes: a byte representation of the UTC offset, usually as "+HHMM" + or "-HHMM" Returns: a TimestampWithTimezone, or None if the date is None. """ if date is None: return None - if neg_utc_offset is None: - # For older versions of the database that were not migrated to schema v160 - neg_utc_offset = False - - kwargs = {} - if offset_bytes: - # TODO: remove the conditional after migration is complete. - kwargs["offset_bytes"] = offset_bytes - return TimestampWithTimezone( timestamp=Timestamp( # we use floor() instead of int() to round down, because of negative dates seconds=math.floor(date.timestamp()), microseconds=date.microsecond, ), - offset=offset, - negative_utc=neg_utc_offset, - **kwargs, + offset_bytes=offset_bytes, ) def date_to_db(ts_with_tz: Optional[TimestampWithTimezone]) -> Dict[str, Any]: """Convert a swh-model date_offset to its DB representation. Args: ts_with_tz: a TimestampWithTimezone object Returns: dict: a dictionary with these keys: - timestamp: a date in ISO format - - offset: the UTC offset in minutes - - neg_utc_offset: a boolean indicating whether a null offset is - negative or positive. - offset_bytes: a byte representation of the UTC offset, usually as "+HHMM" or "-HHMM" """ if ts_with_tz is None: return DEFAULT_DATE ts = ts_with_tz.timestamp timestamp = datetime.datetime.fromtimestamp(ts.seconds, datetime.timezone.utc) timestamp = timestamp.replace(microsecond=ts.microseconds) return { # PostgreSQL supports isoformatted timestamps "timestamp": timestamp.isoformat(), "offset": ts_with_tz.offset, - "neg_utc_offset": ts_with_tz.negative_utc, + "neg_utc_offset": ts_with_tz.offset == 0 + and ts_with_tz.offset_bytes.startswith(b"-"), "offset_bytes": ts_with_tz.offset_bytes, } def revision_to_db(revision: Revision) -> Dict[str, Any]: """Convert a swh-model revision to its database representation.
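# (Editor's sketch, not part of the original patch.) Round-tripping a date
# through the new offset_bytes-only representation, assuming swh.model >= 4.3
# where TimestampWithTimezone can be built from offset_bytes alone:
#
#     tstz = TimestampWithTimezone(
#         timestamp=Timestamp(seconds=1380883849, microseconds=0),
#         offset_bytes=b"+0200",
#     )
#     row = date_to_db(tstz)
#     assert row["offset_bytes"] == b"+0200"  # new column, stored verbatim
#     assert row["offset"] == 120             # legacy columns still filled in
#     dt = datetime.datetime.fromtimestamp(1380883849, datetime.timezone.utc)
#     assert db_to_date(dt, b"+0200") == tstz # reading back needs only the bytes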
""" author = author_to_db(revision.author) date = date_to_db(revision.date) committer = author_to_db(revision.committer) committer_date = date_to_db(revision.committer_date) return { "id": revision.id, "author_fullname": author["fullname"], "author_name": author["name"], "author_email": author["email"], "date": date["timestamp"], "date_offset": date["offset"], "date_neg_utc_offset": date["neg_utc_offset"], "date_offset_bytes": date["offset_bytes"], "committer_fullname": committer["fullname"], "committer_name": committer["name"], "committer_email": committer["email"], "committer_date": committer_date["timestamp"], "committer_date_offset": committer_date["offset"], "committer_date_neg_utc_offset": committer_date["neg_utc_offset"], "committer_date_offset_bytes": committer_date["offset_bytes"], "type": revision.type.value, "directory": revision.directory, "message": revision.message, "metadata": None if revision.metadata is None else dict(revision.metadata), "synthetic": revision.synthetic, "extra_headers": revision.extra_headers, "raw_manifest": revision.raw_manifest, "parents": [ {"id": revision.id, "parent_id": parent, "parent_rank": i,} for i, parent in enumerate(revision.parents) ], } def db_to_revision(db_revision: Dict[str, Any]) -> Optional[Revision]: """Convert a database representation of a revision to its swh-model representation.""" if db_revision["type"] is None: assert all( v is None for (k, v) in db_revision.items() if k not in ("id", "parents") ) return None author = db_to_author( db_revision["author_fullname"], db_revision["author_name"], db_revision["author_email"], ) - date = db_to_date( - db_revision["date"], - db_revision["date_offset"], - db_revision["date_neg_utc_offset"], - db_revision["date_offset_bytes"], - ) + date = db_to_date(db_revision["date"], db_revision["date_offset_bytes"],) committer = db_to_author( db_revision["committer_fullname"], db_revision["committer_name"], db_revision["committer_email"], ) committer_date = db_to_date( - db_revision["committer_date"], - db_revision["committer_date_offset"], - db_revision["committer_date_neg_utc_offset"], - db_revision["committer_date_offset_bytes"], + db_revision["committer_date"], db_revision["committer_date_offset_bytes"], ) assert author, "author is None" assert committer, "committer is None" parents = [] if "parents" in db_revision: for parent in db_revision["parents"]: if parent: parents.append(parent) metadata = db_revision["metadata"] extra_headers = db_revision["extra_headers"] if not extra_headers: if metadata and "extra_headers" in metadata: extra_headers = db_to_git_headers(metadata.pop("extra_headers")) else: # For older versions of the database that were not migrated to schema v161 extra_headers = () return Revision( id=db_revision["id"], author=author, date=date, committer=committer, committer_date=committer_date, type=RevisionType(db_revision["type"]), directory=db_revision["directory"], message=db_revision["message"], metadata=metadata, synthetic=db_revision["synthetic"], extra_headers=extra_headers, parents=tuple(parents), raw_manifest=db_revision["raw_manifest"], ) def release_to_db(release: Release) -> Dict[str, Any]: """Convert a swh-model release to its database representation. 
""" author = author_to_db(release.author) date = date_to_db(release.date) return { "id": release.id, "author_fullname": author["fullname"], "author_name": author["name"], "author_email": author["email"], "date": date["timestamp"], "date_offset": date["offset"], "date_neg_utc_offset": date["neg_utc_offset"], "date_offset_bytes": date["offset_bytes"], "name": release.name, "target": release.target, "target_type": release.target_type.value, "comment": release.message, "synthetic": release.synthetic, "raw_manifest": release.raw_manifest, } def db_to_release(db_release: Dict[str, Any]) -> Optional[Release]: """Convert a database representation of a release to its swh-model representation. """ if db_release["target_type"] is None: assert all(v is None for (k, v) in db_release.items() if k != "id") return None author = db_to_author( db_release["author_fullname"], db_release["author_name"], db_release["author_email"], ) - date = db_to_date( - db_release["date"], - db_release["date_offset"], - db_release["date_neg_utc_offset"], - db_release["date_offset_bytes"], - ) + date = db_to_date(db_release["date"], db_release["date_offset_bytes"],) return Release( author=author, date=date, id=db_release["id"], name=db_release["name"], message=db_release["comment"], synthetic=db_release["synthetic"], target=db_release["target"], target_type=ObjectType(db_release["target_type"]), raw_manifest=db_release["raw_manifest"], ) def db_to_raw_extrinsic_metadata(row) -> RawExtrinsicMetadata: target = row["raw_extrinsic_metadata.target"] if not target.startswith("swh:1:"): warnings.warn( "Fetching raw_extrinsic_metadata row with URL target", DeprecationWarning ) target = str(Origin(url=target).swhid()) return RawExtrinsicMetadata( target=ExtendedSWHID.from_string(target), authority=MetadataAuthority( type=MetadataAuthorityType(row["metadata_authority.type"]), url=row["metadata_authority.url"], ), fetcher=MetadataFetcher( name=row["metadata_fetcher.name"], version=row["metadata_fetcher.version"], ), discovery_date=row["discovery_date"], format=row["format"], metadata=row["raw_extrinsic_metadata.metadata"], origin=row["origin"], visit=row["visit"], snapshot=map_optional(CoreSWHID.from_string, row["snapshot"]), release=map_optional(CoreSWHID.from_string, row["release"]), revision=map_optional(CoreSWHID.from_string, row["revision"]), path=row["path"], directory=map_optional(CoreSWHID.from_string, row["directory"]), ) def db_to_extid(row) -> ExtID: return ExtID( extid=row["extid"], extid_type=row["extid_type"], extid_version=row.get("extid_version", 0), target=CoreSWHID( object_id=row["target"], object_type=SwhidObjectType[row["target_type"].upper()], ), ) diff --git a/swh/storage/tests/algos/test_revisions_walker.py b/swh/storage/tests/algos/test_revisions_walker.py index 799439fe..84156886 100644 --- a/swh/storage/tests/algos/test_revisions_walker.py +++ b/swh/storage/tests/algos/test_revisions_walker.py @@ -1,543 +1,525 @@ # Copyright (C) 2018-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from copy import deepcopy from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.storage.algos.revisions_walker import get_revisions_walker # For those tests, we will walk the following revisions history # with different orderings: # # * commit b364f53155044e5308a0f73abb3b5f01995a5b7d # |\ Merge: 836d498 b94886c # | | Author: Adam # | | 
AuthorDate: Fri Oct 4 12:50:49 2013 +0200 # | | Commit: Adam <adam.janicki@roche.com> # | | CommitDate: Fri Oct 4 12:50:49 2013 +0200 # | | # | | Merge branch 'release/1.0' # | | # | * commit b94886c500c46e32dc3d7ebae8a5409accd592e5 # | | Author: Adam <adam.janicki@roche.com> # | | AuthorDate: Fri Oct 4 12:50:38 2013 +0200 # | | Commit: Adam <adam.janicki@roche.com> # | | CommitDate: Fri Oct 4 12:50:38 2013 +0200 # | | # | | updating poms for 1.0 release # | | # | * commit 0cb6b4611d65bee0f57821dac7f611e2f8a02433 # | | Author: Adam <adam.janicki@roche.com> # | | AuthorDate: Fri Oct 4 12:50:38 2013 +0200 # | | Commit: Adam <adam.janicki@roche.com> # | | CommitDate: Fri Oct 4 12:50:38 2013 +0200 # | | # | | updating poms for 1.0 release # | | # | * commit 2b0240c6d682bad51532eec15b8a7ed6b75c8d31 # | | Author: Adam Janicki <janickia> # | | AuthorDate: Fri Oct 4 12:50:22 2013 +0200 # | | Commit: Adam Janicki <janickia> # | | CommitDate: Fri Oct 4 12:50:22 2013 +0200 # | | # | | For 1.0 release. Allow untracked. # | | # | * commit b401c50863475db4440c85c10ac0b6423b61554d # | | Author: Adam <adam.janicki@roche.com> # | | AuthorDate: Fri Oct 4 12:48:12 2013 +0200 # | | Commit: Adam <adam.janicki@roche.com> # | | CommitDate: Fri Oct 4 12:48:12 2013 +0200 # | | # | | updating poms for 1.0 release # | | # | * commit 9c5051397e5c2e0c258bb639c3dd34406584ca10 # |/ Author: Adam Janicki <janickia> # | AuthorDate: Fri Oct 4 12:47:48 2013 +0200 # | Commit: Adam Janicki <janickia> # | CommitDate: Fri Oct 4 12:47:48 2013 +0200 # | # | For 1.0 release. # | # * commit 836d498396fb9b5d45c896885f84d8d60a5651dc # | Author: Adam Janicki <janickia> # | AuthorDate: Fri Oct 4 12:08:16 2013 +0200 # | Commit: Adam Janicki <janickia> # | CommitDate: Fri Oct 4 12:08:16 2013 +0200 # | # | Add ignores # | # * commit ee96c2a2d397b79070d2b6fe3051290963748358 # | Author: Adam <adam.janicki@roche.com> # | AuthorDate: Fri Oct 4 10:48:16 2013 +0100 # | Commit: Adam <adam.janicki@roche.com> # | CommitDate: Fri Oct 4 10:48:16 2013 +0100 # | # | Reset author # | # * commit 8f89dda8e072383cf50d42532ae8f52ad89f8fdf # Author: Adam <adam.janicki@roche.com> # AuthorDate: Fri Oct 4 02:20:19 2013 -0700 # Commit: Adam <adam.janicki@roche.com> # CommitDate: Fri Oct 4 02:20:19 2013 -0700 # # Initial commit # raw dump of the above history in swh format _revisions_list = [ { "author": { "email": b"adam.janicki@roche.com", # noqa "fullname": b"Adam <adam.janicki@roche.com>", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam <adam.janicki@roche.com>", "id": 8040905, "name": b"Adam", }, "committer_date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883849}, }, "date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883849}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id": b"\xb3d\xf51U\x04NS\x08\xa0\xf7:\xbb;_\x01\x99Z[}", "message": b"Merge branch 'release/1.0'", "metadata": None, "parents": [ b"\x83mI\x83\x96\xfb\x9b]E\xc8\x96\x88_\x84\xd8\xd6\nVQ\xdc", b"\xb9H\x86\xc5\x00\xc4n2\xdc=~\xba\xe8\xa5@\x9a\xcc\xd5\x92\xe5", ], # noqa "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam <adam.janicki@roche.com>", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam <adam.janicki@roche.com>", "id": 8040905, "name": b"Adam", }, "committer_date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id":
b"\xb9H\x86\xc5\x00\xc4n2\xdc=~\xba\xe8\xa5@\x9a\xcc\xd5\x92\xe5", "message": b"updating poms for 1.0 release", "metadata": None, "parents": [ b"\x0c\xb6\xb4a\x1de\xbe\xe0\xf5x!\xda\xc7\xf6\x11\xe2\xf8\xa0$3" ], # noqa "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id": b"\x0c\xb6\xb4a\x1de\xbe\xe0\xf5x!\xda\xc7\xf6\x11\xe2\xf8\xa0$3", "message": b"updating poms for 1.0 release", "metadata": None, "parents": [b"+\x02@\xc6\xd6\x82\xba\xd5\x152\xee\xc1[\x8a~\xd6\xb7\\\x8d1"], "synthetic": False, "type": "git", }, { "author": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer_date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883822}, }, "date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883822}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id": b"+\x02@\xc6\xd6\x82\xba\xd5\x152\xee\xc1[\x8a~\xd6\xb7\\\x8d1", "message": b"For 1.0 release. Allow untracked.\n", "metadata": None, "parents": [b"\xb4\x01\xc5\x08cG]\xb4D\x0c\x85\xc1\n\xc0\xb6B;aUM"], "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883692}, }, "date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883692}, }, "directory": b"d@\xe7\x143w\xcb\xf7\xad\xae\x91\xd5\xec\xd8\x95\x82" b"\x02\xa6=\x1b", "id": b"\xb4\x01\xc5\x08cG]\xb4D\x0c\x85\xc1\n\xc0\xb6B;aUM", "message": b"updating poms for 1.0 release", "metadata": None, "parents": [b"\x9cPQ9~\\.\x0c%\x8b\xb69\xc3\xdd4@e\x84\xca\x10"], "synthetic": False, "type": "git", }, { "author": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer_date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883668}, }, "date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380883668}, }, "directory": b"\n\x857\x94r\xbe\xcc\x04=\xe9}\xe5\xfd\xdf?nR\xe6\xa7\x9e", "id": b"\x9cPQ9~\\.\x0c%\x8b\xb69\xc3\xdd4@e\x84\xca\x10", "message": b"For 1.0 release.\n", "metadata": None, "parents": [b"\x83mI\x83\x96\xfb\x9b]E\xc8\x96\x88_\x84\xd8\xd6\nVQ\xdc"], "synthetic": False, "type": "git", }, { "author": { 
"email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer_date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380881296}, }, "date": { - "negative_utc": None, - "offset": 120, + "offset_bytes": b"+0200", "timestamp": {"microseconds": 0, "seconds": 1380881296}, }, "directory": b".\xf9\xa5\xcb\xb0\xd3\xdc\x9b{\xb8\x81\x03l\xe2P\x16c\x0b|\xe6", # noqa "id": b"\x83mI\x83\x96\xfb\x9b]E\xc8\x96\x88_\x84\xd8\xd6\nVQ\xdc", "message": b"Add ignores\n", "metadata": None, "parents": [b"\xee\x96\xc2\xa2\xd3\x97\xb7\x90p\xd2\xb6\xfe0Q)\tct\x83X"], "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { - "negative_utc": None, - "offset": 60, + "offset_bytes": b"+0100", "timestamp": {"microseconds": 0, "seconds": 1380880096}, }, "date": { - "negative_utc": None, - "offset": 60, + "offset_bytes": b"+0100", "timestamp": {"microseconds": 0, "seconds": 1380880096}, }, "directory": b"\xc7r\xc4\x9f\xc0$\xd4\xab\xff\xcb]\xf6<\xcb\x8b~\xec\xc4\xd1)", # noqa "id": b"\xee\x96\xc2\xa2\xd3\x97\xb7\x90p\xd2\xb6\xfe0Q)\tct\x83X", "message": b"Reset author\n", "metadata": None, "parents": [b"\x8f\x89\xdd\xa8\xe0r8<\xf5\rBS*\xe8\xf5*\xd8\x9f\x8f\xdf"], "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { - "negative_utc": None, - "offset": -420, + "offset_bytes": b"-0700", "timestamp": {"microseconds": 0, "seconds": 1380878419}, }, "date": { - "negative_utc": None, - "offset": -420, + "offset_bytes": b"-0700", "timestamp": {"microseconds": 0, "seconds": 1380878419}, }, "directory": b"WS\xbaX\xd6x{q\x8f\x020i\xc5\x95\xa01\xf7y\xb2\x80", "id": b"\x8f\x89\xdd\xa8\xe0r8<\xf5\rBS*\xe8\xf5*\xd8\x9f\x8f\xdf", "message": b"Initial commit\n", "metadata": None, "parents": [], "synthetic": False, "type": "git", }, ] _rev_start = "b364f53155044e5308a0f73abb3b5f01995a5b7d" _rev_missing = "836d498396fb9b5d45c896885f84d8d60a5651dc" def check_revisions_ordering( mocker, rev_walker_type, expected_result, truncated_history, revisions_list=_revisions_list, ): storage = mocker.patch("swh.storage.postgresql.storage.Storage") if not truncated_history: storage.revision_log.return_value = revisions_list else: revs_lists_truncated = [ None if hash_to_hex(rev["id"]) == _rev_missing else rev for rev in revisions_list ] storage.revision_log.return_value = revs_lists_truncated revs_walker = get_revisions_walker( rev_walker_type, storage, hash_to_bytes(_rev_start) ) assert list(map(hash_to_bytes, expected_result)) == [ rev["id"] for rev in revs_walker ] assert revs_walker.is_history_truncated() == truncated_history if truncated_history: missing_revs = revs_walker.missing_revisions() assert missing_revs == {hash_to_bytes(_rev_missing)} else: assert revs_walker.missing_revisions() == set() def test_revisions_walker_committer_date(mocker): # revisions should be returned in reverse chronological order # of their committer date expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", 
"b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", "836d498396fb9b5d45c896885f84d8d60a5651dc", "ee96c2a2d397b79070d2b6fe3051290963748358", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", ] check_revisions_ordering( mocker, "committer_date", expected_result, truncated_history=False ) def test_revisions_walker_dfs(mocker): # revisions should be returned in the same order they are # visited when performing a depth-first search in pre order # on the revisions DAG expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "836d498396fb9b5d45c896885f84d8d60a5651dc", "ee96c2a2d397b79070d2b6fe3051290963748358", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", ] check_revisions_ordering(mocker, "dfs", expected_result, truncated_history=False) def test_revisions_walker_dfs_post(mocker): # revisions should be returned in the same order they are # visited when performing a depth-first search in post order # on the revisions DAG expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", "836d498396fb9b5d45c896885f84d8d60a5651dc", "ee96c2a2d397b79070d2b6fe3051290963748358", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", ] check_revisions_ordering( mocker, "dfs_post", expected_result, truncated_history=False ) def test_revisions_walker_bfs(mocker): # revisions should be returned in the same order they are # visited when performing a breadth-first search on the # revisions DAG expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "836d498396fb9b5d45c896885f84d8d60a5651dc", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "ee96c2a2d397b79070d2b6fe3051290963748358", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", ] check_revisions_ordering(mocker, "bfs", expected_result, truncated_history=False) def test_revisions_walker_truncated_history(mocker): expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", ] for revs_walker_type in ("committer_date", "bfs", "dfs", "dfs_post"): check_revisions_ordering( mocker, revs_walker_type, expected_result, truncated_history=True ) def test_revisions_walker_no_committer_date(mocker): expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", "836d498396fb9b5d45c896885f84d8d60a5651dc", "ee96c2a2d397b79070d2b6fe3051290963748358", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", ] revisions_list = deepcopy(_revisions_list) for revision in revisions_list: 
revision["committer_date"] = None check_revisions_ordering( mocker, "committer_date", expected_result, truncated_history=False, revisions_list=revisions_list, ) diff --git a/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py b/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py index 35661225..33a11d54 100644 --- a/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py +++ b/swh/storage/tests/migrate_extrinsic_metadata/test_debian.py @@ -1,572 +1,568 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # flake8: noqa # because of long lines import copy import datetime import json from unittest.mock import Mock, call from unittest.mock import patch as _patch import attr import pytest from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, Origin, OriginVisit, OriginVisitStatus, Person, RawExtrinsicMetadata, Revision, RevisionType, Snapshot, SnapshotBranch, TargetType, Timestamp, TimestampWithTimezone, ) from swh.model.swhids import CoreSWHID, ExtendedObjectType, ExtendedSWHID from swh.storage import get_storage from swh.storage.interface import ListOrder, PagedResult from swh.storage.migrate_extrinsic_metadata import debian_origins_from_row, handle_row FETCHER = MetadataFetcher( name="migrate-extrinsic-metadata-from-revisions", version="0.0.1", ) SWH_AUTHORITY = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="https://softwareheritage.org/", metadata={}, ) DIRECTORY_ID = b"a" * 20 DIRECTORY_SWHID = ExtendedSWHID( object_type=ExtendedObjectType.DIRECTORY, object_id=DIRECTORY_ID ) def now(): return datetime.datetime.now(tz=datetime.timezone.utc) def patch(function_name, *args, **kwargs): # It's a long name, this function spares some line breaks in 'with' statements return _patch( "swh.storage.migrate_extrinsic_metadata." + function_name, *args, **kwargs ) def test_debian_origins_from_row(): """Tests debian_origins_from_row on a real example (with some parts omitted, for conciseness).""" origin_url = "deb://Debian/packages/kalgebra" visit = OriginVisit( origin=origin_url, date=datetime.datetime( 2020, 1, 27, 19, 32, 3, 925498, tzinfo=datetime.timezone.utc, ), type="deb", visit=280, ) storage = get_storage("memory") storage.origin_add( [ Origin(url=origin_url), Origin(url="http://snapshot.debian.org/package/kalgebra/"), ] ) storage.origin_visit_add([visit]) storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=280, date=datetime.datetime( 2020, 1, 27, 19, 32, 3, 925498, tzinfo=datetime.timezone.utc ), status="full", snapshot=b"\xafD\x15\x98){\xd4$\xdeI\x1f\xbe\x95lh`x\x14\xce\xc4", metadata=None, ) ], ) snapshot = Snapshot( id=b"\xafD\x15\x98){\xd4$\xdeI\x1f\xbe\x95lh`x\x14\xce\xc4", branches={ # ... b"releases/unstable/main/4:19.12.1-1": SnapshotBranch( target=b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", target_type=TargetType.REVISION, ), }, ) revision_row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", "directory": DIRECTORY_ID, "metadata": { # ... "original_artifact": [ { "filename": "kalgebra_19.12.1-1.dsc", # ... 
}, ] }, } storage.snapshot_add([snapshot]) assert debian_origins_from_row(revision_row, storage) == [origin_url] def test_debian_origins_from_row__no_result(): """Tests debian_origins_from_row when there's no origin, visit, status, snapshot, branch, or matching branch. """ storage = get_storage("memory") origin_url = "deb://Debian/packages/kalgebra" snapshot_id = b"42424242424242424242" revision_id = b"21212121212121212121" storage.origin_add([Origin(url=origin_url)]) revision_row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", "directory": DIRECTORY_ID, "metadata": {"original_artifact": [{"filename": "kalgebra_19.12.1-1.dsc",},]}, } # no visit assert debian_origins_from_row(revision_row, storage) == [] storage.origin_visit_add( [OriginVisit(origin=origin_url, date=now(), type="deb", visit=280,)] ) # no status assert debian_origins_from_row(revision_row, storage) == [] status = OriginVisitStatus( origin=origin_url, visit=280, date=now(), status="full", snapshot=None, metadata=None, ) storage.origin_visit_status_add([status]) # no snapshot assert debian_origins_from_row(revision_row, storage) == [] status = attr.evolve(status, snapshot=snapshot_id, date=now()) storage.origin_visit_status_add([status]) storage_before_snapshot = copy.deepcopy(storage) snapshot = Snapshot(id=snapshot_id, branches={}) storage.snapshot_add([snapshot]) # no branch assert debian_origins_from_row(revision_row, storage) == [] # "remove" the snapshot, so we can add a new one with the same id storage = copy.deepcopy(storage_before_snapshot) snapshot = attr.evolve(snapshot, branches={b"foo": None,},) storage.snapshot_add([snapshot]) # dangling branch assert debian_origins_from_row(revision_row, storage) == [] # "remove" the snapshot again storage = copy.deepcopy(storage_before_snapshot) snapshot = attr.evolve( snapshot, branches={ b"foo": SnapshotBranch(target_type=TargetType.REVISION, target=revision_id,) }, ) storage.snapshot_add([snapshot]) # branch points to unknown revision assert debian_origins_from_row(revision_row, storage) == [] revision = Revision( id=revision_id, message=b"foo", author=Person.from_fullname(b"foo"), committer=Person.from_fullname(b"foo"), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1580076204, microseconds=0), - offset=60, - negative_utc=False, + offset_bytes=b"+0100", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1580076204, microseconds=0), - offset=60, - negative_utc=False, + offset_bytes=b"+0100", ), type=RevisionType.DSC, directory=b"\xd5\x9a\x1f\x9c\x80\x9d\x8c}19P\xf6\xc8\xa2\x0f^%H\xcd\xdb", synthetic=True, metadata=None, parents=(), extra_headers=(), ) storage.revision_add([revision]) # no matching branch assert debian_origins_from_row(revision_row, storage) == [] def test_debian_origins_from_row__check_revisions(): """Tests debian_origins_from_row errors when the revision at the head of a branch is a DSC and has no parents """ storage = get_storage("memory") origin_url = "deb://Debian/packages/kalgebra" revision_id = b"21" * 10 storage.origin_add([Origin(url=origin_url)]) revision_row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", "directory": DIRECTORY_ID, "metadata": {"original_artifact": [{"filename": "kalgebra_19.12.1-1.dsc",},]}, } storage.origin_visit_add( [ OriginVisit( origin=origin_url, date=datetime.datetime.now(tz=datetime.timezone.utc), type="deb", visit=280, ) ] ) storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=280, 
date=datetime.datetime.now(tz=datetime.timezone.utc), status="full", snapshot=b"42" * 10, metadata=None, ) ] ) storage.snapshot_add( [ Snapshot( id=b"42" * 10, branches={ b"foo": SnapshotBranch( target_type=TargetType.REVISION, target=revision_id ) }, ) ] ) storage_before_revision = copy.deepcopy(storage) revision = Revision( id=revision_id, message=b"foo", author=Person.from_fullname(b"foo"), committer=Person.from_fullname(b"foo"), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1580076204, microseconds=0), - offset=60, - negative_utc=False, + offset_bytes=b"+0100", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1580076204, microseconds=0), - offset=60, - negative_utc=False, + offset_bytes=b"+0100", ), type=RevisionType.DSC, directory=b"\xd5\x9a\x1f\x9c\x80\x9d\x8c}19P\xf6\xc8\xa2\x0f^%H\xcd\xdb", synthetic=True, metadata=None, parents=(b"parent " * 2,), extra_headers=(), ) storage.revision_add([revision]) with pytest.raises(AssertionError, match="revision with parents"): debian_origins_from_row(revision_row, storage) def test_debian_with_extrinsic(): dest_original_artifacts = [ { "length": 2936, "filename": "kalgebra_19.12.1-1.dsc", "checksums": { "sha1": "f869e9f1155b1ee6d28ae3b40060570152a358cd", "sha256": "75f77150aefdaa4bcf8bc5b1e9b8b90b5cb1651b76a068c5e58e5b83658d5d11", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc", }, { "length": 1156408, "filename": "kalgebra_19.12.1.orig.tar.xz", "checksums": { "sha1": "e496032962212983a5359aebadfe13c4026fd45c", "sha256": "49d623186800eb8f6fbb91eb43fb14dff78e112624c9cda6b331d494d610b16a", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz", }, { "length": 10044, "filename": "kalgebra_19.12.1-1.debian.tar.xz", "checksums": { "sha1": "b518bfc2ac708b40577c595bd539faa8b84572db", "sha256": "1a30acd2699c3769da302f7a0c63a7d7b060f80925b38c8c43ce3bec92744d67", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.debian.tar.xz", }, { "length": 488, "filename": "kalgebra_19.12.1.orig.tar.xz.asc", "checksums": { "sha1": "ff53a5c21c1aef2b9caa38a02fa3488f43df4c20", "sha256": "a37e0b95bb1f16b19b0587bc5d3b99ba63a195d7f6335c4a359122ad96d682dd", }, "url": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz.asc", }, ] source_original_artifacts = [ {k: v for (k, v) in d.items() if k != "url"} for d in dest_original_artifacts ] row = { "id": b"\x00\x00\x03l1\x1e\xf3:(\x1b\x05h\x8fn\xad\xcf\xc0\x94:\xee", "directory": DIRECTORY_ID, "date": datetime.datetime( 2020, 1, 26, 22, 3, 24, tzinfo=datetime.timezone.utc, ), "date_offset": 60, "type": "dsc", "message": b"Synthetic revision for Debian source package kalgebra version 4:19.12.1-1", "metadata": { "extrinsic": { "raw": { "id": 2718802, "name": "kalgebra", "files": { "kalgebra_19.12.1-1.dsc": { "uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc", "name": "kalgebra_19.12.1-1.dsc", "size": 2936, "md5sum": "fd28f604d4cc31a0a305543230f1622a", "sha256": "75f77150aefdaa4bcf8bc5b1e9b8b90b5cb1651b76a068c5e58e5b83658d5d11", }, "kalgebra_19.12.1.orig.tar.xz": { "uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz", "name": "kalgebra_19.12.1.orig.tar.xz", "size": 1156408, "md5sum": "34e09ed152da762d53101ea33634712b", "sha256": "49d623186800eb8f6fbb91eb43fb14dff78e112624c9cda6b331d494d610b16a", }, "kalgebra_19.12.1-1.debian.tar.xz": { "uri": 
"http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.debian.tar.xz", "name": "kalgebra_19.12.1-1.debian.tar.xz", "size": 10044, "md5sum": "4f639f36143898d97d044f273f038e58", "sha256": "1a30acd2699c3769da302f7a0c63a7d7b060f80925b38c8c43ce3bec92744d67", }, "kalgebra_19.12.1.orig.tar.xz.asc": { "uri": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1.orig.tar.xz.asc", "name": "kalgebra_19.12.1.orig.tar.xz.asc", "size": 488, "md5sum": "3c29291e4e6f0c294de80feb8e9fce4c", "sha256": "a37e0b95bb1f16b19b0587bc5d3b99ba63a195d7f6335c4a359122ad96d682dd", }, }, "version": "4:19.12.1-1", "revision_id": None, }, "when": "2020-01-27T19:32:03.925498+00:00", "provider": "http://deb.debian.org/debian//pool/main/k/kalgebra/kalgebra_19.12.1-1.dsc", }, "intrinsic": { "raw": { "name": "kalgebra", "version": "4:19.12.1-1", # ... }, "tool": "dsc", }, "original_artifact": source_original_artifacts, }, } origin_url = "deb://Debian/packages/kalgebra" storage = Mock() deposit_cur = None with patch("debian_origins_from_row", return_value=[origin_url]): handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2020, 1, 26, 22, 3, 24, tzinfo=datetime.timezone.utc, ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, revision=CoreSWHID.from_string( "swh:1:rev:0000036c311ef33a281b05688f6eadcfc0943aee" ), ), ] ), ] def test_debian_without_extrinsic(): source_original_artifacts = [ { "name": "pymongo_1.10-1.dsc", "sha1": "81877c1ae4406c2519b9cc9c4557cf6b0775a241", "length": 99, "sha256": "40269a73f38ee4c2f9cc021f1d5d091cc59ca6e778c339684b7be030e29e282f", "sha1_git": "0ac7bdb8e4d10926c5d3e51baa2be7bb29a3966b", }, { "name": "pymongo_1.10.orig.tar.gz", "sha1": "4f4c97641b86ac8f21396281bd1a7369236693c3", "length": 99, "sha256": "0b6bffb310782ffaeb3916c75790742ec5830c63a758fc711cd1f557eb5a4b5f", "sha1_git": "19ef0adda8868520d1ef9d4164b3ace4df1d62ad", }, { "name": "pymongo_1.10-1.debian.tar.gz", "sha1": "fbf378296613c8d55e043aec98896b3e50a94971", "length": 99, "sha256": "3970cc70fe3ba6499a9c56ba4b4c6c3782f56433d0d17d72b7a0e2ceae31b513", "sha1_git": "2eea9904806050a8fda95edd5d4fa60d29c1fdec", }, ] dest_original_artifacts = [ { "length": 99, "filename": "pymongo_1.10-1.dsc", "checksums": { "sha1": "81877c1ae4406c2519b9cc9c4557cf6b0775a241", "sha256": "40269a73f38ee4c2f9cc021f1d5d091cc59ca6e778c339684b7be030e29e282f", "sha1_git": "0ac7bdb8e4d10926c5d3e51baa2be7bb29a3966b", }, }, { "length": 99, "filename": "pymongo_1.10.orig.tar.gz", "checksums": { "sha1": "4f4c97641b86ac8f21396281bd1a7369236693c3", "sha256": "0b6bffb310782ffaeb3916c75790742ec5830c63a758fc711cd1f557eb5a4b5f", "sha1_git": "19ef0adda8868520d1ef9d4164b3ace4df1d62ad", }, }, { "length": 99, "filename": "pymongo_1.10-1.debian.tar.gz", "checksums": { "sha1": "fbf378296613c8d55e043aec98896b3e50a94971", "sha256": "3970cc70fe3ba6499a9c56ba4b4c6c3782f56433d0d17d72b7a0e2ceae31b513", "sha1_git": "2eea9904806050a8fda95edd5d4fa60d29c1fdec", }, }, ] row = { "id": b"\x00\x00\x01\xc2\x8c\x8f\xca\x01\xb9\x04\xde\x92\xa2d\n\x86l\xe0<\xb7", "directory": DIRECTORY_ID, "date": datetime.datetime( 2011, 3, 31, 20, 17, 41, tzinfo=datetime.timezone.utc ), "date_offset": 0, "type": "dsc", "message": b"Synthetic revision for Debian source package pymongo version 1.10-1", "metadata": { "package_info": { 
"name": "pymongo", "version": "1.10-1", "changelog": { # ... }, "maintainers": [ {"name": "Federico Ceratto", "email": "federico.ceratto@gmail.com"}, {"name": "Janos Guljas", "email": "janos@resenje.org"}, ], "pgp_signature": { "date": "2011-03-31T21:02:44+00:00", "keyid": "2BABC6254E66E7B8450AC3E1E6AA90171392B174", "person": {"name": "David Paleino", "email": "d.paleino@gmail.com"}, }, "lister_metadata": {"id": 244296, "lister": "snapshot.debian.org"}, }, "original_artifact": source_original_artifacts, }, } storage = Mock() origin_url = "http://snapshot.debian.org/package/pymongo" deposit_cur = None with patch("debian_origins_from_row", return_value=[origin_url]): handle_row(copy.deepcopy(row), storage, deposit_cur, dry_run=False) assert storage.method_calls == [ call.raw_extrinsic_metadata_add( [ RawExtrinsicMetadata( target=DIRECTORY_SWHID, discovery_date=datetime.datetime( 2011, 3, 31, 20, 17, 41, tzinfo=datetime.timezone.utc ), authority=SWH_AUTHORITY, fetcher=FETCHER, format="original-artifacts-json", metadata=json.dumps(dest_original_artifacts).encode(), origin=origin_url, revision=CoreSWHID.from_string( "swh:1:rev:000001c28c8fca01b904de92a2640a866ce03cb7" ), ), ] ) ] diff --git a/swh/storage/tests/storage_data.py b/swh/storage/tests/storage_data.py index 541d803b..a1e96802 100644 --- a/swh/storage/tests/storage_data.py +++ b/swh/storage/tests/storage_data.py @@ -1,715 +1,696 @@ # Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from typing import Tuple import attr from swh.model import from_disk from swh.model.hashutil import hash_to_bytes from swh.model.model import ( Content, Directory, DirectoryEntry, ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, ObjectType, Origin, OriginVisit, Person, RawExtrinsicMetadata, Release, Revision, RevisionType, SkippedContent, Snapshot, SnapshotBranch, TargetType, Timestamp, TimestampWithTimezone, ) from swh.model.swhids import CoreSWHID, ExtendedObjectType, ExtendedSWHID from swh.model.swhids import ObjectType as SwhidObjectType class StorageData: """Data model objects to use within tests. 
""" content = Content( data=b"42\n", length=3, sha1=hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4689"), sha1_git=hash_to_bytes("d81cc0710eb6cf9efd5b920a8453e1e07157b6cd"), sha256=hash_to_bytes( "084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0" ), blake2s256=hash_to_bytes( "d5fe1939576527e42cfd76a9455a2432fe7f56669564577dd93c4280e76d661d" ), status="visible", ) content2 = Content( data=b"4242\n", length=5, sha1=hash_to_bytes("61c2b3a30496d329e21af70dd2d7e097046d07b7"), sha1_git=hash_to_bytes("36fade77193cb6d2bd826161a0979d64c28ab4fa"), sha256=hash_to_bytes( "859f0b154fdb2d630f45e1ecae4a862915435e663248bb8461d914696fc047cd" ), blake2s256=hash_to_bytes( "849c20fad132b7c2d62c15de310adfe87be94a379941bed295e8141c6219810d" ), status="visible", ) content3 = Content( data=b"424242\n", length=7, sha1=hash_to_bytes("3e21cc4942a4234c9e5edd8a9cacd1670fe59f13"), sha1_git=hash_to_bytes("c932c7649c6dfa4b82327d121215116909eb3bea"), sha256=hash_to_bytes( "92fb72daf8c6818288a35137b72155f507e5de8d892712ab96277aaed8cf8a36" ), blake2s256=hash_to_bytes( "76d0346f44e5a27f6bafdd9c2befd304aff83780f93121d801ab6a1d4769db11" ), status="visible", ctime=datetime.datetime(2019, 12, 1, tzinfo=datetime.timezone.utc), ) contents: Tuple[Content, ...] = (content, content2, content3) skipped_content = SkippedContent( length=1024 * 1024 * 200, sha1_git=hash_to_bytes("33e45d56f88993aae6a0198013efa80716fd8920"), sha1=hash_to_bytes("43e45d56f88993aae6a0198013efa80716fd8920"), sha256=hash_to_bytes( "7bbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a" ), blake2s256=hash_to_bytes( "ade18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b" ), reason="Content too long", status="absent", origin="file:///dev/zero", ) skipped_content2 = SkippedContent( length=1024 * 1024 * 300, sha1_git=hash_to_bytes("44e45d56f88993aae6a0198013efa80716fd8921"), sha1=hash_to_bytes("54e45d56f88993aae6a0198013efa80716fd8920"), sha256=hash_to_bytes( "8cbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a" ), blake2s256=hash_to_bytes( "9ce18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b" ), reason="Content too long", status="absent", ) skipped_contents: Tuple[SkippedContent, ...] 
= (skipped_content, skipped_content2) directory5 = Directory( id=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), entries=(), ) directory = Directory( id=hash_to_bytes("5256e856a0a0898966d6ba14feb4388b8b82d302"), entries=tuple( [ DirectoryEntry( name=b"foo", type="file", target=content.sha1_git, perms=from_disk.DentryPerms.content, ), DirectoryEntry( name=b"bar\xc3", type="dir", target=directory5.id, perms=from_disk.DentryPerms.directory, ), ], ), ) directory2 = Directory( id=hash_to_bytes("8505808532953da7d2581741f01b29c04b1cb9ab"), entries=tuple( [ DirectoryEntry( name=b"oof", type="file", target=content2.sha1_git, perms=from_disk.DentryPerms.content, ) ], ), ) directory3 = Directory( id=hash_to_bytes("13089e6e544f78df7c9a40a3059050d10dee686a"), entries=tuple( [ DirectoryEntry( name=b"foo", type="file", target=content.sha1_git, perms=from_disk.DentryPerms.content, ), DirectoryEntry( name=b"subdir", type="dir", target=directory.id, perms=from_disk.DentryPerms.directory, ), DirectoryEntry( name=b"hello", type="file", target=content2.sha1_git, perms=from_disk.DentryPerms.content, ), ], ), ) directory4 = Directory( id=hash_to_bytes("cd5dfd9c09d9e99ed123bc7937a0d5fddc3cd531"), entries=tuple( [ DirectoryEntry( name=b"subdir1", type="dir", target=directory3.id, perms=from_disk.DentryPerms.directory, ) ], ), ) directories: Tuple[Directory, ...] = ( directory2, directory, directory3, directory4, directory5, ) revision = Revision( id=hash_to_bytes("01a7114f36fddd5ef2511b2cadda237a68adbb12"), message=b"hello", author=Person( name=b"Nicolas Dandrimont", email=b"nicolas@example.com", fullname=b"Nicolas Dandrimont ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0), - offset=120, - negative_utc=False, + offset_bytes=b"+0200", ), committer=Person( name=b"St\xc3fano Zacchiroli", email=b"stefano@example.com", fullname=b"St\xc3fano Zacchiroli ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1123456789, microseconds=0), - offset=120, - negative_utc=False, + offset_bytes=b"+0200", ), parents=(), type=RevisionType.GIT, directory=directory.id, metadata={ "checksums": {"sha1": "tarball-sha1", "sha256": "tarball-sha256",}, "signed-off-by": "some-dude", }, extra_headers=( (b"gpgsig", b"test123"), (b"mergetag", b"foo\\bar"), (b"mergetag", b"\x22\xaf\x89\x80\x01\x00"), ), synthetic=True, ) revision2 = Revision( id=hash_to_bytes("a646dd94c912829659b22a1e7e143d2fa5ebde1b"), message=b"hello again", author=Person( name=b"Roberto Dicosmo", email=b"roberto@example.com", fullname=b"Roberto Dicosmo ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), committer=Person( name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1123456789, microseconds=220000,), - offset=0, - negative_utc=False, + offset_bytes=b"+0000", ), parents=tuple([revision.id]), type=RevisionType.GIT, directory=directory2.id, metadata=None, extra_headers=(), synthetic=False, ) revision3 = Revision( id=hash_to_bytes("beb2844dff30658e27573cb46eb55980e974b391"), message=b"a simple revision with no parents this time", author=Person( name=b"Roberto Dicosmo", email=b"roberto@example.com", fullname=b"Roberto Dicosmo ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), committer=Person( name=b"tony", 
email=b"ar@dumont.fr", fullname=b"tony ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1127351742, microseconds=220000,), - offset=0, - negative_utc=False, + offset_bytes=b"+0000", ), parents=tuple([revision.id, revision2.id]), type=RevisionType.GIT, directory=directory2.id, metadata=None, extra_headers=(), synthetic=True, ) revision4 = Revision( id=hash_to_bytes("ae860aec43700c7f5a295e2ef47e2ae41b535dfe"), message=b"parent of self.revision2", author=Person( name=b"me", email=b"me@soft.heri", fullname=b"me ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), committer=Person( name=b"committer-dude", email=b"committer@dude.com", fullname=b"committer-dude ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1244567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), parents=tuple([revision3.id]), type=RevisionType.GIT, directory=directory.id, metadata=None, extra_headers=(), synthetic=False, ) git_revisions: Tuple[Revision, ...] = (revision, revision2, revision3, revision4) hg_revision = Revision( id=hash_to_bytes("951c9503541e7beaf002d7aebf2abd1629084c68"), message=b"hello", author=Person( name=b"Nicolas Dandrimont", email=b"nicolas@example.com", fullname=b"Nicolas Dandrimont ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0), - offset=120, - negative_utc=False, + offset_bytes=b"+0200", ), committer=Person( name=b"St\xc3fano Zacchiroli", email=b"stefano@example.com", fullname=b"St\xc3fano Zacchiroli ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1123456789, microseconds=0), - offset=120, - negative_utc=False, + offset_bytes=b"+0200", ), parents=(), type=RevisionType.MERCURIAL, directory=directory.id, metadata={ "checksums": {"sha1": "tarball-sha1", "sha256": "tarball-sha256",}, "signed-off-by": "some-dude", "node": "a316dfb434af2b451c1f393496b7eaeda343f543", }, extra_headers=(), synthetic=True, ) hg_revision2 = Revision( id=hash_to_bytes("df4afb063236300eb13b96a0d7fff03f7b7cbbaf"), message=b"hello again", author=Person( name=b"Roberto Dicosmo", email=b"roberto@example.com", fullname=b"Roberto Dicosmo ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), committer=Person( name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1123456789, microseconds=220000,), - offset=0, - negative_utc=False, + offset_bytes=b"+0000", ), parents=tuple([hg_revision.id]), type=RevisionType.MERCURIAL, directory=directory2.id, metadata=None, extra_headers=( (b"node", hash_to_bytes("fa1b7c84a9b40605b67653700f268349a6d6aca1")), ), synthetic=False, ) hg_revision3 = Revision( id=hash_to_bytes("84d8e7081b47ebb88cad9fa1f25de5f330872a37"), message=b"a simple revision with no parents this time", author=Person( name=b"Roberto Dicosmo", email=b"roberto@example.com", fullname=b"Roberto Dicosmo ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), committer=Person( name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1127351742, microseconds=220000,), - offset=0, - negative_utc=False, + offset_bytes=b"+0000", ), 
parents=tuple([hg_revision.id, hg_revision2.id]), type=RevisionType.MERCURIAL, directory=directory2.id, metadata=None, extra_headers=( (b"node", hash_to_bytes("7f294a01c49065a90b3fe8b4ad49f08ce9656ef6")), ), synthetic=True, ) hg_revision4 = Revision( id=hash_to_bytes("4683324ba26dfe941a72cc7552e86eaaf7c27fe3"), message=b"parent of self.revision2", author=Person( name=b"me", email=b"me@soft.heri", fullname=b"me ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), committer=Person( name=b"committer-dude", email=b"committer@dude.com", fullname=b"committer-dude ", ), committer_date=TimestampWithTimezone( timestamp=Timestamp(seconds=1244567843, microseconds=220000,), - offset=-720, - negative_utc=False, + offset_bytes=b"-1200", ), parents=tuple([hg_revision3.id]), type=RevisionType.MERCURIAL, directory=directory.id, metadata=None, extra_headers=( (b"node", hash_to_bytes("f4160af0485c85823d9e829bae2c00b00a2e6297")), ), synthetic=False, ) hg_revisions: Tuple[Revision, ...] = ( hg_revision, hg_revision2, hg_revision3, hg_revision4, ) revisions: Tuple[Revision, ...] = git_revisions + hg_revisions origins: Tuple[Origin, ...] = ( Origin(url="https://github.com/user1/repo1"), Origin(url="https://github.com/user2/repo1"), Origin(url="https://github.com/user3/repo1"), Origin(url="https://gitlab.com/user1/repo1"), Origin(url="https://gitlab.com/user2/repo1"), Origin(url="https://forge.softwareheritage.org/source/repo1"), Origin(url="https://example.рф/🏛️.txt"), ) origin, origin2 = origins[:2] metadata_authority = MetadataAuthority( type=MetadataAuthorityType.DEPOSIT_CLIENT, url="http://hal.inria.example.com/", ) metadata_authority2 = MetadataAuthority( type=MetadataAuthorityType.REGISTRY, url="http://wikidata.example.com/", ) authorities: Tuple[MetadataAuthority, ...] = ( metadata_authority, metadata_authority2, ) metadata_fetcher = MetadataFetcher(name="swh-deposit", version="0.0.1",) metadata_fetcher2 = MetadataFetcher(name="swh-example", version="0.0.1",) fetchers: Tuple[MetadataFetcher, ...] = (metadata_fetcher, metadata_fetcher2) date_visit1 = datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) date_visit2 = datetime.datetime(2017, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) date_visit3 = datetime.datetime(2018, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) type_visit1 = "git" type_visit2 = "hg" type_visit3 = "deb" origin_visit = OriginVisit( origin=origin.url, visit=1, date=date_visit1, type=type_visit1, ) origin_visit2 = OriginVisit( origin=origin.url, visit=2, date=date_visit2, type=type_visit1, ) origin_visit3 = OriginVisit( origin=origin2.url, visit=1, date=date_visit1, type=type_visit2, ) origin_visits: Tuple[OriginVisit, ...] 
= ( origin_visit, origin_visit2, origin_visit3, ) release = Release( id=hash_to_bytes("f7f222093a18ec60d781070abec4a630c850b837"), name=b"v0.0.1", author=Person( name=b"olasd", email=b"nic@olasd.fr", fullname=b"olasd ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0), - offset=42, - negative_utc=False, + offset_bytes=b"+0042", ), target=revision.id, target_type=ObjectType.REVISION, message=b"synthetic release", synthetic=True, ) release2 = Release( id=hash_to_bytes("db81a26783a3f4a9db07b4759ffc37621f159bb2"), name=b"v0.0.2", author=Person( name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1634366813, microseconds=0), - offset=-120, - negative_utc=False, + offset_bytes=b"-0200", ), target=revision2.id, target_type=ObjectType.REVISION, message=b"v0.0.2\nMisc performance improvements + bug fixes", synthetic=False, ) release3 = Release( id=hash_to_bytes("1c5d42e603ce2eea44917fadca76c78bad76aeb9"), name=b"v0.0.2", author=Person( name=b"tony", email=b"tony@ardumont.fr", fullname=b"tony ", ), date=TimestampWithTimezone( timestamp=Timestamp(seconds=1634366813, microseconds=0), - offset=-120, - negative_utc=False, + offset_bytes=b"-0200", ), target=revision3.id, target_type=ObjectType.REVISION, message=b"yet another synthetic release", synthetic=True, ) releases: Tuple[Release, ...] = (release, release2, release3) snapshot = Snapshot( id=hash_to_bytes("9b922e6d8d5b803c1582aabe5525b7b91150788e"), branches={ b"master": SnapshotBranch( target=revision.id, target_type=TargetType.REVISION, ), }, ) empty_snapshot = Snapshot( id=hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), branches={}, ) complete_snapshot = Snapshot( id=hash_to_bytes("db99fda25b43dc5cd90625ee4b0744751799c917"), branches={ b"directory": SnapshotBranch( target=directory.id, target_type=TargetType.DIRECTORY, ), b"directory2": SnapshotBranch( target=directory2.id, target_type=TargetType.DIRECTORY, ), b"content": SnapshotBranch( target=content.sha1_git, target_type=TargetType.CONTENT, ), b"alias": SnapshotBranch(target=b"revision", target_type=TargetType.ALIAS,), b"revision": SnapshotBranch( target=revision.id, target_type=TargetType.REVISION, ), b"release": SnapshotBranch( target=release.id, target_type=TargetType.RELEASE, ), b"snapshot": SnapshotBranch( target=empty_snapshot.id, target_type=TargetType.SNAPSHOT, ), b"dangling": None, }, ) snapshots: Tuple[Snapshot, ...] 
= (snapshot, empty_snapshot, complete_snapshot) content_metadata1 = RawExtrinsicMetadata( target=ExtendedSWHID( object_type=ExtendedObjectType.CONTENT, object_id=content.sha1_git ), origin=origin.url, discovery_date=datetime.datetime( 2015, 1, 1, 21, 0, 0, tzinfo=datetime.timezone.utc ), authority=metadata_authority, fetcher=metadata_fetcher, format="json", metadata=b'{"foo": "bar"}', ) content_metadata2 = RawExtrinsicMetadata( target=ExtendedSWHID( object_type=ExtendedObjectType.CONTENT, object_id=content.sha1_git ), origin=origin2.url, discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=metadata_authority, fetcher=metadata_fetcher, format="yaml", metadata=b"foo: bar", ) content_metadata3 = RawExtrinsicMetadata( target=ExtendedSWHID( object_type=ExtendedObjectType.CONTENT, object_id=content.sha1_git ), discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority2, metadata=None), fetcher=attr.evolve(metadata_fetcher2, metadata=None), format="yaml", metadata=b"foo: bar", origin=origin.url, visit=42, snapshot=snapshot.swhid(), release=release.swhid(), revision=revision.swhid(), directory=directory.swhid(), path=b"/foo/bar", ) content_metadata: Tuple[RawExtrinsicMetadata, ...] = ( content_metadata1, content_metadata2, content_metadata3, ) origin_metadata1 = RawExtrinsicMetadata( target=Origin(origin.url).swhid(), discovery_date=datetime.datetime( 2015, 1, 1, 21, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority, metadata=None), fetcher=attr.evolve(metadata_fetcher, metadata=None), format="json", metadata=b'{"foo": "bar"}', ) origin_metadata2 = RawExtrinsicMetadata( target=Origin(origin.url).swhid(), discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority, metadata=None), fetcher=attr.evolve(metadata_fetcher, metadata=None), format="yaml", metadata=b"foo: bar", ) origin_metadata3 = RawExtrinsicMetadata( target=Origin(origin.url).swhid(), discovery_date=datetime.datetime( 2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc ), authority=attr.evolve(metadata_authority2, metadata=None), fetcher=attr.evolve(metadata_fetcher2, metadata=None), format="yaml", metadata=b"foo: bar", ) origin_metadata: Tuple[RawExtrinsicMetadata, ...] = ( origin_metadata1, origin_metadata2, origin_metadata3, ) extid1 = ExtID( target=CoreSWHID(object_type=SwhidObjectType.REVISION, object_id=revision.id), extid_type="git", extid=revision.id, ) extid2 = ExtID( target=CoreSWHID( object_type=SwhidObjectType.REVISION, object_id=hg_revision.id ), extid_type="mercurial", extid=hash_to_bytes("a316dfb434af2b451c1f393496b7eaeda343f543"), ) extid3 = ExtID( target=CoreSWHID(object_type=SwhidObjectType.DIRECTORY, object_id=directory.id), extid_type="directory", extid=b"something", ) extid4 = ExtID( target=CoreSWHID( object_type=SwhidObjectType.DIRECTORY, object_id=directory2.id ), extid_type="directory", extid=b"something", extid_version=2, ) extids: Tuple[ExtID, ...] 
= (extid1, extid2, extid3, extid4,)

diff --git a/swh/storage/tests/storage_tests.py b/swh/storage/tests/storage_tests.py
index 25be8307..dce4d8b8 100644
--- a/swh/storage/tests/storage_tests.py
+++ b/swh/storage/tests/storage_tests.py
@@ -1,4976 +1,4975 @@
# Copyright (C) 2015-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from collections import defaultdict
import datetime
from datetime import timedelta
import inspect
import itertools
import math
import random
from typing import Any, ClassVar, Dict, Iterator, Optional
from unittest.mock import MagicMock

import attr
from hypothesis import HealthCheck, given, settings, strategies
import pytest

from swh.core.api import RemoteException
from swh.core.api.classes import stream_results
from swh.model import from_disk, hypothesis_strategies
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes
from swh.model.model import (
    Content,
    Directory,
    ExtID,
    Origin,
    OriginVisit,
    OriginVisitStatus,
    Person,
    RawExtrinsicMetadata,
    Revision,
    RevisionType,
    SkippedContent,
    Snapshot,
    SnapshotBranch,
    TargetType,
    Timestamp,
    TimestampWithTimezone,
)
from swh.model.swhids import CoreSWHID, ObjectType
from swh.storage import get_storage
from swh.storage.cassandra.storage import CassandraStorage
from swh.storage.common import origin_url_to_sha1 as sha1
from swh.storage.exc import HashCollision, StorageArgumentException
from swh.storage.in_memory import InMemoryStorage
from swh.storage.interface import ListOrder, PagedResult, StorageInterface
from swh.storage.tests.conftest import function_scoped_fixture_check
from swh.storage.utils import (
    content_hex_hashes,
    now,
    remove_keys,
    round_to_milliseconds,
)


def transform_entries(
    storage: StorageInterface, dir_: Directory, *, prefix: bytes = b""
) -> Iterator[Dict[str, Any]]:
    """Iterates through a directory's entries and yields the items
    'directory_ls' is expected to return, including content metadata for
    file entries."""
    for ent in dir_.entries:
        if ent.type == "dir":
            yield {
                "dir_id": dir_.id,
                "type": ent.type,
                "target": ent.target,
                "name": prefix + ent.name,
                "perms": ent.perms,
                "status": None,
                "sha1": None,
                "sha1_git": None,
                "sha256": None,
                "length": None,
            }
        elif ent.type == "file":
            contents = storage.content_find({"sha1_git": ent.target})
            assert contents
            ent_dict = contents[0].to_dict()
            for key in ["ctime", "blake2s256"]:
                ent_dict.pop(key, None)
            ent_dict.update(
                {
                    "dir_id": dir_.id,
                    "type": ent.type,
                    "target": ent.target,
                    "name": prefix + ent.name,
                    "perms": ent.perms,
                }
            )
            yield ent_dict


def assert_contents_ok(
    expected_contents, actual_contents, keys_to_check={"sha1", "data"}
):
    """Assert that a given list of contents matches on a given set of keys."""
    for k in keys_to_check:
        expected_list = set([c.get(k) for c in expected_contents])
        actual_list = set([c.get(k) for c in actual_contents])
        assert actual_list == expected_list, k


class LazyContent(Content):
    def with_data(self):
        return Content.from_dict({**self.to_dict(), "data": b"42\n"})


class TestStorage:
    """Main class for Storage testing.

    This class is used as-is to test local storage (see TestLocalStorage
    below) and remote storage (see TestRemoteStorage in
    test_remote_storage.py).

    We need to have the two classes inherit from this base class separately
    to avoid nosetests running the tests from the base class twice.
""" maxDiff = None # type: ClassVar[Optional[int]] def test_types(self, swh_storage_backend_config): """Checks all methods of StorageInterface are implemented by this backend, and that they have the same signature.""" # Create an instance of the protocol (which cannot be instantiated # directly, so this creates a subclass, then instantiates it) interface = type("_", (StorageInterface,), {})() storage = get_storage(**swh_storage_backend_config) assert "content_add" in dir(interface) missing_methods = [] for meth_name in dir(interface): if meth_name.startswith("_"): continue interface_meth = getattr(interface, meth_name) try: concrete_meth = getattr(storage, meth_name) except AttributeError: if not getattr(interface_meth, "deprecated_endpoint", False): # The backend is missing a (non-deprecated) endpoint missing_methods.append(meth_name) continue expected_signature = inspect.signature(interface_meth) actual_signature = inspect.signature(concrete_meth) assert expected_signature == actual_signature, meth_name assert missing_methods == [] # If all the assertions above succeed, then this one should too. # But there's no harm in double-checking. # And we could replace the assertions above by this one, but unlike # the assertions above, it doesn't explain what is missing. assert isinstance(storage, StorageInterface) def test_check_config(self, swh_storage): assert swh_storage.check_config(check_write=True) assert swh_storage.check_config(check_write=False) def test_content_add(self, swh_storage, sample_data): cont = sample_data.content insertion_start_time = now() actual_result = swh_storage.content_add([cont]) insertion_end_time = now() assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } assert swh_storage.content_get_data(cont.sha1) == cont.data expected_cont = attr.evolve(cont, data=None) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: assert insertion_start_time <= obj.ctime assert obj.ctime <= insertion_end_time assert obj == expected_cont if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["content"] == 1 def test_content_add_from_lazy_content(self, swh_storage, sample_data): cont = sample_data.content lazy_content = LazyContent.from_dict(cont.to_dict()) insertion_start_time = now() actual_result = swh_storage.content_add([lazy_content]) insertion_end_time = now() assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } # the fact that we retrieve the content object from the storage with # the correct 'data' field ensures it has been 'called' assert swh_storage.content_get_data(cont.sha1) == cont.data expected_cont = attr.evolve(lazy_content, data=None, ctime=None) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: assert insertion_start_time <= obj.ctime assert obj.ctime <= insertion_end_time assert attr.evolve(obj, ctime=None).to_dict() == expected_cont.to_dict() if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["content"] == 1 def test_content_get_data_missing(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] swh_storage.content_add([cont]) # Query a single missing content 
actual_content_data = swh_storage.content_get_data(cont2.sha1) assert actual_content_data is None # Check content_get does not abort after finding a missing content actual_content_data = swh_storage.content_get_data(cont.sha1) assert actual_content_data == cont.data actual_content_data = swh_storage.content_get_data(cont2.sha1) assert actual_content_data is None def test_content_add_different_input(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] actual_result = swh_storage.content_add([cont, cont2]) assert actual_result == { "content:add": 2, "content:add:bytes": cont.length + cont2.length, } def test_content_add_twice(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] actual_result = swh_storage.content_add([cont]) assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } assert len(swh_storage.journal_writer.journal.objects) == 1 actual_result = swh_storage.content_add([cont, cont2]) assert actual_result == { "content:add": 1, "content:add:bytes": cont2.length, } assert 2 <= len(swh_storage.journal_writer.journal.objects) <= 3 assert len(swh_storage.content_find(cont.to_dict())) == 1 assert len(swh_storage.content_find(cont2.to_dict())) == 1 def test_content_add_collision(self, swh_storage, sample_data): cont1 = sample_data.content # create (corrupted) content with same sha1{,_git} but != sha256 sha256_array = bytearray(cont1.sha256) sha256_array[0] += 1 cont1b = attr.evolve(cont1, sha256=bytes(sha256_array)) with pytest.raises(HashCollision) as cm: swh_storage.content_add([cont1, cont1b]) exc = cm.value actual_algo = exc.algo assert actual_algo in ["sha1", "sha1_git"] actual_id = exc.hash_id assert actual_id == getattr(cont1, actual_algo).hex() collisions = exc.args[2] assert len(collisions) == 2 assert collisions == [ content_hex_hashes(cont1.hashes()), content_hex_hashes(cont1b.hashes()), ] assert exc.colliding_content_hashes() == [ cont1.hashes(), cont1b.hashes(), ] def test_content_add_duplicate(self, swh_storage, sample_data): cont = sample_data.content swh_storage.content_add([cont, cont]) assert swh_storage.content_get_data(cont.sha1) == cont.data def test_content_update(self, swh_storage, sample_data): cont1 = sample_data.content if hasattr(swh_storage, "journal_writer"): swh_storage.journal_writer.journal = None # TODO, not supported swh_storage.content_add([cont1]) # alter the sha1_git for example cont1b = attr.evolve( cont1, sha1_git=hash_to_bytes("3a60a5275d0333bf13468e8b3dcab90f4046e654") ) swh_storage.content_update([cont1b.to_dict()], keys=["sha1_git"]) actual_contents = swh_storage.content_get([cont1.sha1]) expected_content = attr.evolve(cont1b, data=None) assert actual_contents == [expected_content] def test_content_add_metadata(self, swh_storage, sample_data): cont = attr.evolve(sample_data.content, data=None, ctime=now()) actual_result = swh_storage.content_add_metadata([cont]) assert actual_result == { "content:add": 1, } expected_cont = cont assert swh_storage.content_get([cont.sha1]) == [expected_cont] contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: obj = attr.evolve(obj, ctime=None) assert obj == cont def test_content_add_metadata_different_input(self, swh_storage, sample_data): contents = sample_data.contents[:2] cont = attr.evolve(contents[0], data=None, ctime=now()) cont2 = attr.evolve(contents[1], data=None, ctime=now()) actual_result = swh_storage.content_add_metadata([cont, cont2]) 
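        # content_add_metadata writes no data blob, so unlike content_add its
        # result counters carry no "content:add:bytes" entry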
assert actual_result == {
            "content:add": 2,
        }

    def test_content_add_metadata_collision(self, swh_storage, sample_data):
        cont1 = attr.evolve(sample_data.content, data=None, ctime=now())

        # create (corrupted) content with same sha1{,_git} but != sha256
        sha256_array = bytearray(cont1.sha256)
        sha256_array[0] += 1
        cont1b = attr.evolve(cont1, sha256=bytes(sha256_array))

        with pytest.raises(HashCollision) as cm:
            swh_storage.content_add_metadata([cont1, cont1b])

        exc = cm.value
        actual_algo = exc.algo
        assert actual_algo in ["sha1", "sha1_git", "blake2s256"]
        actual_id = exc.hash_id
        assert actual_id == getattr(cont1, actual_algo).hex()
        collisions = exc.args[2]
        assert len(collisions) == 2
        assert collisions == [
            content_hex_hashes(cont1.hashes()),
            content_hex_hashes(cont1b.hashes()),
        ]
        assert exc.colliding_content_hashes() == [
            cont1.hashes(),
            cont1b.hashes(),
        ]

    def test_content_add_objstorage_first(self, swh_storage, sample_data):
        """Tests the objstorage is written to before the DB and journal"""
        cont = sample_data.content

        swh_storage.objstorage.content_add = MagicMock(side_effect=Exception("Oops"))

        # Try to add, but the objstorage crashes
        try:
            swh_storage.content_add([cont])
        except Exception:
            pass

        # The DB must be written to after the objstorage, so the DB should be
        # unchanged if the objstorage crashed
        assert swh_storage.content_get_data(cont.sha1) is None

        # The journal too
        assert list(swh_storage.journal_writer.journal.objects) == []

    def test_skipped_content_add(self, swh_storage, sample_data):
        contents = sample_data.skipped_contents[:2]
        cont = contents[0]
        cont2 = attr.evolve(contents[1], blake2s256=None)

        contents_dict = [c.to_dict() for c in [cont, cont2]]

        missing = list(swh_storage.skipped_content_missing(contents_dict))
        assert missing == [cont.hashes(), cont2.hashes()]

        actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
        assert 2 <= actual_result.pop("skipped_content:add") <= 3
        assert actual_result == {}

        missing = list(swh_storage.skipped_content_missing(contents_dict))
        assert missing == []

    def test_skipped_content_add_missing_hashes(self, swh_storage, sample_data):
        cont, cont2 = [
            attr.evolve(c, sha1_git=None) for c in sample_data.skipped_contents[:2]
        ]
        contents_dict = [c.to_dict() for c in [cont, cont2]]

        missing = list(swh_storage.skipped_content_missing(contents_dict))
        assert len(missing) == 2

        actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
        assert 2 <= actual_result.pop("skipped_content:add") <= 3
        assert actual_result == {}

        missing = list(swh_storage.skipped_content_missing(contents_dict))
        assert missing == []

    def test_skipped_content_missing_partial_hash(self, swh_storage, sample_data):
        cont = sample_data.skipped_content
        cont2 = attr.evolve(cont, sha1_git=None)
        contents_dict = [c.to_dict() for c in [cont, cont2]]

        missing = list(swh_storage.skipped_content_missing(contents_dict))
        assert len(missing) == 2

        actual_result = swh_storage.skipped_content_add([cont])
        assert actual_result.pop("skipped_content:add") == 1
        assert actual_result == {}

        missing = list(swh_storage.skipped_content_missing(contents_dict))
        assert missing == [cont2.hashes()]

    @pytest.mark.property_based
    @settings(
        deadline=None,  # this test is very slow
        suppress_health_check=function_scoped_fixture_check,
    )
    @given(
        strategies.sets(
            elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]),
            min_size=0,
        )
    )
    def test_content_missing(self, swh_storage, sample_data, algos):
        algos |= {"sha1"}
        content, missing_content = [sample_data.content2, sample_data.skipped_content]
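        # derive unknown contents from missing_content by bumping the first
        # byte of each hash selected by hypothesis; every variant must then be
        # reported missing, whichever key_hash is queried on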
swh_storage.content_add([content]) test_contents = [content.to_dict()] missing_per_hash = defaultdict(list) for i in range(256): test_content = missing_content.to_dict() for hash in algos: test_content[hash] = bytes([i]) + test_content[hash][1:] missing_per_hash[hash].append(test_content[hash]) test_contents.append(test_content) assert set(swh_storage.content_missing(test_contents)) == set( missing_per_hash["sha1"] ) for hash in algos: assert set( swh_storage.content_missing(test_contents, key_hash=hash) ) == set(missing_per_hash[hash]) @pytest.mark.property_based @settings(suppress_health_check=function_scoped_fixture_check,) @given( strategies.sets( elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]), min_size=0, ) ) def test_content_missing_unknown_algo(self, swh_storage, sample_data, algos): algos |= {"sha1"} content, missing_content = [sample_data.content2, sample_data.skipped_content] swh_storage.content_add([content]) test_contents = [content.to_dict()] missing_per_hash = defaultdict(list) for i in range(16): test_content = missing_content.to_dict() for hash in algos: test_content[hash] = bytes([i]) + test_content[hash][1:] missing_per_hash[hash].append(test_content[hash]) test_content["nonexisting_algo"] = b"\x00" test_contents.append(test_content) assert set(swh_storage.content_missing(test_contents)) == set( missing_per_hash["sha1"] ) for hash in algos: assert set( swh_storage.content_missing(test_contents, key_hash=hash) ) == set(missing_per_hash[hash]) def test_content_missing_per_sha1(self, swh_storage, sample_data): # given cont = sample_data.content cont2 = sample_data.content2 missing_cont = sample_data.skipped_content missing_cont2 = sample_data.skipped_content2 swh_storage.content_add([cont, cont2]) # when gen = swh_storage.content_missing_per_sha1( [cont.sha1, missing_cont.sha1, cont2.sha1, missing_cont2.sha1] ) # then assert list(gen) == [missing_cont.sha1, missing_cont2.sha1] def test_content_missing_per_sha1_git(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] missing_cont = sample_data.skipped_content missing_cont2 = sample_data.skipped_content2 swh_storage.content_add([cont, cont2]) contents = [ cont.sha1_git, cont2.sha1_git, missing_cont.sha1_git, missing_cont2.sha1_git, ] missing_contents = swh_storage.content_missing_per_sha1_git(contents) assert list(missing_contents) == [missing_cont.sha1_git, missing_cont2.sha1_git] missing_contents = swh_storage.content_missing_per_sha1_git([]) assert list(missing_contents) == [] def test_content_get_partition(self, swh_storage, swh_contents): """content_get_partition paginates results if limit exceeded""" expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_contents = [] for i in range(16): actual_result = swh_storage.content_get_partition(i, 16) assert actual_result.next_page_token is None actual_contents.extend(actual_result.results) assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents assert content.ctime is None def test_content_get_partition_full(self, swh_storage, swh_contents): """content_get_partition for a single partition returns all available contents """ expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_result = swh_storage.content_get_partition(0, 1) assert actual_result.next_page_token is None actual_contents = actual_result.results assert len(actual_contents) == len(expected_contents) for content in 
actual_contents: assert content in expected_contents def test_content_get_partition_empty(self, swh_storage, swh_contents): """content_get_partition when at least one of the partitions is empty""" expected_contents = { cont.sha1 for cont in swh_contents if cont.status != "absent" } # nb_partitions = smallest power of 2 such that at least one of # the partitions is empty nb_partitions = 1 << math.floor(math.log2(len(swh_contents)) + 1) seen_sha1s = [] for i in range(nb_partitions): actual_result = swh_storage.content_get_partition( i, nb_partitions, limit=len(swh_contents) + 1 ) for content in actual_result.results: seen_sha1s.append(content.sha1) # Limit is higher than the max number of results assert actual_result.next_page_token is None assert set(seen_sha1s) == expected_contents def test_content_get_partition_limit_none(self, swh_storage): """content_get_partition call with wrong limit input should fail""" with pytest.raises(StorageArgumentException, match="limit should not be None"): swh_storage.content_get_partition(1, 16, limit=None) def test_content_get_partition_pagination_generate(self, swh_storage, swh_contents): """content_get_partition returns contents within range provided""" expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] # retrieve contents actual_contents = [] for i in range(4): page_token = None while True: actual_result = swh_storage.content_get_partition( i, 4, limit=3, page_token=page_token ) actual_contents.extend(actual_result.results) page_token = actual_result.next_page_token if page_token is None: break assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents @pytest.mark.parametrize("algo", sorted(DEFAULT_ALGORITHMS)) def test_content_get(self, swh_storage, sample_data, algo): cont1, cont2 = sample_data.contents[:2] swh_storage.content_add([cont1, cont2]) actual_contents = swh_storage.content_get( [getattr(cont1, algo), getattr(cont2, algo)], algo ) # we only retrieve the metadata so no data nor ctime within expected_contents = [attr.evolve(c, data=None) for c in [cont1, cont2]] assert actual_contents == expected_contents for content in actual_contents: assert content.ctime is None @pytest.mark.parametrize("algo", sorted(DEFAULT_ALGORITHMS)) def test_content_get_missing(self, swh_storage, sample_data, algo): cont1, cont2 = sample_data.contents[:2] assert cont1.sha1 != cont2.sha1 missing_cont = sample_data.skipped_content swh_storage.content_add([cont1, cont2]) actual_contents = swh_storage.content_get( [getattr(cont1, algo), getattr(cont2, algo), getattr(missing_cont, algo)], algo, ) expected_contents = [ attr.evolve(c, data=None) if c else None for c in [cont1, cont2, None] ] assert actual_contents == expected_contents def test_content_get_random(self, swh_storage, sample_data): cont, cont2, cont3 = sample_data.contents[:3] swh_storage.content_add([cont, cont2, cont3]) assert swh_storage.content_get_random() in { cont.sha1_git, cont2.sha1_git, cont3.sha1_git, } def test_directory_add(self, swh_storage, sample_data): content = sample_data.content directory = sample_data.directory assert directory.entries[0].target == content.sha1_git swh_storage.content_add([content]) init_missing = list(swh_storage.directory_missing([directory.id])) assert [directory.id] == init_missing actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 1} assert ("directory", directory) in list( swh_storage.journal_writer.journal.objects ) 
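        # directory_ls output is checked against transform_entries, which
        # resolves file entries to their stored content metadata and leaves
        # "dir" entries with bare (None) hash fields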
actual_data = list(swh_storage.directory_ls(directory.id)) expected_data = list(transform_entries(swh_storage, directory)) for data in actual_data: assert data in expected_data after_missing = list(swh_storage.directory_missing([directory.id])) assert after_missing == [] if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["directory"] == 1 def test_directory_add_with_raw_manifest(self, swh_storage, sample_data): content = sample_data.content directory = sample_data.directory directory = attr.evolve(directory, raw_manifest=b"foo") directory = attr.evolve(directory, id=directory.compute_hash()) assert directory.entries[0].target == content.sha1_git swh_storage.content_add([content]) init_missing = list(swh_storage.directory_missing([directory.id])) assert [directory.id] == init_missing actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 1} assert ("directory", directory) in list( swh_storage.journal_writer.journal.objects ) actual_data = list(swh_storage.directory_ls(directory.id)) expected_data = list(transform_entries(swh_storage, directory)) for data in actual_data: assert data in expected_data after_missing = list(swh_storage.directory_missing([directory.id])) assert after_missing == [] # TODO: check the recorded manifest @settings( suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large] + function_scoped_fixture_check, ) @given( strategies.lists(hypothesis_strategies.directories(), min_size=1, max_size=10) ) def test_directory_add_get_arbitrary(self, swh_storage, directories): swh_storage.directory_add(directories) for directory in directories: if directory.raw_manifest is None: assert swh_storage.directory_get_entries(directory.id) == PagedResult( results=list(directory.entries), next_page_token=None, ) else: # TODO: compare the manifests are the same (currently, we can't # because there is no way to get the raw_manifest of a directory) # we can't compare the other fields, because they become non-intrinsic, # so they may clash between hypothesis runs pass def test_directory_add_twice(self, swh_storage, sample_data): directory = sample_data.directories[1] actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("directory", directory) ] actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 0} assert list(swh_storage.journal_writer.journal.objects) == [ ("directory", directory) ] def test_directory_ls_recursive(self, swh_storage, sample_data): # create consistent dataset regarding the directories we want to list content, content2 = sample_data.contents[:2] swh_storage.content_add([content, content2]) dir1, dir2, dir3 = sample_data.directories[:3] dir_ids = [d.id for d in [dir1, dir2, dir3]] init_missing = list(swh_storage.directory_missing(dir_ids)) assert init_missing == dir_ids actual_result = swh_storage.directory_add([dir1, dir2, dir3]) assert actual_result == {"directory:add": 3} # List directory containing one file actual_data = list(swh_storage.directory_ls(dir1.id, recursive=True)) expected_data = list(transform_entries(swh_storage, dir1)) for data in actual_data: assert data in expected_data # List directory containing a file and an unknown subdirectory actual_data = list(swh_storage.directory_ls(dir2.id, recursive=True)) expected_data = 
list(transform_entries(swh_storage, dir2))
        for data in actual_data:
            assert data in expected_data

        # List directory containing both a known and unknown subdirectory, entries
        # should be both those of the directory and of the known subdir (up to contents)
        actual_data = list(swh_storage.directory_ls(dir3.id, recursive=True))
        expected_data = list(
            itertools.chain(
                transform_entries(swh_storage, dir3),
                transform_entries(swh_storage, dir2, prefix=b"subdir/"),
            )
        )
        for data in actual_data:
            assert data in expected_data

    def test_directory_ls_non_recursive(self, swh_storage, sample_data):
        # create consistent dataset regarding the directories we want to list
        content, content2 = sample_data.contents[:2]
        swh_storage.content_add([content, content2])
        dir1, dir2, dir3, _, dir5 = sample_data.directories[:5]

        dir_ids = [d.id for d in [dir1, dir2, dir3, dir5]]
        init_missing = list(swh_storage.directory_missing(dir_ids))
        assert init_missing == dir_ids

        actual_result = swh_storage.directory_add([dir1, dir2, dir3, dir5])
        assert actual_result == {"directory:add": 4}

        # List directory containing a single file
        actual_data = list(swh_storage.directory_ls(dir1.id))
        expected_data = list(transform_entries(swh_storage, dir1))
        for data in actual_data:
            assert data in expected_data

        # List directory containing a file and a known subdirectory
        actual_data = list(swh_storage.directory_ls(dir2.id))
        expected_data = list(transform_entries(swh_storage, dir2))
        for data in actual_data:
            assert data in expected_data

        # List directory containing a known subdirectory, entries should
        # only be those of the parent directory, not of the subdir
        actual_data = list(swh_storage.directory_ls(dir3.id))
        expected_data = list(transform_entries(swh_storage, dir3))
        for data in actual_data:
            assert data in expected_data

    def test_directory_ls_missing_content(self, swh_storage, sample_data):
        swh_storage.directory_add([sample_data.directory2])
        assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [
            {
                "dir_id": sample_data.directory2.id,
                "length": None,
                "name": b"oof",
                "perms": 33188,
                "sha1": None,
                "sha1_git": None,
                "sha256": None,
                "status": None,
                "target": sample_data.directory2.entries[0].target,
                "type": "file",
            },
        ]

    def test_directory_ls_skipped_content(self, swh_storage, sample_data):
        swh_storage.directory_add([sample_data.directory2])
        cont = SkippedContent(
            sha1_git=sample_data.directory2.entries[0].target,
            sha1=b"c" * 20,
            sha256=None,
            blake2s256=None,
            length=42,
            status="absent",
            reason="You need a premium subscription to access this content",
        )
        swh_storage.skipped_content_add([cont])
        assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [
            {
                "dir_id": sample_data.directory2.id,
                "length": 42,
                "name": b"oof",
                "perms": 33188,
                "sha1": b"c" * 20,
                "sha1_git": sample_data.directory2.entries[0].target,
                "sha256": None,
                "status": "absent",
                "target": sample_data.directory2.entries[0].target,
                "type": "file",
            },
        ]

    def test_directory_entry_get_by_path(self, swh_storage, sample_data):
        cont, content2 = sample_data.contents[:2]
        dir1, dir2, dir3, dir4, dir5 = sample_data.directories[:5]

        # given
        dir_ids = [d.id for d in [dir1, dir2, dir3, dir4, dir5]]
        init_missing = list(swh_storage.directory_missing(dir_ids))
        assert init_missing == dir_ids

        actual_result = swh_storage.directory_add([dir3, dir4])
        assert actual_result == {"directory:add": 2}

        expected_entries = [
            {
                "dir_id": dir3.id,
                "name": b"foo",
                "type": "file",
                "target": cont.sha1_git,
                "sha1": None,
                "sha1_git": None,
                "sha256": None,
                "status": None,
                "perms": from_disk.DentryPerms.content,
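                # DentryPerms.content is 0o100644 (a regular file), i.e. the
                # literal 33188 used in the directory_ls tests above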
"length": None, }, { "dir_id": dir3.id, "name": b"subdir", "type": "dir", "target": dir2.id, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.directory, "length": None, }, { "dir_id": dir3.id, "name": b"hello", "type": "file", "target": content2.sha1_git, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.content, "length": None, }, ] # when (all must be found here) for entry, expected_entry in zip(dir3.entries, expected_entries): actual_entry = swh_storage.directory_entry_get_by_path( dir3.id, [entry.name] ) assert actual_entry == expected_entry # same, but deeper for entry, expected_entry in zip(dir3.entries, expected_entries): actual_entry = swh_storage.directory_entry_get_by_path( dir4.id, [b"subdir1", entry.name] ) expected_entry = expected_entry.copy() expected_entry["name"] = b"subdir1/" + expected_entry["name"] assert actual_entry == expected_entry # when (nothing should be found here since `dir` is not persisted.) for entry in dir2.entries: actual_entry = swh_storage.directory_entry_get_by_path( dir2.id, [entry.name] ) assert actual_entry is None def test_directory_get_entries_pagination(self, swh_storage, sample_data): dir_ = sample_data.directory3 entries = sorted(dir_.entries, key=lambda entry: entry.name) swh_storage.directory_add(sample_data.directories) # No pagination needed actual_data = swh_storage.directory_get_entries(dir_.id) assert sorted(actual_data.results) == sorted(entries) assert actual_data.next_page_token is None, actual_data # A little pagination actual_data = swh_storage.directory_get_entries(dir_.id, limit=2) assert len(actual_data.results) == 2, actual_data assert actual_data.next_page_token is not None, actual_data all_results = list(actual_data.results) actual_data = swh_storage.directory_get_entries( dir_.id, page_token=actual_data.next_page_token ) assert len(actual_data.results) == len(entries) - 2, actual_data assert actual_data.next_page_token is None, actual_data all_results.extend(actual_data.results) assert sorted(all_results) == sorted(entries) @pytest.mark.parametrize("limit", [1, 2, 3, 4, 5]) def test_directory_get_entries(self, swh_storage, sample_data, limit): dir_ = sample_data.directory3 swh_storage.directory_add(sample_data.directories) actual_data = list( stream_results(swh_storage.directory_get_entries, dir_.id, limit=limit,) ) assert sorted(actual_data) == sorted(dir_.entries) def test_directory_get_random(self, swh_storage, sample_data): dir1, dir2, dir3 = sample_data.directories[:3] swh_storage.directory_add([dir1, dir2, dir3]) assert swh_storage.directory_get_random() in { dir1.id, dir2.id, dir3.id, } def test_revision_add(self, swh_storage, sample_data): revision = sample_data.revision init_missing = swh_storage.revision_missing([revision.id]) assert list(init_missing) == [revision.id] actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 1} end_missing = swh_storage.revision_missing([revision.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("revision", revision) ] # already there so nothing added actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 0} if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["revision"] == 1 def test_revision_add_twice(self, swh_storage, sample_data): revision, revision2 = 
sample_data.revisions[:2]

        actual_result = swh_storage.revision_add([revision])
        assert actual_result == {"revision:add": 1}

        assert list(swh_storage.journal_writer.journal.objects) == [
            ("revision", revision)
        ]

        actual_result = swh_storage.revision_add([revision, revision2])
        assert actual_result == {"revision:add": 1}

        assert list(swh_storage.journal_writer.journal.objects) == [
            ("revision", revision),
            ("revision", revision2),
        ]

    def test_revision_add_fractional_timezone(self, swh_storage, sample_data):
        # When reading a date from this time period on systems configured with
        # timezone Europe/Paris, postgresql returns them with UTC+00:09:21 as
        # timezone, but psycopg2 < 2.9.0 used to truncate them.
        # https://www.psycopg.org/docs/usage.html#time-zones-handling
        #
        # There is a workaround in swh.storage.postgresql.storage.Storage.get_db,
        # to set the timezone to UTC so it works on all psycopg2 versions.
        #
        # Therefore, this test always succeeds in tox (because psycopg2 >= 2.9.0)
        # and on the CI (both because psycopg2 >= 2.9.0 and TZ=UTC), which means
        # it is only useful on machines with older psycopg2 versions and
        # TZ=Europe/Paris. But the workaround is also only needed on this kind of
        # configuration, so this is good enough.
        revision = attr.evolve(
            sample_data.revision,
            date=TimestampWithTimezone(
                timestamp=Timestamp(seconds=-1855958962, microseconds=0),
-                offset=0,
-                negative_utc=False,
+                offset_bytes=b"+0000",
            ),
        )
        init_missing = swh_storage.revision_missing([revision.id])
        assert list(init_missing) == [revision.id]

        actual_result = swh_storage.revision_add([revision])
        assert actual_result == {"revision:add": 1}

        end_missing = swh_storage.revision_missing([revision.id])
        assert list(end_missing) == []

        assert list(swh_storage.journal_writer.journal.objects) == [
            ("revision", revision)
        ]

        assert swh_storage.revision_get([revision.id])[0] == revision

    def test_revision_add_with_raw_manifest(self, swh_storage, sample_data):
        revision = sample_data.revision
        revision = attr.evolve(revision, raw_manifest=b"foo")
        revision = attr.evolve(revision, id=revision.compute_hash())

        init_missing = swh_storage.revision_missing([revision.id])
        assert list(init_missing) == [revision.id]

        actual_result = swh_storage.revision_add([revision])
        assert actual_result == {"revision:add": 1}

        end_missing = swh_storage.revision_missing([revision.id])
        assert list(end_missing) == []

        assert list(swh_storage.journal_writer.journal.objects) == [
            ("revision", revision)
        ]

        assert swh_storage.revision_get([revision.id]) == [revision]

    @settings(
        suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large]
        + function_scoped_fixture_check,
    )
    @given(
        strategies.lists(hypothesis_strategies.revisions(), min_size=1, max_size=10,)
    )
    def test_revision_add_get_arbitrary(self, swh_storage, revisions):
        # remove non-intrinsic data, so revisions inserted with different
        # hypothesis data can't clash with each other
        revisions = [
            attr.evolve(
                revision,
                synthetic=False,
                metadata=None,
                committer=attr.evolve(revision.committer, name=None, email=None),
                author=attr.evolve(revision.author, name=None, email=None),
                type=RevisionType.GIT,
            )
            for revision in revisions
        ]

        swh_storage.revision_add(revisions)

        for revision in revisions:
            (rev,) = swh_storage.revision_get([revision.id])
            if rev.raw_manifest is None:
                assert rev == revision
            else:
                assert rev.raw_manifest == revision.raw_manifest
                # we can't compare the other fields, because they become
                # non-intrinsic, so they may clash between hypothesis runs

    def test_revision_add_name_clash(self, swh_storage,
sample_data): revision, revision2 = sample_data.revisions[:2] revision1 = attr.evolve( revision, author=Person( fullname=b"John Doe ", name=b"John Doe", email=b"john.doe@example.com", ), ) revision2 = attr.evolve( revision2, author=Person( fullname=b"John Doe ", name=b"John Doe ", email=b"john.doe@example.com ", ), ) actual_result = swh_storage.revision_add([revision1, revision2]) assert actual_result == {"revision:add": 2} def test_revision_get_order(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] add_result = swh_storage.revision_add([revision, revision2]) assert add_result == {"revision:add": 2} # order 1 actual_revisions = swh_storage.revision_get([revision.id, revision2.id]) assert actual_revisions == [revision, revision2] # order 2 actual_revisions2 = swh_storage.revision_get([revision2.id, revision.id]) assert actual_revisions2 == [revision2, revision] def test_revision_log(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # rev4 -is-child-of-> rev3 -> rev1, (rev2 -> rev1) swh_storage.revision_add([revision1, revision2, revision3, revision4]) # when results = list(swh_storage.revision_log([revision4.id])) # for comparison purposes actual_results = [Revision.from_dict(r) for r in results] assert len(actual_results) == 4 # rev4 -child-> rev3 -> rev1, (rev2 -> rev1) assert actual_results == [revision4, revision3, revision1, revision2] def test_revision_log_with_limit(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # revision4 -is-child-of-> revision3 swh_storage.revision_add([revision3, revision4]) results = list(swh_storage.revision_log([revision4.id], 1)) actual_results = [Revision.from_dict(r) for r in results] assert len(actual_results) == 1 assert actual_results[0] == revision4 def test_revision_log_unknown_revision(self, swh_storage, sample_data): revision = sample_data.revision rev_log = list(swh_storage.revision_log([revision.id])) assert rev_log == [] def test_revision_shortlog(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # rev4 -is-child-of-> rev3 -> (rev1, rev2); rev2 -> rev1 swh_storage.revision_add([revision1, revision2, revision3, revision4]) results = list(swh_storage.revision_shortlog([revision4.id])) actual_results = [[id, tuple(parents)] for (id, parents) in results] assert len(actual_results) == 4 assert actual_results == [ [revision4.id, revision4.parents], [revision3.id, revision3.parents], [revision1.id, revision1.parents], [revision2.id, revision2.parents], ] def test_revision_shortlog_with_limit(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # revision4 -is-child-of-> revision3 swh_storage.revision_add([revision1, revision2, revision3, revision4]) results = list(swh_storage.revision_shortlog([revision4.id], 1)) actual_results = [[id, tuple(parents)] for (id, parents) in results] assert len(actual_results) == 1 assert list(actual_results[0]) == [revision4.id, revision4.parents] def test_revision_get(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] swh_storage.revision_add([revision]) actual_revisions = swh_storage.revision_get([revision.id, revision2.id]) assert len(actual_revisions) == 2 assert actual_revisions == [revision, None] def test_revision_get_no_parents(self, swh_storage, sample_data): revision = sample_data.revision swh_storage.revision_add([revision]) 
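        # revision_get returns one entry per requested id, in the same order,
        # with None standing in for unknown ids (cf. test_revision_get above):
        #   swh_storage.revision_get([known_id, unknown_id]) == [revision, None]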
actual_revision = swh_storage.revision_get([revision.id])[0] assert revision.parents == () assert actual_revision.parents == () # no parents on this one def test_revision_get_random(self, swh_storage, sample_data): revision1, revision2, revision3 = sample_data.revisions[:3] swh_storage.revision_add([revision1, revision2, revision3]) assert swh_storage.revision_get_random() in { revision1.id, revision2.id, revision3.id, } def test_revision_missing_many(self, swh_storage, sample_data): """Large number of revision ids to check can cause ScyllaDB to reject queries.""" revision = sample_data.revision ids = [bytes([b1, b2]) * 10 for b1 in range(256) for b2 in range(10)] ids.append(revision.id) ids.sort() init_missing = swh_storage.revision_missing(ids) assert set(init_missing) == set(ids) actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 1} end_missing = swh_storage.revision_missing(ids) assert set(end_missing) == set(ids) - {revision.id} def test_extid_add_git(self, swh_storage, sample_data): gitids = [ revision.id for revision in sample_data.revisions if revision.type.value == "git" ] extids = [ ExtID( extid=gitid, extid_type="git", target=CoreSWHID(object_id=gitid, object_type=ObjectType.REVISION,), ) for gitid in gitids ] assert swh_storage.extid_get_from_extid("git", gitids) == [] assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == [] summary = swh_storage.extid_add(extids) assert summary == {"extid:add": len(gitids)} assert swh_storage.extid_get_from_extid("git", gitids) == extids assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == extids assert swh_storage.extid_get_from_extid("hg", gitids) == [] assert swh_storage.extid_get_from_target(ObjectType.RELEASE, gitids) == [] # check ExtIDs have been added to the journal extids_in_journal = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "extid" ] assert extids == extids_in_journal def test_extid_add_hg(self, swh_storage, sample_data): def get_node(revision): node = None if revision.extra_headers: node = dict(revision.extra_headers).get(b"node") if node is None and revision.metadata: node = hash_to_bytes(revision.metadata.get("node")) return node swhids = [ revision.id for revision in sample_data.revisions if revision.type.value == "hg" ] extids = [ get_node(revision) for revision in sample_data.revisions if revision.type.value == "hg" ] assert swh_storage.extid_get_from_extid("hg", extids) == [] assert swh_storage.extid_get_from_target(ObjectType.REVISION, swhids) == [] extid_objs = [ ExtID( extid=hgid, extid_type="hg", extid_version=1, target=CoreSWHID(object_id=swhid, object_type=ObjectType.REVISION,), ) for hgid, swhid in zip(extids, swhids) ] summary = swh_storage.extid_add(extid_objs) assert summary == {"extid:add": len(swhids)} assert swh_storage.extid_get_from_extid("hg", extids) == extid_objs assert ( swh_storage.extid_get_from_target(ObjectType.REVISION, swhids) == extid_objs ) assert swh_storage.extid_get_from_extid("git", extids) == [] assert swh_storage.extid_get_from_target(ObjectType.RELEASE, swhids) == [] # check ExtIDs have been added to the journal extids_in_journal = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "extid" ] assert extid_objs == extids_in_journal def test_extid_add_twice(self, swh_storage, sample_data): gitids = [ revision.id for revision in sample_data.revisions if revision.type.value == "git" ] extids = [ ExtID( extid=gitid, extid_type="git", 
target=CoreSWHID(object_id=gitid, object_type=ObjectType.REVISION,), ) for gitid in gitids ] summary = swh_storage.extid_add(extids) assert summary == {"extid:add": len(gitids)} # add them again, should be noop summary = swh_storage.extid_add(extids) # assert summary == {"extid:add": 0} assert swh_storage.extid_get_from_extid("git", gitids) == extids assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == extids def test_extid_add_extid_multicity(self, swh_storage, sample_data): ids = [ revision.id for revision in sample_data.revisions if revision.type.value == "git" ] extids = [ ExtID( extid=extid, extid_type="git", extid_version=2, target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), ) for extid in ids ] swh_storage.extid_add(extids) # try to add "modified-extid" versions, should be added extids2 = [ ExtID( extid=extid, extid_type="hg", extid_version=2, target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), ) for extid in ids ] swh_storage.extid_add(extids2) assert swh_storage.extid_get_from_extid("git", ids) == extids assert swh_storage.extid_get_from_extid("hg", ids) == extids2 assert set(swh_storage.extid_get_from_target(ObjectType.REVISION, ids)) == { *extids, *extids2, } def test_extid_add_target_multicity(self, swh_storage, sample_data): ids = [ revision.id for revision in sample_data.revisions if revision.type.value == "git" ] extids = [ ExtID( extid=extid, extid_type="git", target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), ) for extid in ids ] swh_storage.extid_add(extids) # try to add "modified" versions, should be added extids2 = [ ExtID( extid=extid, extid_type="git", target=CoreSWHID(object_id=extid, object_type=ObjectType.RELEASE,), ) for extid in ids ] swh_storage.extid_add(extids2) assert set(swh_storage.extid_get_from_extid("git", ids)) == {*extids, *extids2} assert swh_storage.extid_get_from_target(ObjectType.REVISION, ids) == extids assert swh_storage.extid_get_from_target(ObjectType.RELEASE, ids) == extids2 def test_extid_version_behavior(self, swh_storage, sample_data): ids = [ revision.id for revision in sample_data.revisions if revision.type.value == "git" ] # Insert extids with several different versions extids = [ ExtID( extid=extid, extid_type="git", extid_version=0, target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), ) for extid in ids ] + [ ExtID( extid=extid, extid_type="git", extid_version=1, target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), ) for extid in ids ] swh_storage.extid_add(extids) # Check that both versions get returned for git_id in ids: objs = swh_storage.extid_get_from_extid("git", [git_id]) assert len(objs) == 2 assert set(obj.extid_version for obj in objs) == {0, 1} for swhid in ids: objs = swh_storage.extid_get_from_target(ObjectType.REVISION, [swhid]) assert len(objs) == 2 assert set(obj.extid_version for obj in objs) == {0, 1} for version in [0, 1]: for git_id in ids: objs = swh_storage.extid_get_from_extid( "git", [git_id], version=version ) assert len(objs) == 1 assert objs[0].extid_version == version for swhid in ids: objs = swh_storage.extid_get_from_target( ObjectType.REVISION, [swhid], extid_version=version, extid_type="git", ) assert len(objs) == 1 assert objs[0].extid_version == version assert objs[0].extid_type == "git" def test_extid_version_behavior_failure(self, swh_storage, sample_data): """Calls with wrong input should raise""" ids = [ revision.id for revision in sample_data.revisions if revision.type.value == "git" ] # Other 
edge cases with pytest.raises( (ValueError, RemoteException), match="both extid_type and extid_version" ): swh_storage.extid_get_from_target( ObjectType.REVISION, [ids[0]], extid_version=0 ) with pytest.raises( (ValueError, RemoteException), match="both extid_type and extid_version" ): swh_storage.extid_get_from_target( ObjectType.REVISION, [ids[0]], extid_type="git" ) def test_release_add(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] init_missing = swh_storage.release_missing([release.id, release2.id]) assert list(init_missing) == [release.id, release2.id] actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 2} end_missing = swh_storage.release_missing([release.id, release2.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release), ("release", release2), ] # already present so nothing added actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 0} if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["release"] == 2 def test_release_add_with_raw_manifest(self, swh_storage, sample_data): release = sample_data.releases[0] release = attr.evolve(release, raw_manifest=b"foo") release = attr.evolve(release, id=release.compute_hash()) init_missing = swh_storage.release_missing([release.id]) assert list(init_missing) == [release.id] actual_result = swh_storage.release_add([release]) assert actual_result == {"release:add": 1} end_missing = swh_storage.release_missing([release.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release), ] assert swh_storage.release_get([release.id]) == [release] @settings( suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large] + function_scoped_fixture_check, ) @given(strategies.lists(hypothesis_strategies.releases(), min_size=1, max_size=10,)) def test_release_add_get_arbitrary(self, swh_storage, releases): # remove non-intrinsic data, so releases inserted with different hypothesis # data can't clash with each other releases = [ attr.evolve( release, synthetic=False, metadata=None, author=attr.evolve(release.author, name=None, email=None) if release.author else None, ) for release in releases ] swh_storage.release_add(releases) for release in releases: (rev,) = swh_storage.release_get([release.id]) if rev.raw_manifest is None: assert rev == release else: assert rev.raw_manifest == release.raw_manifest # we can't compare the other fields, because they become non-intrinsic, # so they may clash between hypothesis runs def test_release_add_no_author_date(self, swh_storage, sample_data): full_release = sample_data.release release = attr.evolve(full_release, author=None, date=None) actual_result = swh_storage.release_add([release]) assert actual_result == {"release:add": 1} end_missing = swh_storage.release_missing([release.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release) ] def test_release_add_twice(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] actual_result = swh_storage.release_add([release]) assert actual_result == {"release:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release) ] actual_result = swh_storage.release_add([release, release2, release, release2]) 
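        # `release` is already present and the duplicate `release2` entries are
        # deduplicated within the call, so only a single addition is reported: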
assert actual_result == {"release:add": 1} assert set(swh_storage.journal_writer.journal.objects) == set( [("release", release), ("release", release2),] ) def test_release_add_name_clash(self, swh_storage, sample_data): release, release2 = [ attr.evolve( c, author=Person( fullname=b"John Doe ", name=b"John Doe", email=b"john.doe@example.com", ), ) for c in sample_data.releases[:2] ] actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 2} def test_release_get(self, swh_storage, sample_data): release, release2, release3 = sample_data.releases[:3] # given swh_storage.release_add([release, release2]) # when actual_releases = swh_storage.release_get([release.id, release2.id]) # then assert actual_releases == [release, release2] unknown_releases = swh_storage.release_get([release3.id]) assert unknown_releases[0] is None def test_release_get_order(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] add_result = swh_storage.release_add([release, release2]) assert add_result == {"release:add": 2} # order 1 actual_releases = swh_storage.release_get([release.id, release2.id]) assert actual_releases == [release, release2] # order 2 actual_releases2 = swh_storage.release_get([release2.id, release.id]) assert actual_releases2 == [release2, release] def test_release_get_random(self, swh_storage, sample_data): release, release2, release3 = sample_data.releases[:3] swh_storage.release_add([release, release2, release3]) assert swh_storage.release_get_random() in { release.id, release2.id, release3.id, } def test_origin_add(self, swh_storage, sample_data): origins = list(sample_data.origins) origin_urls = [o.url for o in origins] assert swh_storage.origin_get(origin_urls) == [None] * len(origins) stats = swh_storage.origin_add(origins) assert stats == {"origin:add": len(origin_urls)} actual_origins = swh_storage.origin_get(origin_urls) assert actual_origins == origins assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin) for origin in origins] ) if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["origin"] == len(origins) def test_origin_add_twice(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] add1 = swh_storage.origin_add([origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add1 == {"origin:add": 2} add2 = swh_storage.origin_add([origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add2 == {"origin:add": 0} def test_origin_add_twice_at_once(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] add1 = swh_storage.origin_add([origin, origin2, origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add1 == {"origin:add": 2} add2 = swh_storage.origin_add([origin, origin2, origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add2 == {"origin:add": 0} def test_origin_get(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] assert swh_storage.origin_get([origin.url]) == [None] swh_storage.origin_add([origin]) actual_origins = swh_storage.origin_get([origin.url]) assert actual_origins == [origin] actual_origins = 
swh_storage.origin_get([origin.url, "not://exists"]) assert actual_origins == [origin, None] def _generate_random_visits(self, nb_visits=100, start=0, end=7): """Generate random visits within the last 2 months (to avoid computations) """ visits = [] today = now() for weeks in range(nb_visits, 0, -1): hours = random.randint(0, 24) minutes = random.randint(0, 60) seconds = random.randint(0, 60) days = random.randint(0, 28) weeks = random.randint(start, end) date_visit = today - timedelta( weeks=weeks, hours=hours, minutes=minutes, seconds=seconds, days=days ) visits.append(date_visit) return visits def test_origin_visit_get__unknown_origin(self, swh_storage): actual_page = swh_storage.origin_visit_get("foo") assert actual_page.next_page_token is None assert actual_page.results == [] assert actual_page == PagedResult() def test_origin_visit_get__validation_failure(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) with pytest.raises( StorageArgumentException, match="page_token must be a string" ): swh_storage.origin_visit_get(origin.url, page_token=10) # not bytes with pytest.raises( StorageArgumentException, match="order must be a ListOrder value" ): swh_storage.origin_visit_get(origin.url, order="foobar") # wrong order def test_origin_visit_get_all(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) ov1, ov2, ov3 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) # order asc, no token, no limit actual_page = swh_storage.origin_visit_get(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [ov1, ov2, ov3] # order asc, no token, limit actual_page = swh_storage.origin_visit_get(origin.url, limit=2) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov1, ov2] # order asc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ov3] # order asc, no token, limit actual_page = swh_storage.origin_visit_get(origin.url, limit=1) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov1] # order asc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov3] # order asc, token, limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=2 ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov3] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov2] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [ov3] # order desc, no token, no limit actual_page = swh_storage.origin_visit_get(origin.url, order=ListOrder.DESC) assert actual_page.next_page_token is None assert actual_page.results == [ov3, ov2, ov1] # order desc, no token, limit 
actual_page = swh_storage.origin_visit_get( origin.url, limit=2, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov3, ov2] # order desc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov1] # order desc, no token, limit actual_page = swh_storage.origin_visit_get( origin.url, limit=1, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov3] # order desc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov1] # order desc, token, limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov2] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov1] def test_origin_visit_status_get__unknown_cases(self, swh_storage, sample_data): origin = sample_data.origin actual_page = swh_storage.origin_visit_status_get("foobar", 1) assert actual_page.next_page_token is None assert actual_page.results == [] actual_page = swh_storage.origin_visit_status_get(origin.url, 1) assert actual_page.next_page_token is None assert actual_page.results == [] origin = sample_data.origin swh_storage.origin_add([origin]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), ] )[0] actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit + 10) assert actual_page.next_page_token is None assert actual_page.results == [] def test_origin_visit_status_add_unknown_type(self, swh_storage, sample_data): ov = OriginVisit( origin=sample_data.origin.url, date=now(), type=sample_data.type_visit1, visit=42, ) ovs = OriginVisitStatus( origin=ov.origin, visit=ov.visit, date=now(), status="created", snapshot=None, ) with pytest.raises(StorageArgumentException): swh_storage.origin_visit_status_add([ovs]) swh_storage.origin_add([sample_data.origin]) with pytest.raises(StorageArgumentException): swh_storage.origin_visit_status_add([ovs]) swh_storage.origin_visit_add([ov]) swh_storage.origin_visit_status_add([ovs]) def test_origin_visit_status_get_all(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) date_visit3 = round_to_milliseconds(now()) date_visit1 = date_visit3 - datetime.timedelta(hours=2) date_visit2 = date_visit3 - datetime.timedelta(hours=1) assert date_visit1 < date_visit2 < date_visit3 ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=date_visit1, type=sample_data.type_visit1, ), ] )[0] ovs1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit1, type=ov1.type, status="created", snapshot=None, ) ovs2 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit2, type=ov1.type, status="partial", snapshot=None, ) ovs3 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit3, type=ov1.type, status="full", snapshot=sample_data.snapshot.id, metadata={}, ) 
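        # ovs1 is not added explicitly: origin_visit_add already recorded the
        # initial "created" status for ov1 (which ovs1 mirrors), so only ovs2
        # and ovs3 are new here.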
swh_storage.origin_visit_status_add([ovs2, ovs3]) # order asc, no token, no limit actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit) assert actual_page.next_page_token is None assert actual_page.results == [ovs1, ovs2, ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1, ovs2] # order asc, token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs3] # order asc, token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, limit=2 ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1, ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3] # order desc, no token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3, ovs2, ovs1] # order desc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs3, ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs1] # order desc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, order=ListOrder.DESC, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs3] # order desc, token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs1] # order desc, token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC, limit=1, ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs1] def test_origin_visit_status_get_random(self, swh_storage, sample_data): origins = sample_data.origins[:2] 
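        # _generate_random_visits defaults to visits 0-7 weeks old, presumably
        # recent enough to fall within the backend's selection window for
        # origin_visit_status_get_random; the "nothing found" test below
        # deliberately pushes visits 13-24 weeks back, outside that window.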
swh_storage.origin_add(origins) # Add some random visits within the selection range visits = self._generate_random_visits() visit_type = "git" # Add visits to those origins for origin in origins: for date_visit in visits: visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)] )[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="full", snapshot=hash_to_bytes( "9b922e6d8d5b803c1582aabe5525b7b91150788e" ), ) ] ) if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() stats = swh_storage.stat_counters() assert stats["origin"] == len(origins) assert stats["origin_visit"] == len(origins) * len(visits) random_ovs = swh_storage.origin_visit_status_get_random(visit_type) assert random_ovs assert random_ovs.origin is not None assert random_ovs.origin in [o.url for o in origins] assert random_ovs.type is not None def test_origin_visit_status_get_random_nothing_found( self, swh_storage, sample_data ): origins = sample_data.origins swh_storage.origin_add(origins) visit_type = "hg" # Add some visits outside of the random generation selection so nothing # will be found by the random selection visits = self._generate_random_visits(nb_visits=3, start=13, end=24) for origin in origins: for date_visit in visits: visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)] )[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="full", snapshot=None, ) ] ) random_origin_visit = swh_storage.origin_visit_status_get_random(visit_type) assert random_origin_visit is None def test_origin_snapshot_get_all(self, swh_storage, sample_data): origin = sample_data.origins[0] swh_storage.origin_add([origin]) # add some random visits within the selection range visits = self._generate_random_visits() visit_type = "git" # set first visit to a null snapshot visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=visits[0], type=visit_type,)] )[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="created", snapshot=None, ) ] ) # add visits to origin snapshots = set() for date_visit in visits[1:]: visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)] )[0] # pick a random snapshot and keep track of it snapshot = random.choice(sample_data.snapshots).id snapshots.add(snapshot) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="full", snapshot=snapshot, ) ] ) # check expected snapshots are returned assert set(swh_storage.origin_snapshot_get_all(origin.url)) == snapshots def test_origin_get_by_sha1(self, swh_storage, sample_data): origin = sample_data.origin assert swh_storage.origin_get([origin.url])[0] is None swh_storage.origin_add([origin]) origins = list(swh_storage.origin_get_by_sha1([sha1(origin.url)])) assert len(origins) == 1 assert origins[0]["url"] == origin.url def test_origin_get_by_sha1_not_found(self, swh_storage, sample_data): unknown_origin = sample_data.origin assert swh_storage.origin_get([unknown_origin.url])[0] is None origins = list(swh_storage.origin_get_by_sha1([sha1(unknown_origin.url)])) assert len(origins) == 1 assert origins[0] is None def test_origin_search_single_result(self, swh_storage, sample_data): 
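        # An unknown URL must yield an empty page for both plain and regexp
        # search; once added, each origin is the single result for its own URL.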
origin, origin2 = sample_data.origins[:2] actual_page = swh_storage.origin_search(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [] actual_page = swh_storage.origin_search(origin.url, regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [] swh_storage.origin_add([origin]) actual_page = swh_storage.origin_search(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [origin] actual_page = swh_storage.origin_search(f".{origin.url[1:-1]}.", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin] swh_storage.origin_add([origin2]) actual_page = swh_storage.origin_search(origin2.url) assert actual_page.next_page_token is None assert actual_page.results == [origin2] actual_page = swh_storage.origin_search(f".{origin2.url[1:-1]}.", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_no_regexp(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search("/") assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search("/", page_token=None, limit=1) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( "/", page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_regexp_substring(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search("/", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search( "/", page_token=None, limit=1, regexp=True ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( "/", page_token=next_page_token, limit=1, regexp=True ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_regexp_fullstring(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search(".*/.*", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search( ".*/.*", page_token=None, limit=1, regexp=True ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( ".*/.*", page_token=next_page_token, limit=1, regexp=True ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_no_visit_types(self, swh_storage, sample_data): origin = sample_data.origins[0] swh_storage.origin_add([origin]) actual_page = swh_storage.origin_search(origin.url, visit_types=["git"]) assert actual_page.next_page_token is None assert actual_page.results == [] def test_origin_search_with_visit_types(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] 
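        # visit_types is an "any of" filter: an origin matches as soon as it
        # has at least one visit whose type is in the list (see
        # test_origin_search_multiple_visit_types below, where ["git", "hg"]
        # matches an origin that only has git visits).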
swh_storage.origin_add([origin, origin2]) swh_storage.origin_visit_add( [ OriginVisit(origin=origin.url, date=now(), type="git"), OriginVisit(origin=origin2.url, date=now(), type="svn"), ] ) actual_page = swh_storage.origin_search(origin.url, visit_types=["git"]) assert actual_page.next_page_token is None assert actual_page.results == [origin] actual_page = swh_storage.origin_search(origin2.url, visit_types=["svn"]) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_multiple_visit_types(self, swh_storage, sample_data): origin = sample_data.origins[0] swh_storage.origin_add([origin]) def _add_visit_type(visit_type): swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=now(), type=visit_type)] ) def _check_visit_types(visit_types): actual_page = swh_storage.origin_search(origin.url, visit_types=visit_types) assert actual_page.next_page_token is None assert actual_page.results == [origin] _add_visit_type("git") _check_visit_types(["git"]) _check_visit_types(["git", "hg"]) _add_visit_type("hg") _check_visit_types(["hg"]) _check_visit_types(["git", "hg"]) def test_origin_visit_add(self, swh_storage, sample_data): origin1 = sample_data.origins[1] swh_storage.origin_add([origin1]) date_visit = now() date_visit2 = date_visit + datetime.timedelta(minutes=1) date_visit = round_to_milliseconds(date_visit) date_visit2 = round_to_milliseconds(date_visit2) visit1 = OriginVisit( origin=origin1.url, date=date_visit, type=sample_data.type_visit1, ) visit2 = OriginVisit( origin=origin1.url, date=date_visit2, type=sample_data.type_visit2, ) # add once ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) # then again (will be ignored as they already exist) origin_visit1, origin_visit2 = swh_storage.origin_visit_add([ov1, ov2]) assert ov1 == origin_visit1 assert ov2 == origin_visit2 assert ov1.visit == 1 assert ov2.visit == 2 ovs1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit, type=ov1.type, status="created", snapshot=None, ) ovs2 = OriginVisitStatus( origin=ov2.origin, visit=ov2.visit, date=date_visit2, type=ov2.type, status="created", snapshot=None, ) actual_visits = swh_storage.origin_visit_get(origin1.url).results expected_visits = [ov1, ov2] assert len(expected_visits) == len(actual_visits) for visit in expected_visits: assert visit in actual_visits actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = list( [("origin", origin1)] + [("origin_visit", visit) for visit in expected_visits] * 2 + [("origin_visit_status", ovs) for ovs in [ovs1, ovs2]] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_add_replayed(self, swh_storage, sample_data): """Tests adding a visit with an id makes sure the next id is higher""" origin1 = sample_data.origins[1] swh_storage.origin_add([origin1]) date_visit = now() date_visit2 = date_visit + datetime.timedelta(minutes=1) date_visit = round_to_milliseconds(date_visit) date_visit2 = round_to_milliseconds(date_visit2) visit1 = OriginVisit( origin=origin1.url, date=date_visit, type=sample_data.type_visit1, visit=42 ) visit2 = OriginVisit( origin=origin1.url, date=date_visit2, type=sample_data.type_visit2, ) # add once ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) # then again (will be ignored as they already exist) origin_visit1, origin_visit2 = swh_storage.origin_visit_add([ov1, ov2]) assert ov1 == origin_visit1 assert ov2 == origin_visit2 assert ov1.visit == 42 assert ov2.visit == 43 visit3 = OriginVisit( 
origin=origin1.url, date=date_visit, type=sample_data.type_visit1, visit=12
        )
        visit4 = OriginVisit(
            origin=origin1.url, date=date_visit2, type=sample_data.type_visit2,
        )

        # add once
        ov3, ov4 = swh_storage.origin_visit_add([visit3, visit4])
        # then again (will be ignored as they already exist)
        origin_visit3, origin_visit4 = swh_storage.origin_visit_add([ov3, ov4])
        assert ov3 == origin_visit3
        assert ov4 == origin_visit4
        assert ov3.visit == 12
        assert ov4.visit == 44

    def test_origin_visit_add_validation(self, swh_storage, sample_data):
        """Unknown origin when adding visits should raise"""
        visit = attr.evolve(sample_data.origin_visit, origin="something-unknown")

        with pytest.raises(StorageArgumentException, match="Unknown origin"):
            swh_storage.origin_visit_add([visit])

        objects = list(swh_storage.journal_writer.journal.objects)
        assert not objects

    def test_origin_visit_status_add_validation(self, swh_storage):
        """Wrong origin_visit_status input should raise a storage argument
        error"""
        date_visit = now()
        visit_status1 = OriginVisitStatus(
            origin="unknown-origin-url",
            visit=10,
            date=date_visit,
            status="full",
            snapshot=None,
        )

        with pytest.raises(StorageArgumentException, match="Unknown origin"):
            swh_storage.origin_visit_status_add([visit_status1])

        objects = list(swh_storage.journal_writer.journal.objects)
        assert not objects

    def test_origin_visit_status_add(self, swh_storage, sample_data):
        """Correct origin visit statuses should add a new visit status"""
        snapshot = sample_data.snapshot
        origin1 = sample_data.origins[1]
        origin2 = Origin(url="new-origin")
        swh_storage.origin_add([origin1, origin2])

        ov1, ov2 = swh_storage.origin_visit_add(
            [
                OriginVisit(
                    origin=origin1.url,
                    date=sample_data.date_visit1,
                    type=sample_data.type_visit1,
                ),
                OriginVisit(
                    origin=origin2.url,
                    date=sample_data.date_visit2,
                    type=sample_data.type_visit2,
                ),
            ]
        )

        ovs1 = OriginVisitStatus(
            origin=ov1.origin,
            visit=ov1.visit,
            date=sample_data.date_visit1,
            type=ov1.type,
            status="created",
            snapshot=None,
        )
        ovs2 = OriginVisitStatus(
            origin=ov2.origin,
            visit=ov2.visit,
            date=sample_data.date_visit2,
            type=ov2.type,
            status="created",
            snapshot=None,
        )

        date_visit_now = round_to_milliseconds(now())
        visit_status1 = OriginVisitStatus(
            origin=ov1.origin,
            visit=ov1.visit,
            date=date_visit_now,
            type=ov1.type,
            status="full",
            snapshot=snapshot.id,
        )

        date_visit_now = round_to_milliseconds(now())
        visit_status2 = OriginVisitStatus(
            origin=ov2.origin,
            visit=ov2.visit,
            date=date_visit_now,
            type=ov2.type,
            status="ongoing",
            snapshot=None,
            metadata={"intrinsic": "something"},
        )

        stats = swh_storage.origin_visit_status_add([visit_status1, visit_status2])
        assert stats == {"origin_visit_status:add": 2}

        visit = swh_storage.origin_visit_get_latest(origin1.url, require_snapshot=True)
        visit_status = swh_storage.origin_visit_status_get_latest(
            origin1.url, visit.visit, require_snapshot=True
        )
        assert visit_status == visit_status1

        visit = swh_storage.origin_visit_get_latest(origin2.url, require_snapshot=False)
        visit_status = swh_storage.origin_visit_status_get_latest(
            origin2.url, visit.visit, require_snapshot=False
        )
        assert origin2.url != origin1.url
        assert visit_status == visit_status2

        actual_objects = list(swh_storage.journal_writer.journal.objects)

        expected_origins = [origin1, origin2]
        expected_visits = [ov1, ov2]
        expected_visit_statuses = [ovs1, ovs2, visit_status1, visit_status2]

        expected_objects = (
            [("origin", o) for o in expected_origins]
            + [("origin_visit", v) for v in expected_visits]
            + [("origin_visit_status", ovs) for ovs in expected_visit_statuses]
        )

        for obj in
expected_objects: assert obj in actual_objects def test_origin_visit_status_add_twice(self, swh_storage, sample_data): """Correct origin visit statuses should add a new visit status """ snapshot = sample_data.snapshot origin1 = sample_data.origins[1] swh_storage.origin_add([origin1]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), ] )[0] ovs1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=sample_data.date_visit1, type=ov1.type, status="created", snapshot=None, ) date_visit_now = round_to_milliseconds(now()) visit_status1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit_now, type=ov1.type, status="full", snapshot=snapshot.id, ) stats = swh_storage.origin_visit_status_add([visit_status1]) assert stats == {"origin_visit_status:add": 1} # second call will ignore existing entries (will send to storage though) stats = swh_storage.origin_visit_status_add([visit_status1]) # ...so the storage still returns it as an addition assert stats == {"origin_visit_status:add": 1} visit_status = swh_storage.origin_visit_status_get_latest(ov1.origin, ov1.visit) assert visit_status == visit_status1 actual_objects = list(swh_storage.journal_writer.journal.objects) expected_origins = [origin1] expected_visits = [ov1] expected_visit_statuses = [ovs1, visit_status1, visit_status1] # write twice in the journal expected_objects = ( [("origin", o) for o in expected_origins] + [("origin_visit", v) for v in expected_visits] + [("origin_visit_status", ovs) for ovs in expected_visit_statuses] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_find_by_date(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit1, ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit3, type=sample_data.type_visit2, ) visit3 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit3, ) ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) ovs1 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin.url, visit=ov2.visit, date=sample_data.date_visit3, status="ongoing", snapshot=None, ) ovs3 = OriginVisitStatus( origin=origin.url, visit=ov3.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3]) # Simple case actual_visit = swh_storage.origin_visit_find_by_date( origin.url, sample_data.date_visit3 ) assert actual_visit == ov2 # There are two visits at the same date, the latest must be returned actual_visit = swh_storage.origin_visit_find_by_date( origin.url, sample_data.date_visit2 ) assert actual_visit == ov3 def test_origin_visit_find_by_date__unknown_origin(self, swh_storage, sample_data): actual_visit = swh_storage.origin_visit_find_by_date( "foo", sample_data.date_visit2 ) assert actual_visit is None def test_origin_visit_get_by(self, swh_storage, sample_data): snapshot = sample_data.snapshot origins = sample_data.origins[:2] swh_storage.origin_add(origins) origin_url, origin_url2 = [o.url for o in origins] visit = OriginVisit( origin=origin_url, date=sample_data.date_visit2, type=sample_data.type_visit2, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) 
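        # origin_visit_get_by returns the bare OriginVisit: the statuses and
        # metadata added below should not leak into the value asserted at the
        # end.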
swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) # Add some other {origin, visit} entries visit2 = OriginVisit( origin=origin_url, date=sample_data.date_visit3, type=sample_data.type_visit3, ) visit3 = OriginVisit( origin=origin_url2, date=sample_data.date_visit3, type=sample_data.type_visit3, ) swh_storage.origin_visit_add([visit2, visit3]) # when visit1_metadata = { "contents": 42, "directories": 22, } swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=origin_visit1.visit, date=now(), status="full", snapshot=snapshot.id, metadata=visit1_metadata, ) ] ) actual_visit = swh_storage.origin_visit_get_by(origin_url, origin_visit1.visit) assert actual_visit == origin_visit1 def test_origin_visit_get_by__no_result(self, swh_storage, sample_data): actual_visit = swh_storage.origin_visit_get_by("unknown", 10) # unknown origin assert actual_visit is None origin = sample_data.origin swh_storage.origin_add([origin]) actual_visit = swh_storage.origin_visit_get_by(origin.url, 999) # unknown visit assert actual_visit is None def test_origin_visit_get_latest_edge_cases(self, swh_storage, sample_data): # unknown origin so no result assert swh_storage.origin_visit_get_latest("unknown-origin") is None # unknown type so no result origin = sample_data.origin swh_storage.origin_add([origin]) assert swh_storage.origin_visit_get_latest(origin.url, type="unknown") is None # unknown allowed statuses should raise with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"): swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["unknown"] ) def test_origin_visit_get_latest_filter_type(self, swh_storage, sample_data): """Filtering origin visit get latest with filter type should be ok """ origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type="hg", ) date_now = round_to_milliseconds(now()) visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",) assert sample_data.date_visit1 < sample_data.date_visit2 assert sample_data.date_visit2 < date_now ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) # Check type filter is ok actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="git") assert actual_visit == ov1 actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="hg") assert actual_visit == ov3 actual_visit_unknown_type = swh_storage.origin_visit_get_latest( origin.url, type="npm", # no visit matching that type ) assert actual_visit_unknown_type is None def test_origin_visit_get_latest(self, swh_storage, sample_data): empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type="hg", ) date_now = round_to_milliseconds(now()) visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",) assert visit1.date < visit2.date assert visit2.date < visit3.date ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) # no filters, latest visit is the last one (whose date is most recent) actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # 3 visits, none has 
snapshot so nothing is returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit is None # visit are created with "created" status, so nothing will get returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["partial"] ) assert actual_visit is None # visit are created with "created" status, so most recent again actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["created"] ) assert actual_visit == ov3 # Add snapshot to visit1; require_snapshot=True makes it return first visit swh_storage.snapshot_add([complete_snapshot]) visit_status_with_snapshot = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=round_to_milliseconds(now()), type=ov1.type, status="ongoing", snapshot=complete_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status_with_snapshot]) # only the first visit has a snapshot now actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit == ov1 # only the first visit has a status ongoing now actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["ongoing"] ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, require_snapshot=True ) assert actual_visit_status == visit_status_with_snapshot # ... and require_snapshot=False (defaults) still returns latest visit (3rd) actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=False ) assert actual_visit == ov3 # no specific filter, this returns as before the latest visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # Status filter: all three visits are status=ongoing, so no visit # returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit is None visit_status1_full = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=round_to_milliseconds(now()), type=ov1.type, status="full", snapshot=complete_snapshot.id, ) # Mark the first visit as completed and check status filter again swh_storage.origin_visit_status_add([visit_status1_full]) # only the first visit has the full status actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, allowed_statuses=["full"] ) assert actual_visit_status == visit_status1_full # no specific filter, this returns as before the latest visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # Add snapshot to visit2 and check that the new snapshot is returned swh_storage.snapshot_add([empty_snapshot]) visit_status2_full = OriginVisitStatus( origin=ov2.origin, visit=ov2.visit, date=round_to_milliseconds(now()), type=ov2.type, status="ongoing", snapshot=empty_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status2_full]) actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) # 2nd visit is most recent with a snapshot assert actual_visit == ov2 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov2.visit, require_snapshot=True ) assert actual_visit_status == visit_status2_full # no specific filter, this returns as before the latest visit, 3rd one actual_origin = swh_storage.origin_visit_get_latest(origin.url) assert actual_origin == ov3 # full 
status is still the first visit actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit == ov1 # Add snapshot to visit3 (same date as visit2) visit_status3_with_snapshot = OriginVisitStatus( origin=ov3.origin, visit=ov3.visit, date=round_to_milliseconds(now()), type=ov3.type, status="ongoing", snapshot=complete_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status3_with_snapshot]) # full status is still the first visit actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"], require_snapshot=True, ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, visit=actual_visit.visit, allowed_statuses=["full"], require_snapshot=True, ) assert actual_visit_status == visit_status1_full # most recent is still the 3rd visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # 3rd visit has a snapshot now, so it's elected actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit == ov3 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov3.visit, require_snapshot=True ) assert actual_visit_status == visit_status3_with_snapshot def test_origin_visit_get_latest__same_date(self, swh_storage, sample_data): empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="hg", ) ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) # ties should be broken by using the visit id actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov2 def test_origin_visit_get_latest_order(self, swh_storage, sample_data): empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] origin = sample_data.origin id1 = 2 id2 = 1 id3 = 3 date1 = datetime.datetime(2021, 8, 2, tzinfo=datetime.timezone.utc) date2 = datetime.datetime(2021, 8, 3, tzinfo=datetime.timezone.utc) date3 = datetime.datetime(2021, 8, 1, tzinfo=datetime.timezone.utc) swh_storage.origin_add([origin]) visit1 = OriginVisit(origin=origin.url, visit=id1, date=date1, type="git",) visit2 = OriginVisit(origin=origin.url, visit=id2, date=date2, type="hg",) visit3 = OriginVisit(origin=origin.url, visit=id3, date=date3, type="tar",) ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) # no filters, latest visit is the last one (whose date is most recent) actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov2 def test_origin_visit_get_latest__not_last(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1, visit2 = sample_data.origin_visits[:2] assert visit1.origin == origin.url swh_storage.origin_visit_add([visit1]) ov1 = swh_storage.origin_visit_get_latest(origin.url) # Add snapshot to visit1, latest snapshot = visit 1 snapshot complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=visit2.date, status="partial", snapshot=None, ) ] ) assert visit1.date < visit2.date # no snapshot associated to the visit, so None visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["partial"], require_snapshot=True, ) 
date_now = now() assert visit2.date < date_now swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_now, status="full", snapshot=complete_snapshot.id, ) ] ) swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=now(), type=visit1.type,)] ) visit = swh_storage.origin_visit_get_latest(origin.url, require_snapshot=True) assert visit is not None def test_origin_visit_status_get_latest__validation(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) # unknown allowed statuses should raise with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"): swh_storage.origin_visit_status_get_latest( origin.url, visit1.visit, allowed_statuses=["unknown"] ) def test_origin_visit_status_get_latest(self, swh_storage, sample_data): snapshot = sample_data.snapshots[2] origin1 = sample_data.origin swh_storage.origin_add([origin1]) # to have some reference visits ov1, ov2 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), OriginVisit( origin=origin1.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) swh_storage.snapshot_add([snapshot]) date_now = round_to_milliseconds(now()) assert sample_data.date_visit1 < sample_data.date_visit2 assert sample_data.date_visit2 < date_now ovs1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=sample_data.date_visit1, type=ov1.type, status="partial", snapshot=None, ) ovs2 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=sample_data.date_visit2, type=ov1.type, status="ongoing", snapshot=None, ) ovs3 = OriginVisitStatus( origin=ov2.origin, visit=ov2.visit, date=sample_data.date_visit2 + datetime.timedelta(minutes=1), # to not be ignored type=ov2.type, status="ongoing", snapshot=None, ) ovs4 = OriginVisitStatus( origin=ov2.origin, visit=ov2.visit, date=date_now, type=ov2.type, status="full", snapshot=snapshot.id, metadata={"something": "wicked"}, ) swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3, ovs4]) # unknown origin so no result actual_origin_visit = swh_storage.origin_visit_status_get_latest( "unknown-origin", ov1.visit ) assert actual_origin_visit is None # unknown visit so no result actual_origin_visit = swh_storage.origin_visit_status_get_latest( ov1.origin, ov1.visit + 10 ) assert actual_origin_visit is None # Two statuses for this visit, both without a snapshot: take the most recent actual_origin_visit2 = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit ) assert isinstance(actual_origin_visit2, OriginVisitStatus) assert actual_origin_visit2 == ovs2 assert ovs2.origin == origin1.url assert ovs2.visit == ov1.visit actual_origin_visit = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit, require_snapshot=True ) # there is no status with a snapshot yet for that visit assert actual_origin_visit is None actual_origin_visit2 = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit, allowed_statuses=["partial", "ongoing"] ) # the most recent status among the allowed ones is elected, here ovs2 ("ongoing") assert actual_origin_visit2 == ovs2 assert actual_origin_visit2.status == "ongoing" actual_origin_visit4 = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, require_snapshot=True ) assert actual_origin_visit4 == ovs4 assert actual_origin_visit4.snapshot == snapshot.id actual_origin_visit = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, require_snapshot=True, allowed_statuses=["ongoing"] ) # nothing matches both filters, so no result assert actual_origin_visit is None # restricting to "ongoing" skips the latest (full) status; ovs3 is elected actual_origin_visit3 = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, allowed_statuses=["ongoing"] ) assert actual_origin_visit3 == ovs3
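

# Standalone usage sketch against the in-memory backend (the helper name and
# sample URL are invented for illustration): a visit whose statuses carry no
# snapshot is invisible to require_snapshot=True queries.
def _status_filter_sketch():
    from datetime import datetime, timezone

    from swh.model.model import Origin, OriginVisit, OriginVisitStatus
    from swh.storage import get_storage

    storage = get_storage(cls="memory")
    origin = Origin(url="https://example.org/repo.git")
    storage.origin_add([origin])
    visit = storage.origin_visit_add(
        [OriginVisit(origin=origin.url, date=datetime.now(tz=timezone.utc), type="git")]
    )[0]
    storage.origin_visit_status_add(
        [
            OriginVisitStatus(
                origin=origin.url,
                visit=visit.visit,
                date=datetime.now(tz=timezone.utc),
                status="ongoing",
                snapshot=None,
            )
        ]
    )
    # no status has a snapshot, so the snapshot-constrained lookup is empty
    assert (
        storage.origin_visit_status_get_latest(
            origin.url, visit.visit, require_snapshot=True
        )
        is None
    )
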
def test_person_fullname_unicity(self, swh_storage, sample_data): revision, rev2 = sample_data.revisions[0:2] # create a revision with the same committer fullname but without name and email revision2 = attr.evolve( rev2, committer=Person( fullname=revision.committer.fullname, name=None, email=None ), ) swh_storage.revision_add([revision, revision2]) # when getting added revisions revisions = swh_storage.revision_get([revision.id, revision2.id]) # then check committers are the same assert revisions[0].committer == revisions[1].committer def test_snapshot_add_get_empty(self, swh_storage, sample_data): empty_snapshot = sample_data.snapshots[1] empty_snapshot_dict = empty_snapshot.to_dict() origin = sample_data.origin swh_storage.origin_add([origin]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) ] )[0] actual_result = swh_storage.snapshot_add([empty_snapshot]) assert actual_result == {"snapshot:add": 1} date_now = now() swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_now, type=ov1.type, status="full", snapshot=empty_snapshot.id, ) ] ) by_id = swh_storage.snapshot_get(empty_snapshot.id) assert by_id == {**empty_snapshot_dict, "next_branch": None} ovs1 = OriginVisitStatus.from_dict( { "origin": ov1.origin, "date": sample_data.date_visit1, "type": ov1.type, "visit": ov1.visit, "status": "created", "snapshot": None, "metadata": None, } ) ovs2 = OriginVisitStatus.from_dict( { "origin": ov1.origin, "date": date_now, "type": ov1.type, "visit": ov1.visit, "status": "full", "metadata": None, "snapshot": empty_snapshot.id, } ) actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("origin", origin), ("origin_visit", ov1), ("origin_visit_status", ovs1,), ("snapshot", empty_snapshot), ("origin_visit_status", ovs2,), ] for obj in expected_objects: assert obj in actual_objects def test_snapshot_add_get_complete(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] complete_snapshot_dict = complete_snapshot.to_dict() origin = sample_data.origin swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] actual_result = swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=complete_snapshot.id, ) ] ) assert actual_result == {"snapshot:add": 1} by_id = swh_storage.snapshot_get(complete_snapshot.id) assert by_id == {**complete_snapshot_dict, "next_branch": None} @settings( suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large] + function_scoped_fixture_check, ) @given(strategies.lists(hypothesis_strategies.snapshots(), min_size=1, max_size=10)) def test_snapshot_add_get_arbitrary(self, swh_storage, snapshots): swh_storage.snapshot_add(snapshots) for snapshot in snapshots: assert swh_storage.snapshot_get(snapshot.id) == { **snapshot.to_dict(), "next_branch": None, }
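

# Hypothetical round-trip sketch (names invented) summarizing the contract the
# snapshot tests below rely on: snapshot_add reports only newly inserted
# objects, and snapshot_get returns the dict form plus a "next_branch" cursor.
def _snapshot_roundtrip_sketch():
    from swh.model.model import Snapshot, SnapshotBranch, TargetType
    from swh.storage import get_storage

    storage = get_storage(cls="memory")
    snapshot = Snapshot(
        branches={
            b"HEAD": SnapshotBranch(
                target=b"\x01" * 20, target_type=TargetType.REVISION
            )
        }
    )
    assert storage.snapshot_add([snapshot]) == {"snapshot:add": 1}
    # re-adding the same snapshot counts zero new objects
    assert storage.snapshot_add([snapshot]) == {"snapshot:add": 0}
    assert storage.snapshot_get(snapshot.id) == {
        **snapshot.to_dict(),
        "next_branch": None,
    }


def 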
test_snapshot_add_many(self, swh_storage, sample_data): snapshot, _, complete_snapshot = sample_data.snapshots[:3] actual_result = swh_storage.snapshot_add([snapshot, complete_snapshot]) assert actual_result == {"snapshot:add": 2} assert swh_storage.snapshot_get(complete_snapshot.id) == { **complete_snapshot.to_dict(), "next_branch": None, } assert swh_storage.snapshot_get(snapshot.id) == { **snapshot.to_dict(), "next_branch": None, } if isinstance(swh_storage, InMemoryStorage) or not isinstance( swh_storage, CassandraStorage ): swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["snapshot"] == 2 def test_snapshot_add_many_incremental(self, swh_storage, sample_data): snapshot, _, complete_snapshot = sample_data.snapshots[:3] actual_result = swh_storage.snapshot_add([complete_snapshot]) assert actual_result == {"snapshot:add": 1} actual_result2 = swh_storage.snapshot_add([snapshot, complete_snapshot]) assert actual_result2 == {"snapshot:add": 1} assert swh_storage.snapshot_get(complete_snapshot.id) == { **complete_snapshot.to_dict(), "next_branch": None, } assert swh_storage.snapshot_get(snapshot.id) == { **snapshot.to_dict(), "next_branch": None, } def test_snapshot_add_twice(self, swh_storage, sample_data): snapshot, empty_snapshot = sample_data.snapshots[:2] actual_result = swh_storage.snapshot_add([empty_snapshot]) assert actual_result == {"snapshot:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("snapshot", empty_snapshot) ] actual_result = swh_storage.snapshot_add([snapshot]) assert actual_result == {"snapshot:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("snapshot", empty_snapshot), ("snapshot", snapshot), ] def test_snapshot_add_count_branches(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] actual_result = swh_storage.snapshot_add([complete_snapshot]) assert actual_result == {"snapshot:add": 1} snp_size = swh_storage.snapshot_count_branches(complete_snapshot.id) expected_snp_size = { "alias": 1, "content": 1, "directory": 2, "release": 1, "revision": 1, "snapshot": 1, None: 1, } assert snp_size == expected_snp_size def test_snapshot_add_count_branches_with_filtering(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] actual_result = swh_storage.snapshot_add([complete_snapshot]) assert actual_result == {"snapshot:add": 1} snp_size = swh_storage.snapshot_count_branches( complete_snapshot.id, branch_name_exclude_prefix=b"release" ) expected_snp_size = { "alias": 1, "content": 1, "directory": 2, "revision": 1, "snapshot": 1, None: 1, } assert snp_size == expected_snp_size def test_snapshot_add_count_branches_with_filtering_edge_cases( self, swh_storage, sample_data ): snapshot = Snapshot( branches={ b"\xaa\xff": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"\xaa\xff\x00": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"\xff\xff": SnapshotBranch( target=sample_data.release.id, target_type=TargetType.RELEASE, ), b"\xff\xff\x00": SnapshotBranch( target=sample_data.release.id, target_type=TargetType.RELEASE, ), b"dangling": None, }, ) swh_storage.snapshot_add([snapshot]) assert swh_storage.snapshot_count_branches( snapshot.id, branch_name_exclude_prefix=b"\xaa\xff" ) == {None: 1, "release": 2} assert swh_storage.snapshot_count_branches( snapshot.id, branch_name_exclude_prefix=b"\xff\xff" ) == {None: 1, "revision": 2} def test_snapshot_add_get_paginated(self, swh_storage, sample_data): 
complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) snp_id = complete_snapshot.id branches = complete_snapshot.branches branch_names = list(sorted(branches)) # Test branch_from snapshot = swh_storage.snapshot_get_branches(snp_id, branches_from=b"release") rel_idx = branch_names.index(b"release") expected_snapshot = { "id": snp_id, "branches": {name: branches[name] for name in branch_names[rel_idx:]}, "next_branch": None, } assert snapshot == expected_snapshot # Test branches_count snapshot = swh_storage.snapshot_get_branches(snp_id, branches_count=1) expected_snapshot = { "id": snp_id, "branches": {branch_names[0]: branches[branch_names[0]],}, "next_branch": b"content", } assert snapshot == expected_snapshot # test branch_from + branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, branches_from=b"directory", branches_count=3 ) dir_idx = branch_names.index(b"directory") expected_snapshot = { "id": snp_id, "branches": { name: branches[name] for name in branch_names[dir_idx : dir_idx + 3] }, "next_branch": branch_names[dir_idx + 3], } assert snapshot == expected_snapshot def test_snapshot_add_get_filtered(self, swh_storage, sample_data): origin = sample_data.origin complete_snapshot = sample_data.snapshots[2] swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=complete_snapshot.id, ) ] ) snp_id = complete_snapshot.id branches = complete_snapshot.branches snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["release", "revision"] ) expected_snapshot = { "id": snp_id, "branches": { name: tgt for name, tgt in branches.items() if tgt and tgt.target_type in [TargetType.RELEASE, TargetType.REVISION] }, "next_branch": None, } assert snapshot == expected_snapshot snapshot = swh_storage.snapshot_get_branches(snp_id, target_types=["alias"]) expected_snapshot = { "id": snp_id, "branches": { name: tgt for name, tgt in branches.items() if tgt and tgt.target_type == TargetType.ALIAS }, "next_branch": None, } assert snapshot == expected_snapshot def test_snapshot_add_get_filtered_and_paginated(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) snp_id = complete_snapshot.id branches = complete_snapshot.branches branch_names = list(sorted(branches)) # Test branch_from snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["directory", "release"], branches_from=b"directory2" ) expected_snapshot = { "id": snp_id, "branches": {name: branches[name] for name in (b"directory2", b"release")}, "next_branch": None, } assert snapshot == expected_snapshot # Test branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["directory", "release"], branches_count=1 ) expected_snapshot = { "id": snp_id, "branches": {b"directory": branches[b"directory"]}, "next_branch": b"directory2", } assert snapshot == expected_snapshot # Test branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["directory", "release"], branches_count=2 ) expected_snapshot = { "id": snp_id, "branches": { name: branches[name] for name in (b"directory", b"directory2") }, "next_branch": b"release", } assert snapshot == 
expected_snapshot # test branch_from + branches_count snapshot = swh_storage.snapshot_get_branches( snp_id, target_types=["directory", "release"], branches_from=b"directory2", branches_count=1, ) dir_idx = branch_names.index(b"directory2") expected_snapshot = { "id": snp_id, "branches": {branch_names[dir_idx]: branches[branch_names[dir_idx]],}, "next_branch": b"release", } assert snapshot == expected_snapshot def test_snapshot_add_get_branch_by_type(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] snapshot = complete_snapshot.to_dict() alias1 = b"alias1" alias2 = b"alias2" target1 = random.choice(list(snapshot["branches"].keys())) target2 = random.choice(list(snapshot["branches"].keys())) snapshot["branches"][alias2] = { "target": target2, "target_type": "alias", } snapshot["branches"][alias1] = { "target": target1, "target_type": "alias", } new_snapshot = Snapshot.from_dict(snapshot) swh_storage.snapshot_add([new_snapshot]) branches = swh_storage.snapshot_get_branches( new_snapshot.id, target_types=["alias"], branches_from=alias1, branches_count=1, )["branches"] assert len(branches) == 1 assert alias1 in branches def test_snapshot_add_get_by_branches_name_pattern(self, swh_storage, sample_data): snapshot = Snapshot( branches={ b"refs/heads/master": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"refs/heads/incoming": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"refs/pull/1": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"refs/pull/2": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"dangling": None, b"\xaa\xff": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"\xaa\xff\x00": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"\xff\xff": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), b"\xff\xff\x00": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), }, ) swh_storage.snapshot_add([snapshot]) for include_pattern, exclude_prefix, nb_results in ( (b"pull", None, 2), (b"incoming", None, 1), (b"dangling", None, 1), (None, b"refs/heads/", 7), (b"refs", b"refs/heads/master", 3), (b"refs", b"refs/heads/master", 3), (None, b"\xaa\xff", 7), (None, b"\xff\xff", 7), ): branches = swh_storage.snapshot_get_branches( snapshot.id, branch_name_include_substring=include_pattern, branch_name_exclude_prefix=exclude_prefix, )["branches"] expected_branches = [ branch_name for branch_name in snapshot.branches if (include_pattern is None or include_pattern in branch_name) and ( exclude_prefix is None or not branch_name.startswith(exclude_prefix) ) ] assert sorted(branches) == sorted(expected_branches) assert len(branches) == nb_results def test_snapshot_add_get_by_branches_name_pattern_filtered_paginated( self, swh_storage, sample_data ): pattern = b"foo" nb_branches_by_target_type = 10 branches = {} for i in range(nb_branches_by_target_type): branches[f"branch/directory/bar{i}".encode()] = SnapshotBranch( target=sample_data.directory.id, target_type=TargetType.DIRECTORY, ) branches[f"branch/revision/bar{i}".encode()] = SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ) branches[f"branch/directory/{pattern}{i}".encode()] = SnapshotBranch( target=sample_data.directory.id, target_type=TargetType.DIRECTORY, ) branches[f"branch/revision/{pattern}{i}".encode()] = 
SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ) snapshot = Snapshot(branches=branches) swh_storage.snapshot_add([snapshot]) branches_count = nb_branches_by_target_type // 2 for target_type in ( TargetType.DIRECTORY, TargetType.REVISION, ): target_type_str = target_type.value partial_branches = swh_storage.snapshot_get_branches( snapshot.id, branch_name_include_substring=pattern, target_types=[target_type_str], branches_count=branches_count, ) branches = partial_branches["branches"] expected_branches = [ branch_name for branch_name, branch_data in snapshot.branches.items() if pattern in branch_name and branch_data.target_type == target_type ][:branches_count] assert sorted(branches) == sorted(expected_branches) assert ( partial_branches["next_branch"] == f"branch/{target_type_str}/{pattern}{branches_count}".encode() ) partial_branches = swh_storage.snapshot_get_branches( snapshot.id, branch_name_include_substring=pattern, target_types=[target_type_str], branches_from=partial_branches["next_branch"], ) branches = partial_branches["branches"] expected_branches = [ branch_name for branch_name, branch_data in snapshot.branches.items() if pattern in branch_name and branch_data.target_type == target_type ][branches_count:] assert sorted(branches) == sorted(expected_branches) assert partial_branches["next_branch"] is None def test_snapshot_get_branches_from_no_result(self, swh_storage, sample_data): snapshot = Snapshot( branches={ b"refs/heads/master": SnapshotBranch( target=sample_data.revision.id, target_type=TargetType.REVISION, ), }, ) swh_storage.snapshot_add([snapshot]) partial_branches = swh_storage.snapshot_get_branches( snapshot.id, branches_from=b"s", ) assert partial_branches is not None assert partial_branches["branches"] == {} @settings(suppress_health_check=function_scoped_fixture_check,) @given(hypothesis_strategies.snapshots(min_size=1)) def test_snapshot_get_unknown_snapshot(self, swh_storage, unknown_snapshot): assert swh_storage.snapshot_get(unknown_snapshot.id) is None assert swh_storage.snapshot_get_branches(unknown_snapshot.id) is None def test_snapshot_add_get(self, swh_storage, sample_data): snapshot = sample_data.snapshot origin = sample_data.origin swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) ov1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) expected_snapshot = {**snapshot.to_dict(), "next_branch": None} by_id = swh_storage.snapshot_get(snapshot.id) assert by_id == expected_snapshot actual_visit = swh_storage.origin_visit_get_by(origin.url, ov1.visit) assert actual_visit == ov1 visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, require_snapshot=True ) assert visit_status.snapshot == snapshot.id def test_snapshot_get_random(self, swh_storage, sample_data): snapshot, empty_snapshot, complete_snapshot = sample_data.snapshots[:3] swh_storage.snapshot_add([snapshot, empty_snapshot, complete_snapshot]) assert swh_storage.snapshot_get_random() in { snapshot.id, empty_snapshot.id, complete_snapshot.id, } def test_snapshot_missing(self, swh_storage, sample_data): snapshot, missing_snapshot = sample_data.snapshots[:2] snapshots = [snapshot.id, missing_snapshot.id] swh_storage.snapshot_add([snapshot]) missing_snapshots = 
swh_storage.snapshot_missing(snapshots) assert list(missing_snapshots) == [missing_snapshot.id] def test_stat_counters(self, swh_storage, sample_data): if isinstance(swh_storage, CassandraStorage) and not isinstance( swh_storage, InMemoryStorage ): pytest.skip("Cassandra backend does not support stat counters") origin = sample_data.origin snapshot = sample_data.snapshot revision = sample_data.revision release = sample_data.release directory = sample_data.directory content = sample_data.content expected_keys = ["content", "directory", "origin", "revision"] # Initially, all counters are 0 swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert set(expected_keys) <= set(counters) for key in expected_keys: assert counters[key] == 0 # Add a content. Only the content counter should increase. swh_storage.content_add([content]) swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert set(expected_keys) <= set(counters) for key in expected_keys: if key != "content": assert counters[key] == 0 assert counters["content"] == 1 # Add other objects. Check their counter increased as well. swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) swh_storage.directory_add([directory]) swh_storage.revision_add([revision]) swh_storage.release_add([release]) swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert counters["content"] == 1 assert counters["directory"] == 1 assert counters["snapshot"] == 1 assert counters["origin"] == 1 assert counters["origin_visit"] == 1 assert counters["revision"] == 1 assert counters["release"] == 1 assert counters["snapshot"] == 1 if "person" in counters: assert counters["person"] == 3 def test_content_find_ctime(self, swh_storage, sample_data): origin_content = sample_data.content ctime = round_to_milliseconds(now()) content = attr.evolve(origin_content, data=None, ctime=ctime) swh_storage.content_add_metadata([content]) actually_present = swh_storage.content_find({"sha1": content.sha1}) assert actually_present[0] == content assert actually_present[0].ctime is not None assert actually_present[0].ctime.tzinfo is not None def test_content_find_with_present_content(self, swh_storage, sample_data): content = sample_data.content expected_content = attr.evolve(content, data=None) # 1. with something to find swh_storage.content_add([content]) actually_present = swh_storage.content_find({"sha1": content.sha1}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 2. with something to find actually_present = swh_storage.content_find({"sha1_git": content.sha1_git}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 3. with something to find actually_present = swh_storage.content_find({"sha256": content.sha256}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 4. with something to find actually_present = swh_storage.content_find(content.hashes()) assert 1 == len(actually_present) assert actually_present[0] == expected_content def test_content_find_with_non_present_content(self, swh_storage, sample_data): missing_content = sample_data.skipped_content # 1. 
with something that does not exist actually_present = swh_storage.content_find({"sha1": missing_content.sha1}) assert actually_present == [] # 2. with something that does not exist actually_present = swh_storage.content_find( {"sha1_git": missing_content.sha1_git} ) assert actually_present == [] # 3. with something that does not exist actually_present = swh_storage.content_find({"sha256": missing_content.sha256}) assert actually_present == [] def test_content_find_with_duplicate_input(self, swh_storage, sample_data): content = sample_data.content # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(content.sha1) sha1_array[0] += 1 sha1git_array = bytearray(content.sha1_git) sha1git_array[0] += 1 duplicated_content = attr.evolve( content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array) ) # Inject the data swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find( { "blake2s256": duplicated_content.blake2s256, "sha256": duplicated_content.sha256, } ) expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] def test_content_find_with_duplicate_sha256(self, swh_storage, sample_data): content = sample_data.content hashes = {} # Create fake data with colliding sha256 for hashalgo in ("sha1", "sha1_git", "blake2s256"): value = bytearray(getattr(content, hashalgo)) value[0] += 1 hashes[hashalgo] = bytes(value) duplicated_content = attr.evolve( content, sha1=hashes["sha1"], sha1_git=hashes["sha1_git"], blake2s256=hashes["blake2s256"], ) swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find({"sha256": duplicated_content.sha256}) assert len(actual_result) == 2 expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] # Find with both sha256 and blake2s256 actual_result = swh_storage.content_find( { "sha256": duplicated_content.sha256, "blake2s256": duplicated_content.blake2s256, } ) assert len(actual_result) == 1 assert actual_result == [expected_duplicated_content] def test_content_find_with_duplicate_blake2s256(self, swh_storage, sample_data): content = sample_data.content # Create fake data with only blake2s256 colliding (sha1, sha1_git and sha256 all differ) sha1_array = bytearray(content.sha1) sha1_array[0] += 1 sha1git_array = bytearray(content.sha1_git) sha1git_array[0] += 1 sha256_array = bytearray(content.sha256) sha256_array[0] += 1 duplicated_content = attr.evolve( content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array), sha256=bytes(sha256_array), ) swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find( {"blake2s256": duplicated_content.blake2s256} ) expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] # Find with both sha256 and blake2s256 actual_result = swh_storage.content_find( { "sha256": duplicated_content.sha256, "blake2s256": duplicated_content.blake2s256, } ) assert actual_result == [expected_duplicated_content]
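

# Hypothetical sketch of the lookup contract exercised above (helper name
# invented): content_find takes any non-empty subset of the supported hashes
# and returns every stored content matching all of the given hashes, so
# adding a hash to the query can only narrow the result set.
def _content_find_sketch(storage, content):
    # a single hash may match several colliding rows...
    by_one = storage.content_find({"sha256": content.sha256})
    # ...while a second hash narrows the candidates further
    by_two = storage.content_find(
        {"sha256": content.sha256, "blake2s256": content.blake2s256}
    )
    assert set(by_two) <= set(by_one)
    return by_two


def test_content_find_bad_input(self, swh_storage): # 1. 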
with no hash to lookup with pytest.raises(StorageArgumentException): swh_storage.content_find({}) # need at least one hash # 2. with bad hash with pytest.raises(StorageArgumentException): swh_storage.content_find({"unknown-sha1": "something"}) # not the right key def test_object_find_by_sha1_git(self, swh_storage, sample_data): content = sample_data.content directory = sample_data.directory revision = sample_data.revision release = sample_data.release sha1_gits = [b"00000000000000000000"] expected = { b"00000000000000000000": [], } swh_storage.content_add([content]) sha1_gits.append(content.sha1_git) expected[content.sha1_git] = [ {"sha1_git": content.sha1_git, "type": "content",} ] swh_storage.directory_add([directory]) sha1_gits.append(directory.id) expected[directory.id] = [{"sha1_git": directory.id, "type": "directory",}] swh_storage.revision_add([revision]) sha1_gits.append(revision.id) expected[revision.id] = [{"sha1_git": revision.id, "type": "revision",}] swh_storage.release_add([release]) sha1_gits.append(release.id) expected[release.id] = [{"sha1_git": release.id, "type": "release",}] ret = swh_storage.object_find_by_sha1_git(sha1_gits) assert expected == ret def test_metadata_fetcher_add_get(self, swh_storage, sample_data): fetcher = sample_data.metadata_fetcher actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert actual_fetcher is None # does not exist swh_storage.metadata_fetcher_add([fetcher]) res = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert res == fetcher actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_fetcher", fetcher), ] for obj in expected_objects: assert obj in actual_objects def test_metadata_fetcher_add_zero(self, swh_storage, sample_data): fetcher = sample_data.metadata_fetcher actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert actual_fetcher is None # does not exist swh_storage.metadata_fetcher_add([]) def test_metadata_authority_add_get(self, swh_storage, sample_data): authority = sample_data.metadata_authority actual_authority = swh_storage.metadata_authority_get( authority.type, authority.url ) assert actual_authority is None # does not exist swh_storage.metadata_authority_add([authority]) res = swh_storage.metadata_authority_get(authority.type, authority.url) assert res == authority actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_authority", authority), ] for obj in expected_objects: assert obj in actual_objects def test_metadata_authority_add_zero(self, swh_storage, sample_data): authority = sample_data.metadata_authority actual_authority = swh_storage.metadata_authority_get( authority.type, authority.url ) assert actual_authority is None # does not exist swh_storage.metadata_authority_add([]) def test_content_metadata_add(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata = sample_data.content_metadata[:2] swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add(content_metadata) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == list( content_metadata ) actual_objects = list(swh_storage.journal_writer.journal.objects) 
expected_objects = [ ("metadata_authority", authority), ("metadata_fetcher", fetcher), ] + [("raw_extrinsic_metadata", item) for item in content_metadata] for obj in expected_objects: assert obj in actual_objects def test_content_metadata_add_duplicate(self, swh_storage, sample_data): """Duplicates should be silently ignored.""" content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) swh_storage.raw_extrinsic_metadata_add([content_metadata2, content_metadata]) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority ) assert result.next_page_token is None expected_results = (content_metadata, content_metadata2) assert ( tuple(sorted(result.results, key=lambda x: x.discovery_date,)) == expected_results ) def test_content_metadata_get(self, swh_storage, sample_data): content, content2 = sample_data.contents[:2] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( content1_metadata1, content1_metadata2, content1_metadata3, ) = sample_data.content_metadata[:3] content2_metadata = RawExtrinsicMetadata.from_dict( { **remove_keys(content1_metadata2.to_dict(), ("id",)), # recompute id "target": str(content2.swhid()), } ) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [ content1_metadata1, content1_metadata2, content1_metadata3, content2_metadata, ] ) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority ) assert result.next_page_token is None assert [content1_metadata1, content1_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority2 ) assert result.next_page_token is None assert [content1_metadata3] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( content2.swhid().to_extended(), authority ) assert result.next_page_token is None assert [content2_metadata] == list(result.results,) def test_content_metadata_get_after(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority, after=content_metadata.discovery_date - timedelta(seconds=1), ) assert result.next_page_token is None assert [content_metadata, content_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority, after=content_metadata.discovery_date, ) assert result.next_page_token is None assert result.results == [content_metadata2] result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority, after=content_metadata2.discovery_date, ) assert result.next_page_token is None assert result.results == [] def 
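_rem_pagination_sketch(storage, target, authority):
    # Hypothetical helper (name invented) showing the pagination loop the
    # next test exercises: follow next_page_token until it comes back None.
    results = []
    page_token = None
    while True:
        page = storage.raw_extrinsic_metadata_get(
            target, authority, limit=1, page_token=page_token
        )
        results.extend(page.results)
        page_token = page.next_page_token
        if page_token is None:
            return results


def 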
test_content_metadata_get_paginate(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) swh_storage.raw_extrinsic_metadata_get(content.swhid().to_extended(), authority) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority, limit=1 ) assert result.next_page_token is not None assert result.results == [content_metadata] result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [content_metadata2] def test_content_metadata_get_paginate_same_date(self, swh_storage, sample_data): content = sample_data.content fetcher1, fetcher2 = sample_data.fetchers[:2] authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] swh_storage.metadata_fetcher_add([fetcher1, fetcher2]) swh_storage.metadata_authority_add([authority]) new_content_metadata2 = RawExtrinsicMetadata.from_dict( { **remove_keys(content_metadata2.to_dict(), ("id",)), # recompute id "discovery_date": content_metadata2.discovery_date, "fetcher": attr.evolve(fetcher2, metadata=None).to_dict(), } ) swh_storage.raw_extrinsic_metadata_add( [content_metadata, new_content_metadata2] ) result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority, limit=1 ) assert result.next_page_token is not None assert result.results == [content_metadata] result = swh_storage.raw_extrinsic_metadata_get( content.swhid().to_extended(), authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results[0].to_dict() == new_content_metadata2.to_dict() assert result.results == [new_content_metadata2] def test_content_metadata_get_by_ids(self, swh_storage, sample_data): content, content2 = sample_data.contents[:2] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( content1_metadata1, content1_metadata2, content1_metadata3, ) = sample_data.content_metadata[:3] content2_metadata = RawExtrinsicMetadata.from_dict( { **remove_keys(content1_metadata2.to_dict(), ("id",)), # recompute id "target": str(content2.swhid()), } ) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [ content1_metadata1, content1_metadata2, content1_metadata3, content2_metadata, ] ) assert set( swh_storage.raw_extrinsic_metadata_get_by_ids( [content1_metadata1.id, b"\x00" * 20, content2_metadata.id] ) ) == {content1_metadata1, content2_metadata} def test_content_metadata_get_authorities(self, swh_storage, sample_data): content1, content2, content3 = sample_data.contents[:3] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( content1_metadata1, content1_metadata2, content1_metadata3, ) = sample_data.content_metadata[:3] content2_metadata = RawExtrinsicMetadata.from_dict( { **remove_keys(content1_metadata2.to_dict(), ("id",)), # recompute id "target": str(content2.swhid()), } ) content1_metadata2 = RawExtrinsicMetadata.from_dict( { **remove_keys(content1_metadata2.to_dict(), 
("id",)), # recompute id "authority": authority2.to_dict(), } ) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [ content1_metadata1, content1_metadata2, content1_metadata3, content2_metadata, ] ) assert swh_storage.raw_extrinsic_metadata_get_authorities(content1.swhid()) in ( [authority, authority2], [authority2, authority], ) assert swh_storage.raw_extrinsic_metadata_get_authorities(content2.swhid()) == [ authority ] assert ( swh_storage.raw_extrinsic_metadata_get_authorities(content3.swhid()) == [] ) def test_origin_metadata_add(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date)) == [ origin_metadata, origin_metadata2, ] actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_authority", authority), ("metadata_fetcher", fetcher), ("raw_extrinsic_metadata", origin_metadata), ("raw_extrinsic_metadata", origin_metadata2), ] for obj in expected_objects: assert obj in actual_objects def test_origin_metadata_add_duplicate(self, swh_storage, sample_data): """Duplicates should be silently updated.""" origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) swh_storage.raw_extrinsic_metadata_add([origin_metadata2, origin_metadata]) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority ) assert result.next_page_token is None # which of the two behavior happens is backend-specific. 
expected_results = (origin_metadata, origin_metadata2) assert ( tuple(sorted(result.results, key=lambda x: x.discovery_date,)) == expected_results ) def test_origin_metadata_get(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( origin1_metadata1, origin1_metadata2, origin1_metadata3, ) = sample_data.origin_metadata[:3] assert swh_storage.origin_add([origin, origin2]) == {"origin:add": 2} origin2_metadata = RawExtrinsicMetadata.from_dict( { **remove_keys(origin1_metadata2.to_dict(), ("id",)), # recompute id "target": str(Origin(origin2.url).swhid()), } ) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [origin1_metadata1, origin1_metadata2, origin1_metadata3, origin2_metadata] ) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority ) assert result.next_page_token is None assert [origin1_metadata1, origin1_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority2 ) assert result.next_page_token is None assert [origin1_metadata3] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin2.url).swhid(), authority ) assert result.next_page_token is None assert [origin2_metadata] == list(result.results,) def test_origin_metadata_get_after(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority, after=origin_metadata.discovery_date - timedelta(seconds=1), ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == [ origin_metadata, origin_metadata2, ] result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority, after=origin_metadata.discovery_date, ) assert result.next_page_token is None assert result.results == [origin_metadata2] result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority, after=origin_metadata2.discovery_date, ) assert result.next_page_token is None assert result.results == [] def test_origin_metadata_get_paginate(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) swh_storage.raw_extrinsic_metadata_get(Origin(origin.url).swhid(), authority) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority, limit=1 ) assert result.next_page_token is not None assert result.results == [origin_metadata] result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), 
authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [origin_metadata2] def test_origin_metadata_get_paginate_same_date(self, swh_storage, sample_data): origin = sample_data.origin fetcher1, fetcher2 = sample_data.fetchers[:2] authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher1, fetcher2]) swh_storage.metadata_authority_add([authority]) new_origin_metadata2 = RawExtrinsicMetadata.from_dict( { **remove_keys(origin_metadata2.to_dict(), ("id",)), # recompute id "discovery_date": origin_metadata2.discovery_date, "fetcher": attr.evolve(fetcher2, metadata=None).to_dict(), } ) swh_storage.raw_extrinsic_metadata_add([origin_metadata, new_origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority, limit=1 ) assert result.next_page_token is not None assert result.results == [origin_metadata] result = swh_storage.raw_extrinsic_metadata_get( Origin(origin.url).swhid(), authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [new_origin_metadata2] def test_origin_metadata_add_missing_authority(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) with pytest.raises(StorageArgumentException, match="authority"): swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) def test_origin_metadata_add_missing_fetcher(self, swh_storage, sample_data): origin = sample_data.origin authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_authority_add([authority]) with pytest.raises(StorageArgumentException, match="fetcher"): swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) class TestStorageGeneratedData: def test_generate_content_get_data(self, swh_storage, swh_contents): contents_with_data = [c for c in swh_contents if c.status != "absent"] # retrieve contents for content in contents_with_data: actual_content_data = swh_storage.content_get_data(content.sha1) assert actual_content_data is not None assert actual_content_data == content.data def test_generate_content_get(self, swh_storage, swh_contents): expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_contents = swh_storage.content_get([c.sha1 for c in expected_contents]) assert len(actual_contents) == len(expected_contents) assert actual_contents == expected_contents @pytest.mark.parametrize("limit", [1, 7, 10, 100, 1000]) def test_origin_list(self, swh_storage, swh_origins, limit): returned_origins = [] page_token = None i = 0 while True: actual_page = swh_storage.origin_list(page_token=page_token, limit=limit) assert len(actual_page.results) <= limit returned_origins.extend(actual_page.results) i += 1 page_token = actual_page.next_page_token if page_token is None: assert i * limit >= len(swh_origins) break else: assert len(actual_page.results) == limit assert sorted(returned_origins) == sorted(swh_origins) def test_origin_count(self, swh_storage, sample_data): 
swh_storage.origin_add(sample_data.origins) assert swh_storage.origin_count("github") == 3 assert swh_storage.origin_count("gitlab") == 2 assert swh_storage.origin_count(".*user.*", regexp=True) == 5 assert swh_storage.origin_count(".*user.*", regexp=False) == 0 assert swh_storage.origin_count(".*user1.*", regexp=True) == 2 assert swh_storage.origin_count(".*user1.*", regexp=False) == 0 def test_origin_count_with_visit_no_visits(self, swh_storage, sample_data): swh_storage.origin_add(sample_data.origins) # none of them have visits, so with_visit=True => 0 assert swh_storage.origin_count("github", with_visit=True) == 0 assert swh_storage.origin_count("gitlab", with_visit=True) == 0 assert swh_storage.origin_count(".*user.*", regexp=True, with_visit=True) == 0 assert swh_storage.origin_count(".*user.*", regexp=False, with_visit=True) == 0 assert swh_storage.origin_count(".*user1.*", regexp=True, with_visit=True) == 0 assert swh_storage.origin_count(".*user1.*", regexp=False, with_visit=True) == 0 def test_origin_count_with_visit_with_visits_no_snapshot( self, swh_storage, sample_data ): swh_storage.origin_add(sample_data.origins) origin_url = "https://github.com/user1/repo1" visit = OriginVisit(origin=origin_url, date=now(), type="git",) swh_storage.origin_visit_add([visit]) assert swh_storage.origin_count("github", with_visit=False) == 3 # it has a visit, but no snapshot, so with_visit=True => 0 assert swh_storage.origin_count("github", with_visit=True) == 0 assert swh_storage.origin_count("gitlab", with_visit=False) == 2 # these gitlab origins have no visit assert swh_storage.origin_count("gitlab", with_visit=True) == 0 assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=False) == 1 ) assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 0 ) assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 0 def test_origin_count_with_visit_with_visits_and_snapshot( self, swh_storage, sample_data ): snapshot = sample_data.snapshot swh_storage.origin_add(sample_data.origins) swh_storage.snapshot_add([snapshot]) origin_url = "https://github.com/user1/repo1" visit = OriginVisit(origin=origin_url, date=now(), type="git",) visit = swh_storage.origin_visit_add([visit])[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=visit.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) assert swh_storage.origin_count("github", with_visit=False) == 3 # github/user1 has a visit and a snapshot, so with_visit=True => 1 assert swh_storage.origin_count("github", with_visit=True) == 1 assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=False) == 1 ) assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 1 ) assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 1 @settings( suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large] + function_scoped_fixture_check, ) @given( strategies.lists(hypothesis_strategies.objects(split_content=True), max_size=2) ) def test_add_arbitrary(self, swh_storage, objects): for (obj_type, obj) in objects: if obj.object_type == "origin_visit": swh_storage.origin_add([Origin(url=obj.origin)]) visit = OriginVisit(origin=obj.origin, date=obj.date, type=obj.type,) swh_storage.origin_visit_add([visit]) elif obj.object_type == "raw_extrinsic_metadata": swh_storage.metadata_authority_add([obj.authority]) swh_storage.metadata_fetcher_add([obj.fetcher]) swh_storage.raw_extrinsic_metadata_add([obj]) 
else: method = getattr(swh_storage, obj_type + "_add") try: method([obj]) except HashCollision: pass diff --git a/swh/storage/tests/test_backfill.py b/swh/storage/tests/test_backfill.py index a9037838..fc0c0f51 100644 --- a/swh/storage/tests/test_backfill.py +++ b/swh/storage/tests/test_backfill.py @@ -1,303 +1,301 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import functools import logging from unittest.mock import patch import pytest from swh.journal.client import JournalClient from swh.model.tests.swh_model_data import TEST_OBJECTS from swh.storage import get_storage from swh.storage.backfill import ( PARTITION_KEY, JournalBackfiller, byte_ranges, compute_query, raw_extrinsic_metadata_target_ranges, ) from swh.storage.in_memory import InMemoryStorage from swh.storage.replay import ModelObjectDeserializer, process_replay_objects from swh.storage.tests.test_replay import check_replayed TEST_CONFIG = { "journal_writer": { "brokers": ["localhost"], "prefix": "swh.tmp_journal.new", "client_id": "swh.journal.client.test", }, "storage": {"cls": "postgresql", "db": "service=swh-dev"}, } def test_config_ko_missing_mandatory_key(): """Missing configuration key will make the initialization fail """ for key in TEST_CONFIG.keys(): config = TEST_CONFIG.copy() config.pop(key) with pytest.raises(ValueError) as e: JournalBackfiller(config) error = "Configuration error: The following keys must be provided: %s" % ( ",".join([key]), ) assert e.value.args[0] == error def test_config_ko_unknown_object_type(): """Parse arguments will fail if the object type is unknown """ backfiller = JournalBackfiller(TEST_CONFIG) with pytest.raises(ValueError) as e: backfiller.parse_arguments("unknown-object-type", 1, 2) error = ( "Object type unknown-object-type is not supported. 
" "The only possible values are %s" % (", ".join(sorted(PARTITION_KEY))) ) assert e.value.args[0] == error def test_compute_query_content(): query, where_args, column_aliases = compute_query("content", "\x000000", "\x000001") assert where_args == ["\x000000", "\x000001"] assert column_aliases == [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "status", "ctime", ] assert ( query == """ select sha1,sha1_git,sha256,blake2s256,length,status,ctime from content where (sha1) >= %s and (sha1) < %s """ ) def test_compute_query_skipped_content(): query, where_args, column_aliases = compute_query("skipped_content", None, None) assert where_args == [] assert column_aliases == [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", "reason", ] assert ( query == """ select sha1,sha1_git,sha256,blake2s256,length,ctime,status,reason from skipped_content """ ) def test_compute_query_origin_visit(): query, where_args, column_aliases = compute_query("origin_visit", 1, 10) assert where_args == [1, 10] assert column_aliases == [ "visit", "type", "origin", "date", ] assert ( query == """ select visit,type,origin.url as origin,date from origin_visit left join origin on origin_visit.origin=origin.id where (origin_visit.origin) >= %s and (origin_visit.origin) < %s """ ) def test_compute_query_release(): query, where_args, column_aliases = compute_query("release", "\x000002", "\x000003") assert where_args == ["\x000002", "\x000003"] assert column_aliases == [ "id", "date", - "date_offset", - "date_neg_utc_offset", "date_offset_bytes", "comment", "name", "synthetic", "target", "target_type", "author_id", "author_name", "author_email", "author_fullname", "raw_manifest", ] assert ( query == """ -select release.id as id,date,date_offset,date_neg_utc_offset,date_offset_bytes,comment,release.name as name,synthetic,target,target_type,a.id as author_id,a.name as author_name,a.email as author_email,a.fullname as author_fullname,raw_manifest +select release.id as id,date,date_offset_bytes,comment,release.name as name,synthetic,target,target_type,a.id as author_id,a.name as author_name,a.email as author_email,a.fullname as author_fullname,raw_manifest from release left join person a on release.author=a.id where (release.id) >= %s and (release.id) < %s """ # noqa ) @pytest.mark.parametrize("numbits", [2, 3, 8, 16]) def test_byte_ranges(numbits): ranges = list(byte_ranges(numbits)) assert len(ranges) == 2 ** numbits assert ranges[0][0] is None assert ranges[-1][1] is None bounds = [] for i, (left, right) in enumerate(zip(ranges[:-1], ranges[1:])): assert left[1] == right[0], f"Mismatched bounds in {i}th range" bounds.append(left[1]) assert bounds == sorted(bounds) def test_raw_extrinsic_metadata_target_ranges(): ranges = list(raw_extrinsic_metadata_target_ranges()) assert ranges[0][0] == "" assert ranges[-1][1] is None bounds = [] for i, (left, right) in enumerate(zip(ranges[:-1], ranges[1:])): assert left[1] == right[0], f"Mismatched bounds in {i}th range" bounds.append(left[1]) assert bounds == sorted(bounds) RANGE_GENERATORS = { "content": lambda start, end: [(None, None)], "skipped_content": lambda start, end: [(None, None)], "directory": lambda start, end: [(None, None)], "extid": lambda start, end: [(None, None)], "metadata_authority": lambda start, end: [(None, None)], "metadata_fetcher": lambda start, end: [(None, None)], "revision": lambda start, end: [(None, None)], "release": lambda start, end: [(None, None)], "snapshot": lambda start, end: [(None, None)], "origin": lambda start, end: 
diff --git a/swh/storage/tests/test_postgresql_converters.py b/swh/storage/tests/test_postgresql_converters.py
index 5f6d4937..0a52874c 100644
--- a/swh/storage/tests/test_postgresql_converters.py
+++ b/swh/storage/tests/test_postgresql_converters.py
@@ -1,335 +1,307 @@
 # Copyright (C) 2015-2021  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import datetime

 import pytest

 from swh.model.model import (
     ObjectType,
     Person,
     Release,
     Revision,
     RevisionType,
     Timestamp,
     TimestampWithTimezone,
 )
 from swh.model.swhids import ExtendedSWHID
 from swh.storage.postgresql import converters


 @pytest.mark.parametrize(
     "model_date,db_date",
     [
         (
             None,
             {
                 "timestamp": None,
                 "offset": 0,
                 "neg_utc_offset": None,
                 "offset_bytes": None,
             },
         ),
         (
             TimestampWithTimezone(
                 timestamp=Timestamp(seconds=1234567890, microseconds=0,),
-                offset=120,
-                negative_utc=False,
                 offset_bytes=b"+0200",
             ),
             {
                 "timestamp": "2009-02-13T23:31:30+00:00",
                 "offset": 120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"+0200",
             },
         ),
         (
             TimestampWithTimezone(
                 timestamp=Timestamp(seconds=1123456789, microseconds=0,),
-                offset=0,
-                negative_utc=True,
                 offset_bytes=b"-0000",
             ),
             {
                 "timestamp": "2005-08-07T23:19:49+00:00",
                 "offset": 0,
                 "neg_utc_offset": True,
                 "offset_bytes": b"-0000",
             },
         ),
         (
             TimestampWithTimezone(
                 timestamp=Timestamp(seconds=1234567890, microseconds=0,),
-                offset=42,
-                negative_utc=False,
                 offset_bytes=b"+0042",
             ),
             {
                 "timestamp": "2009-02-13T23:31:30+00:00",
                 "offset": 42,
                 "neg_utc_offset": False,
                 "offset_bytes": b"+0042",
             },
         ),
         (
             TimestampWithTimezone(
                 timestamp=Timestamp(seconds=1634366813, microseconds=0,),
-                offset=-120,
-                negative_utc=False,
                 offset_bytes=b"-0200",
             ),
             {
                 "timestamp": "2021-10-16T06:46:53+00:00",
                 "offset": -120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"-0200",
             },
         ),
         (
             TimestampWithTimezone(
-                timestamp=Timestamp(seconds=0, microseconds=0,),
-                offset=-120,
-                negative_utc=False,
-                offset_bytes=b"-0200",
+                timestamp=Timestamp(seconds=0, microseconds=0,), offset_bytes=b"-0200",
             ),
             {
                 "timestamp": "1970-01-01T00:00:00+00:00",
                 "offset": -120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"-0200",
             },
         ),
         (
             TimestampWithTimezone(
-                timestamp=Timestamp(seconds=0, microseconds=1,),
-                offset=-120,
-                negative_utc=False,
-                offset_bytes=b"-0200",
+                timestamp=Timestamp(seconds=0, microseconds=1,), offset_bytes=b"-0200",
             ),
             {
                 "timestamp": "1970-01-01T00:00:00.000001+00:00",
                 "offset": -120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"-0200",
             },
         ),
         (
             TimestampWithTimezone(
-                timestamp=Timestamp(seconds=-1, microseconds=0,),
-                offset=-120,
-                negative_utc=False,
-                offset_bytes=b"-0200",
+                timestamp=Timestamp(seconds=-1, microseconds=0,), offset_bytes=b"-0200",
             ),
             {
                 "timestamp": "1969-12-31T23:59:59+00:00",
                 "offset": -120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"-0200",
             },
         ),
         (
             TimestampWithTimezone(
-                timestamp=Timestamp(seconds=-1, microseconds=1,),
-                offset=-120,
-                negative_utc=False,
-                offset_bytes=b"-0200",
+                timestamp=Timestamp(seconds=-1, microseconds=1,), offset_bytes=b"-0200",
             ),
             {
                 "timestamp": "1969-12-31T23:59:59.000001+00:00",
                 "offset": -120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"-0200",
             },
         ),
         (
             TimestampWithTimezone(
                 timestamp=Timestamp(seconds=-3600, microseconds=0,),
-                offset=-120,
-                negative_utc=False,
                 offset_bytes=b"-0200",
             ),
             {
                 "timestamp": "1969-12-31T23:00:00+00:00",
                 "offset": -120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"-0200",
             },
         ),
         (
             TimestampWithTimezone(
                 timestamp=Timestamp(seconds=-3600, microseconds=1,),
-                offset=-120,
-                negative_utc=False,
                 offset_bytes=b"-0200",
             ),
             {
                 "timestamp": "1969-12-31T23:00:00.000001+00:00",
                 "offset": -120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"-0200",
             },
         ),
         (
             TimestampWithTimezone(
                 timestamp=Timestamp(seconds=1234567890, microseconds=0,),
-                offset=120,
-                negative_utc=False,
                 offset_bytes=b"+200",
             ),
             {
                 "timestamp": "2009-02-13T23:31:30+00:00",
                 "offset": 120,
                 "neg_utc_offset": False,
                 "offset_bytes": b"+200",
             },
         ),
     ],
 )
 def test_date(model_date, db_date):
     assert converters.date_to_db(model_date) == db_date
     assert (
         converters.db_to_date(
             date=None
             if db_date["timestamp"] is None
             else datetime.datetime.fromisoformat(db_date["timestamp"]),
-            offset=db_date["offset"],
-            neg_utc_offset=db_date["neg_utc_offset"],
             offset_bytes=db_date["offset_bytes"],
         )
         == model_date
     )


 def test_db_to_author():
     # when
     actual_author = converters.db_to_author(b"fullname", b"name", b"email")

     # then
     assert actual_author == Person(fullname=b"fullname", name=b"name", email=b"email",)


 def test_db_to_author_none():
     # when
     actual_author = converters.db_to_author(None, None, None)

     # then
     assert actual_author is None


 def test_db_to_revision():
     # when
     actual_revision = converters.db_to_revision(
         {
             "id": b"revision-id",
             "date": None,
             "date_offset": None,
             "date_neg_utc_offset": None,
             "date_offset_bytes": None,
             "committer_date": None,
             "committer_date_offset": None,
             "committer_date_neg_utc_offset": None,
             "committer_date_offset_bytes": None,
             "type": "git",
             "directory": b"dir-sha1",
             "message": b"commit message",
             "author_fullname": b"auth-fullname",
             "author_name": b"auth-name",
             "author_email": b"auth-email",
             "committer_fullname": b"comm-fullname",
             "committer_name": b"comm-name",
             "committer_email": b"comm-email",
             "metadata": {},
             "synthetic": False,
             "extra_headers": (),
             "raw_manifest": None,
             "parents": [b"123", b"456"],
         }
     )

     # then
     assert actual_revision == Revision(
         id=b"revision-id",
         author=Person(
             fullname=b"auth-fullname", name=b"auth-name", email=b"auth-email",
         ),
         date=None,
         committer=Person(
             fullname=b"comm-fullname", name=b"comm-name", email=b"comm-email",
         ),
         committer_date=None,
         type=RevisionType.GIT,
         directory=b"dir-sha1",
         message=b"commit message",
         metadata={},
         synthetic=False,
         extra_headers=(),
         parents=(b"123", b"456"),
     )


 def test_db_to_release():
     # when
     actual_release = converters.db_to_release(
         {
             "id": b"release-id",
             "target": b"revision-id",
             "target_type": "revision",
             "date": None,
             "date_offset": None,
             "date_neg_utc_offset": None,
             "date_offset_bytes": None,
             "name": b"release-name",
             "comment": b"release comment",
             "synthetic": True,
             "author_fullname": b"auth-fullname",
             "author_name": b"auth-name",
             "author_email": b"auth-email",
             "raw_manifest": None,
         }
     )

     # then
     assert actual_release == Release(
         author=Person(
             fullname=b"auth-fullname", name=b"auth-name", email=b"auth-email",
         ),
         date=None,
         id=b"release-id",
         name=b"release-name",
         message=b"release comment",
         synthetic=True,
         target=b"revision-id",
         target_type=ObjectType.REVISION,
     )


 def test_db_to_raw_extrinsic_metadata_raw_target():
     row = {
         "raw_extrinsic_metadata.target": "https://example.com/origin",
         "metadata_authority.type": "forge",
         "metadata_authority.url": "https://example.com",
         "metadata_fetcher.name": "swh.lister",
         "metadata_fetcher.version": "1.0.0",
         "discovery_date": datetime.datetime(
             2021, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc
         ),
         "format": "text/plain",
         "raw_extrinsic_metadata.metadata": b"metadata",
         "origin": None,
         "visit": None,
         "snapshot": None,
         "release": None,
         "revision": None,
         "path": None,
         "directory": None,
     }

     with pytest.deprecated_call():
         computed_rem = converters.db_to_raw_extrinsic_metadata(row)

     assert computed_rem.target == ExtendedSWHID.from_string(
         "swh:1:ori:5a7439b0b93a5d230b6a67b8e7e0f7dc3c9f6c70"
     )
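
Note: this converters diff is the visible effect of the swh.model >= 4.3.0 bump at the top of the patch: the redundant offset / neg_utc_offset arguments (model-side offset / negative_utc) disappear, leaving offset_bytes as the single source of truth for timezones. The expected "offset" values in the parametrize table can indeed be derived from offset_bytes alone; a hypothetical helper (ours, not the swh.model API) sketching that derivation, including the non-standard b"+200" case:

    def offset_minutes_from_bytes(offset_bytes: bytes) -> int:
        # Parse a git-style raw offset such as b"+0200" or b"-0000": the last
        # two digits are minutes, anything before them is hours, so the
        # non-standard b"+200" reads as 2 hours + 00 minutes = 120.
        sign = -1 if offset_bytes.startswith(b"-") else 1
        digits = offset_bytes.lstrip(b"+-").decode("ascii")
        hours, minutes = int(digits[:-2] or "0"), int(digits[-2:])
        return sign * (hours * 60 + minutes)

    assert offset_minutes_from_bytes(b"+0200") == 120
    assert offset_minutes_from_bytes(b"-0200") == -120
    assert offset_minutes_from_bytes(b"-0000") == 0  # sign kept as neg_utc_offset
    assert offset_minutes_from_bytes(b"+0042") == 42
    assert offset_minutes_from_bytes(b"+200") == 120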