diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py index 274cb35..98843a5 100644 --- a/swh/model/identifiers.py +++ b/swh/model/identifiers.py @@ -1,877 +1,878 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime from functools import lru_cache import hashlib import re from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import attr from .collections import ImmutableDict from .exceptions import ValidationError from .fields.hashes import validate_sha1 from .hashutil import MultiHash, hash_git_data, hash_to_hex ORIGIN = "origin" SNAPSHOT = "snapshot" REVISION = "revision" RELEASE = "release" DIRECTORY = "directory" CONTENT = "content" SWHID_NAMESPACE = "swh" SWHID_VERSION = 1 SWHID_TYPES = ["ori", "snp", "rel", "rev", "dir", "cnt"] SWHID_SEP = ":" SWHID_CTXT_SEP = ";" +SWHID_QUALIFIERS = {"origin", "anchor", "visit", "path", "lines"} + +SWHID_RE_RAW = ( + f"(?P<scheme>{SWHID_NAMESPACE})" + f"{SWHID_SEP}(?P<version>{SWHID_VERSION})" + f"{SWHID_SEP}(?P<object_type>{'|'.join(SWHID_TYPES)})" + f"{SWHID_SEP}(?P<object_id>[0-9a-f]{{40}})" + f"({SWHID_CTXT_SEP}(?P<qualifiers>\\S+))?" +) +SWHID_RE = re.compile(SWHID_RE_RAW) @lru_cache() def identifier_to_bytes(identifier): """Convert a text identifier to bytes. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 20 bytestring corresponding to the given identifier Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( "Wrong length for bytes identifier %s, expected 20" % len(identifier) ) return identifier if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( "Wrong length for str identifier %s, expected 40" % len(identifier) ) return bytes.fromhex(identifier) raise ValueError( "Wrong type for identifier %s, expected bytes or str" % identifier.__class__.__name__ ) @lru_cache() def identifier_to_str(identifier): """Convert an identifier to an hexadecimal string. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 40 string corresponding to the given identifier, hex encoded Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( "Wrong length for str identifier %s, expected 40" % len(identifier) ) return identifier if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( "Wrong length for bytes identifier %s, expected 20" % len(identifier) ) return binascii.hexlify(identifier).decode() raise ValueError( "Wrong type for identifier %s, expected bytes or str" % identifier.__class__.__name__ ) def content_identifier(content): """Return the intrinsic identifier for a content. A content's identifier is the sha1, sha1_git and sha256 checksums of its data. Args: content: a content conforming to the Software Heritage schema Returns: A dictionary with all the hashes for the data Raises: KeyError: if the content doesn't have a data member.
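For instance, a minimal sketch (the b"1984\n" payload and the comparison against MultiHash mirror the test suite; the exact set of hash keys comes from hashutil's default algorithms, e.g. "sha1", "sha1_git", "sha256")::

    content = {"data": b"1984\n"}
    hashes = content_identifier(content)
    # same digests as hashing the raw data directly
    assert hashes == MultiHash.from_data(content["data"]).digest()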
""" return MultiHash.from_data(content["data"]).digest() def directory_entry_sort_key(entry): """The sorting key for tree entries""" if entry["type"] == "dir": return entry["name"] + b"/" else: return entry["name"] @lru_cache() def _perms_to_bytes(perms): """Convert the perms value to its bytes representation""" oc = oct(perms)[2:] return oc.encode("ascii") def escape_newlines(snippet): """Escape the newlines present in snippet according to git rules. New lines in git manifests are escaped by indenting the next line by one space. """ if b"\n" in snippet: return b"\n ".join(snippet.split(b"\n")) else: return snippet def directory_identifier(directory): """Return the intrinsic identifier for a directory. A directory's identifier is the tree sha1 à la git of a directory listing, using the following algorithm, which is equivalent to the git algorithm for trees: 1. Entries of the directory are sorted using the name (or the name with '/' appended for directory entries) as key, in bytes order. 2. For each entry of the directory, the following bytes are output: - the octal representation of the permissions for the entry (stored in the 'perms' member), which is a representation of the entry type: - b'100644' (int 33188) for files - b'100755' (int 33261) for executable files - b'120000' (int 40960) for symbolic links - b'40000' (int 16384) for directories - b'160000' (int 57344) for references to revisions - an ascii space (b'\x20') - the entry's name (as raw bytes), stored in the 'name' member - a null byte (b'\x00') - the 20 byte long identifier of the object pointed at by the entry, stored in the 'target' member: - for files or executable files: their blob sha1_git - for symbolic links: the blob sha1_git of a file containing the link destination - for directories: their intrinsic identifier - for revisions: their intrinsic identifier (Note that there is no separator between entries) """ components = [] for entry in sorted(directory["entries"], key=directory_entry_sort_key): components.extend( [ _perms_to_bytes(entry["perms"]), b"\x20", entry["name"], b"\x00", identifier_to_bytes(entry["target"]), ] ) return identifier_to_str(hash_git_data(b"".join(components), "tree")) def format_date(date): """Convert a date object into an UTC timestamp encoded as ascii bytes. Git stores timestamps as an integer number of seconds since the UNIX epoch. However, Software Heritage stores timestamps as an integer number of microseconds (postgres type "datetime with timezone"). Therefore, we print timestamps with no microseconds as integers, and timestamps with microseconds as floating point values. We elide the trailing zeroes from microsecond values, to "future-proof" our representation if we ever need more precision in timestamps. """ if not isinstance(date, dict): raise ValueError("format_date only supports dicts, %r received" % date) seconds = date.get("seconds", 0) microseconds = date.get("microseconds", 0) if not microseconds: return str(seconds).encode() else: float_value = "%d.%06d" % (seconds, microseconds) return float_value.rstrip("0").encode() @lru_cache() def format_offset(offset, negative_utc=None): """Convert an integer number of minutes into an offset representation. The offset representation is [+-]hhmm where: - hh is the number of hours; - mm is the number of minutes. A null offset is represented as +0000. 
""" if offset < 0 or offset == 0 and negative_utc: sign = "-" else: sign = "+" hours = abs(offset) // 60 minutes = abs(offset) % 60 t = "%s%02d%02d" % (sign, hours, minutes) return t.encode() def normalize_timestamp(time_representation): """Normalize a time representation for processing by Software Heritage This function supports a numeric timestamp (representing a number of seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a :obj:`datetime.datetime` object (with timezone information), or a normalized Software Heritage time representation (idempotency). Args: time_representation: the representation of a timestamp Returns: dict: a normalized dictionary with three keys: - timestamp: a dict with two optional keys: - seconds: the integral number of seconds since the UNIX epoch - microseconds: the integral number of microseconds - offset: the timezone offset as a number of minutes relative to UTC - negative_utc: a boolean representing whether the offset is -0000 when offset = 0. """ if time_representation is None: return None negative_utc = False if isinstance(time_representation, dict): ts = time_representation["timestamp"] if isinstance(ts, dict): seconds = ts.get("seconds", 0) microseconds = ts.get("microseconds", 0) elif isinstance(ts, int): seconds = ts microseconds = 0 else: raise ValueError( "normalize_timestamp received non-integer timestamp member:" " %r" % ts ) offset = time_representation["offset"] if "negative_utc" in time_representation: negative_utc = time_representation["negative_utc"] if negative_utc is None: negative_utc = False elif isinstance(time_representation, datetime.datetime): seconds = int(time_representation.timestamp()) microseconds = time_representation.microsecond utcoffset = time_representation.utcoffset() if utcoffset is None: raise ValueError( "normalize_timestamp received datetime without timezone: %s" % time_representation ) # utcoffset is an integer number of minutes seconds_offset = utcoffset.total_seconds() offset = int(seconds_offset) // 60 elif isinstance(time_representation, int): seconds = time_representation microseconds = 0 offset = 0 else: raise ValueError( "normalize_timestamp received non-integer timestamp:" " %r" % time_representation ) return { "timestamp": {"seconds": seconds, "microseconds": microseconds,}, "offset": offset, "negative_utc": negative_utc, } def format_author(author): """Format the specification of an author. An author is either a byte string (passed unchanged), or a dict with three keys, fullname, name and email. If the fullname exists, return it; if it doesn't, we construct a fullname using the following heuristics: if the name value is None, we return the email in angle brackets, else, we return the name, a space, and the email in angle brackets. """ if isinstance(author, bytes) or author is None: return author if "fullname" in author: return author["fullname"] ret = [] if author["name"] is not None: ret.append(author["name"]) if author["email"] is not None: ret.append(b"".join([b"<", author["email"], b">"])) return b" ".join(ret) def format_manifest( headers: Iterable[Tuple[bytes, bytes]], message: Optional[bytes] = None, ) -> bytes: """Format a manifest comprised of a sequence of `headers` and an optional `message`. 
The manifest format, compatible with the git format for tag and commit objects, is as follows: - for each `key`, `value` in `headers`, emit: - the `key`, literally - an ascii space (``\\x20``) - the `value`, with newlines escaped using :func:`escape_newlines`, - an ascii newline (``\\x0a``) - if the `message` is not None, emit: - an ascii newline (``\\x0a``) - the `message`, literally Args: headers: a sequence of key/value headers stored in the manifest; message: an optional message used to trail the manifest. Returns: the formatted manifest as bytes """ entries: List[bytes] = [] for key, value in headers: entries.extend((key, b" ", escape_newlines(value), b"\n")) if message is not None: entries.extend((b"\n", message)) return b"".join(entries) def hash_manifest( type: str, headers: Iterable[Tuple[bytes, bytes]], message: Optional[bytes] = None, ): """Hash the manifest of an object of type `type`, comprised of a sequence of `headers` and an optional `message`. Before hashing, the manifest is serialized with the :func:`format_manifest` function. We then use the git "salted sha1" (:func:`swh.model.hashutil.hash_git_data`) with the given `type` to hash the manifest. Args: type: the type of object for which we're computing a manifest (e.g. "tag", "commit", ...) headers: a sequence of key/value headers stored in the manifest; message: an optional message used to trail the manifest. """ manifest = format_manifest(headers, message) return hash_git_data(manifest, type) def format_author_data(author, date_offset) -> bytes: """Format authorship data according to git standards. Git authorship data has two components: - an author specification, usually a name and email, but in practice an arbitrary bytestring - optionally, a timestamp with a UTC offset specification The authorship data is formatted thus:: `name and email`[ `timestamp` `utc_offset`] The timestamp is encoded as a (decimal) number of seconds since the UNIX epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we support fractional timestamps, using a dot as the separator for the decimal part. The utc offset is a number of minutes encoded as '[+-]HHMM'. Note that some tools can pass a negative offset corresponding to the UTC timezone ('-0000'), which is valid and is encoded as such. Args: author: an author specification (dict with two bytes values: name and email, or byte value) date_offset: a normalized date/time representation as returned by :func:`normalize_timestamp`. Returns: the byte string containing the authorship data """ ret = [format_author(author)] date_offset = normalize_timestamp(date_offset) if date_offset is not None: date_f = format_date(date_offset["timestamp"]) offset_f = format_offset(date_offset["offset"], date_offset["negative_utc"]) ret.extend([b" ", date_f, b" ", offset_f]) return b"".join(ret) def revision_identifier(revision): """Return the intrinsic identifier for a revision. The fields used for the revision identifier computation are: - directory - parents - author - author_date - committer - committer_date - extra_headers or metadata -> extra_headers - message A revision's identifier is the 'git'-checksum of a commit manifest constructed as follows (newlines are a single ASCII newline character):: tree <directory identifier> [for each parent in parents] parent <parent identifier> [end for each parents] author <author> <author_date> committer <committer> <committer_date> [for each key, value in extra_headers] <key> <encoded value> [end for each extra_headers] <message> The directory identifier is the ascii representation of its hexadecimal encoding.
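For instance, the manifest of the Linux 4.2-rc2 revision used in the test suite (one parent, no extra headers) would read as follows, 1436739030 -0700 being the encoding of 2015-07-12 15:10:30 in a UTC-0700 timezone::

    tree 85a74718d377195e1efd0843ba4f3260bad4fe07
    parent 01e2d0627a9a6edb24c37db45db5ecb31e9de808
    author Linus Torvalds <torvalds@linux-foundation.org> 1436739030 -0700
    committer Linus Torvalds <torvalds@linux-foundation.org> 1436739030 -0700

    Linux 4.2-rc2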
Author and committer are formatted with the :func:`format_author` function. Dates are formatted with the :func:`format_offset` function. Extra headers are an ordered list of [key, value] pairs. Keys are strings and get encoded to utf-8 for identifier computation. Values are either byte strings, unicode strings (that get encoded to utf-8), or integers (that get encoded to their utf-8 decimal representation). Multiline extra header values are escaped by indenting the continuation lines with one ascii space. If the message is None, the manifest ends with the last header. Else, the message is appended to the headers after an empty line. The checksum of the full manifest is computed using the 'commit' git object type. """ headers = [(b"tree", identifier_to_str(revision["directory"]).encode())] for parent in revision["parents"]: if parent: headers.append((b"parent", identifier_to_str(parent).encode())) headers.append( (b"author", format_author_data(revision["author"], revision["date"])) ) headers.append( ( b"committer", format_author_data(revision["committer"], revision["committer_date"]), ) ) # Handle extra headers metadata = revision.get("metadata") or {} extra_headers = revision.get("extra_headers", ()) if not extra_headers and "extra_headers" in metadata: extra_headers = metadata["extra_headers"] headers.extend(extra_headers) return identifier_to_str(hash_manifest("commit", headers, revision["message"])) def target_type_to_git(target_type): """Convert a software heritage target type to a git object type""" return { "content": b"blob", "directory": b"tree", "revision": b"commit", "release": b"tag", "snapshot": b"refs", }[target_type] def release_identifier(release): """Return the intrinsic identifier for a release.""" headers = [ (b"object", identifier_to_str(release["target"]).encode()), (b"type", target_type_to_git(release["target_type"])), (b"tag", release["name"]), ] if "author" in release and release["author"]: headers.append( (b"tagger", format_author_data(release["author"], release["date"])) ) return identifier_to_str(hash_manifest("tag", headers, release["message"])) def snapshot_identifier(snapshot, *, ignore_unresolved=False): """Return the intrinsic identifier for a snapshot. Snapshots are a set of named branches, which are pointers to objects at any level of the Software Heritage DAG. As well as pointing to other objects in the Software Heritage DAG, branches can also be *alias*es, in which case their target is the name of another branch in the same snapshot, or *dangling*, in which case the target is unknown (and represented by the ``None`` value). A snapshot identifier is a salted sha1 (using the git hashing algorithm with the ``snapshot`` object type) of a manifest following the algorithm: 1. Branches are sorted using the name as key, in bytes order. 2. 
For each branch, the following bytes are output: - the type of the branch target: - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` for the corresponding entries in the DAG; - ``alias`` for branches referencing another branch; - ``dangling`` for dangling branches - an ascii space (``\\x20``) - the branch name (as raw bytes) - a null byte (``\\x00``) - the length of the target identifier, as an ascii-encoded decimal number (``20`` for current intrinsic identifiers, ``0`` for dangling branches, the length of the target branch name for branch aliases) - a colon (``:``) - the identifier of the target object pointed at by the branch, stored in the 'target' member: - for contents: their *sha1_git* - for directories, revisions, releases or snapshots: their intrinsic identifier - for branch aliases, the name of the target branch (as raw bytes) - for dangling branches, the empty string Note that, akin to directory manifests, there is no separator between entries. Because of symbolic branches, identifiers are of arbitrary length but are length-encoded to avoid ambiguity. Args: snapshot (dict): the snapshot of which to compute the identifier. A single entry is needed, ``'branches'``, which is itself a :class:`dict` mapping each branch to its target ignore_unresolved (bool): if `True`, ignore unresolved branch aliases. Returns: str: the intrinsic identifier for `snapshot` """ unresolved = [] lines = [] for name, target in sorted(snapshot["branches"].items()): if not target: target_type = b"dangling" target_id = b"" elif target["target_type"] == "alias": target_type = b"alias" target_id = target["target"] if target_id not in snapshot["branches"] or target_id == name: unresolved.append((name, target_id)) else: target_type = target["target_type"].encode() target_id = identifier_to_bytes(target["target"]) lines.extend( [ target_type, b"\x20", name, b"\x00", ("%d:" % len(target_id)).encode(), target_id, ] ) if unresolved and not ignore_unresolved: raise ValueError( "Branch aliases unresolved: %s" % ", ".join("%s -> %s" % x for x in unresolved), unresolved, ) return identifier_to_str(hash_git_data(b"".join(lines), "snapshot")) def origin_identifier(origin): """Return the intrinsic identifier for an origin. 
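For instance (this url/digest pair is asserted by the OriginIdentifier test below)::

    origin_identifier({"url": "https://github.com/torvalds/linux"})
    # 'b63a575fe3faab7692c9f38fb09d4bb45651bb0f'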
An origin's identifier is the sha1 checksum of the entire origin URL """ return hashlib.sha1(origin["url"].encode("utf-8")).hexdigest() _object_type_map = { ORIGIN: {"short_name": "ori", "key_id": "id"}, SNAPSHOT: {"short_name": "snp", "key_id": "id"}, RELEASE: {"short_name": "rel", "key_id": "id"}, REVISION: {"short_name": "rev", "key_id": "id"}, DIRECTORY: {"short_name": "dir", "key_id": "id"}, CONTENT: {"short_name": "cnt", "key_id": "sha1_git"}, } +_swhid_type_map = { + "ori": ORIGIN, + "snp": SNAPSHOT, + "rel": RELEASE, + "rev": REVISION, + "dir": DIRECTORY, + "cnt": CONTENT, +} + @attr.s(frozen=True) class SWHID: """ Named tuple holding the relevant info associated to a SoftWare Heritage persistent IDentifier (SWHID) Args: namespace (str): the namespace of the identifier, defaults to ``swh`` scheme_version (int): the scheme version of the identifier, defaults to 1 object_type (str): the type of object the identifier points to, either ``content``, ``directory``, ``release``, ``revision`` or ``snapshot`` object_id (str): object's identifier metadata (dict): optional dict filled with metadata related to pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Once created, it contains the following attributes: Attributes: namespace (str): the namespace of the identifier scheme_version (int): the scheme version of the identifier object_type (str): the type of object the identifier points to object_id (str): hexadecimal representation of the object hash metadata (dict): metadata related to the pointed object To get the raw SWHID string from an instance of this named tuple, use the :func:`str` function:: swhid = SWHID( object_type='content', object_id='8ff44f081d43176474b267de5451f2c2e88089d0' ) swhid_str = str(swhid) # 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0' """ - namespace = attr.ib(type=str, default="swh") - scheme_version = attr.ib(type=int, default=1) + namespace = attr.ib(type=str, default=SWHID_NAMESPACE) + scheme_version = attr.ib(type=int, default=SWHID_VERSION) object_type = attr.ib(type=str, default="") object_id = attr.ib(type=str, converter=hash_to_hex, default="") # type: ignore metadata = attr.ib( type=ImmutableDict[str, Any], converter=ImmutableDict, default=ImmutableDict() ) @namespace.validator def check_namespace(self, attribute, value): if value != SWHID_NAMESPACE: raise ValidationError( - f"Invalid SWHID: namespace is '{value}' but must be '{SWHID_NAMESPACE}'" + "Invalid SWHID: invalid namespace: %(namespace)s", + params={"namespace": value}, ) @scheme_version.validator def check_scheme_version(self, attribute, value): if value != SWHID_VERSION: raise ValidationError( - f"Invalid SWHID: version is {value} but must be {SWHID_VERSION}" + "Invalid SWHID: invalid version: %(version)s", params={"version": value} ) @object_type.validator def check_object_type(self, attribute, value): if value not in _object_type_map: - supported_types = ", ".join(_object_type_map.keys()) raise ValidationError( - f"Invalid SWHID: object type is {value} but must be " - f"one of {supported_types}" + "Invalid SWHID: invalid type: %(object_type)s", + params={"object_type": value}, ) @object_id.validator def check_object_id(self, attribute, value): - validate_sha1(value) # can raise if invalid hash + try: + validate_sha1(value) # can raise if invalid hash + except ValidationError: + raise ValidationError( + "Invalid SWHID: invalid checksum: %(object_id)s", + params={"object_id": value}, + ) from None + + @metadata.validator + def
check_qualifiers(self, attribute, value): + for k in value: + if k not in SWHID_QUALIFIERS: + raise ValidationError( + "Invalid SWHID: unknown qualifier: %(qualifier)s", + params={"qualifier": k}, + ) def to_dict(self) -> Dict[str, Any]: return attr.asdict(self) def __str__(self) -> str: o = _object_type_map.get(self.object_type) assert o swhid = SWHID_SEP.join( [self.namespace, str(self.scheme_version), o["short_name"], self.object_id] ) if self.metadata: for k, v in self.metadata.items(): swhid += "%s%s=%s" % (SWHID_CTXT_SEP, k, v) return swhid def swhid( object_type: str, object_id: Union[str, Dict[str, Any]], scheme_version: int = 1, metadata: Union[ImmutableDict[str, Any], Dict[str, Any]] = ImmutableDict(), ) -> str: """Compute :ref:`persistent-identifiers` Args: object_type: object's type, either ``content``, ``directory``, ``release``, ``revision`` or ``snapshot`` object_id: object's identifier scheme_version: SWHID scheme version, defaults to 1 metadata: metadata related to the pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Returns: the SWHID of the object """ if isinstance(object_id, dict): o = _object_type_map[object_type] object_id = object_id[o["key_id"]] swhid = SWHID( scheme_version=scheme_version, object_type=object_type, object_id=object_id, metadata=metadata, # type: ignore # mypy can't properly unify types ) return str(swhid) -CONTEXT_QUALIFIERS = {"origin", "anchor", "visit", "path", "lines"} - - def parse_swhid(swhid: str) -> SWHID: - """Parse :ref:`persistent-identifiers`. + """Parse a Software Heritage identifier (SWHID) from string (see: + :ref:`persistent-identifiers`.) Args: swhid (str): A persistent identifier - Raises: - swh.model.exceptions.ValidationError: in case of: - - * missing mandatory values (4) - * invalid namespace supplied - * invalid version supplied - * invalid type supplied - * missing hash - * invalid hash identifier supplied - Returns: a named tuple holding the parsing result - """ - if re.search(r"[ \t\n\r\f\v]", swhid): - raise ValidationError("Invalid SwHID: SWHIDs cannot contain whitespaces") - - # ; - swhid_parts = swhid.split(SWHID_CTXT_SEP) - swhid_data = swhid_parts.pop(0).split(":") - - if len(swhid_data) != 4: - raise ValidationError( - "Invalid SWHID, format must be 'swh:1:OBJECT_TYPE:OBJECT_ID'" - ) - - # Checking for parsing errors - _ns, _version, _type, _id = swhid_data - - for otype, data in _object_type_map.items(): - if _type == data["short_name"]: - _type = otype - break + Raises: + swh.model.exceptions.ValidationError: if passed string is not a valid SWHID - if not _id: + """ + m = SWHID_RE.fullmatch(swhid) + if not m: raise ValidationError( - "Invalid SWHID: missing OBJECT_ID (as a 40 hex digit string)" - ) - - _metadata = {} - for part in swhid_parts: - try: - qualifier, val = part.split("=") - _metadata[qualifier] = val - except Exception: - raise ValidationError( - "Invalid SWHID: contextual data must be a ;-separated list of " - "key=value pairs" - ) - - wrong_qualifiers = set(_metadata) - set(CONTEXT_QUALIFIERS) - if wrong_qualifiers: - error_msg = ( - f"Invalid SWHID: Wrong qualifiers {', '.join(wrong_qualifiers)}. 
" - f"The qualifiers must be one of {', '.join(CONTEXT_QUALIFIERS)}" + "Invalid SWHID: invalid syntax: %(swhid)s", params={"swhid": swhid} ) - raise ValidationError(error_msg) + parts = m.groupdict() + + _qualifiers = {} + qualifiers_raw = parts["qualifiers"] + if qualifiers_raw: + for qualifier in qualifiers_raw.split(SWHID_CTXT_SEP): + try: + k, v = qualifier.split("=") + except ValueError: + raise ValidationError( + "Invalid SWHID: invalid qualifier: %(qualifier)s", + params={"qualifier": qualifier}, + ) + _qualifiers[k] = v return SWHID( - _ns, - int(_version), - _type, - _id, - _metadata, # type: ignore # mypy can't properly unify types + parts["scheme"], + int(parts["version"]), + _swhid_type_map[parts["object_type"]], + parts["object_id"], + _qualifiers, # type: ignore # mypy can't properly unify types ) diff --git a/swh/model/tests/test_identifiers.py b/swh/model/tests/test_identifiers.py index 73515c6..6041bda 100644 --- a/swh/model/tests/test_identifiers.py +++ b/swh/model/tests/test_identifiers.py @@ -1,1175 +1,1157 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime import unittest import pytest from swh.model import hashutil, identifiers from swh.model.exceptions import ValidationError from swh.model.hashutil import hash_to_bytes as _x from swh.model.identifiers import ( CONTENT, DIRECTORY, RELEASE, REVISION, SNAPSHOT, SWHID, normalize_timestamp, ) class UtilityFunctionsIdentifier(unittest.TestCase): def setUp(self): self.str_id = "c2e41aae41ac17bd4a650770d6ee77f62e52235b" self.bytes_id = binascii.unhexlify(self.str_id) self.bad_type_id = object() def test_identifier_to_bytes(self): for id in [self.str_id, self.bytes_id]: self.assertEqual(identifiers.identifier_to_bytes(id), self.bytes_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(id[:-2]) self.assertIn("length", str(cm.exception)) with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(self.bad_type_id) self.assertIn("type", str(cm.exception)) def test_identifier_to_str(self): for id in [self.str_id, self.bytes_id]: self.assertEqual(identifiers.identifier_to_str(id), self.str_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_str(id[:-2]) self.assertIn("length", str(cm.exception)) with self.assertRaises(ValueError) as cm: identifiers.identifier_to_str(self.bad_type_id) self.assertIn("type", str(cm.exception)) class UtilityFunctionsDateOffset(unittest.TestCase): def setUp(self): self.dates = { b"1448210036": {"seconds": 1448210036, "microseconds": 0,}, b"1448210036.002342": {"seconds": 1448210036, "microseconds": 2342,}, b"1448210036.12": {"seconds": 1448210036, "microseconds": 120000,}, } self.broken_dates = [ 1448210036.12, ] self.offsets = { 0: b"+0000", -630: b"-1030", 800: b"+1320", } def test_format_date(self): for date_repr, date in self.dates.items(): self.assertEqual(identifiers.format_date(date), date_repr) def test_format_date_fail(self): for date in self.broken_dates: with self.assertRaises(ValueError): identifiers.format_date(date) def test_format_offset(self): for offset, res in self.offsets.items(): self.assertEqual(identifiers.format_offset(offset), res) class ContentIdentifier(unittest.TestCase): def setUp(self): self.content = { "status": "visible", "length": 5, "data": b"1984\n", 
"ctime": datetime.datetime( 2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc ), } self.content_id = hashutil.MultiHash.from_data(self.content["data"]).digest() def test_content_identifier(self): self.assertEqual(identifiers.content_identifier(self.content), self.content_id) directory_example = { "id": "d7ed3d2c31d608823be58b1cbe57605310615231", "entries": [ { "type": "file", "perms": 33188, "name": b"README", "target": _x("37ec8ea2110c0b7a32fbb0e872f6e7debbf95e21"), }, { "type": "file", "perms": 33188, "name": b"Rakefile", "target": _x("3bb0e8592a41ae3185ee32266c860714980dbed7"), }, { "type": "dir", "perms": 16384, "name": b"app", "target": _x("61e6e867f5d7ba3b40540869bc050b0c4fed9e95"), }, { "type": "file", "perms": 33188, "name": b"1.megabyte", "target": _x("7c2b2fbdd57d6765cdc9d84c2d7d333f11be7fb3"), }, { "type": "dir", "perms": 16384, "name": b"config", "target": _x("591dfe784a2e9ccc63aaba1cb68a765734310d98"), }, { "type": "dir", "perms": 16384, "name": b"public", "target": _x("9588bf4522c2b4648bfd1c61d175d1f88c1ad4a5"), }, { "type": "file", "perms": 33188, "name": b"development.sqlite3", "target": _x("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"), }, { "type": "dir", "perms": 16384, "name": b"doc", "target": _x("154705c6aa1c8ead8c99c7915373e3c44012057f"), }, { "type": "dir", "perms": 16384, "name": b"db", "target": _x("85f157bdc39356b7bc7de9d0099b4ced8b3b382c"), }, { "type": "dir", "perms": 16384, "name": b"log", "target": _x("5e3d3941c51cce73352dff89c805a304ba96fffe"), }, { "type": "dir", "perms": 16384, "name": b"script", "target": _x("1b278423caf176da3f3533592012502aa10f566c"), }, { "type": "dir", "perms": 16384, "name": b"test", "target": _x("035f0437c080bfd8711670b3e8677e686c69c763"), }, { "type": "dir", "perms": 16384, "name": b"vendor", "target": _x("7c0dc9ad978c1af3f9a4ce061e50f5918bd27138"), }, { "type": "rev", "perms": 57344, "name": b"will_paginate", "target": _x("3d531e169db92a16a9a8974f0ae6edf52e52659e"), }, # in git order, the dir named "order" should be between the files # named "order." and "order0" { "type": "dir", "perms": 16384, "name": b"order", "target": _x("62cdb7020ff920e5aa642c3d4066950dd1f01f4d"), }, { "type": "file", "perms": 16384, "name": b"order.", "target": _x("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), }, { "type": "file", "perms": 16384, "name": b"order0", "target": _x("bbe960a25ea311d21d40669e93df2003ba9b90a2"), }, ], } +dummy_qualifiers = {"origin": "https://example.com", "lines": "42"} + class DirectoryIdentifier(unittest.TestCase): def setUp(self): self.directory = directory_example self.empty_directory = { "id": "4b825dc642cb6eb9a060e54bf8d69288fbee4904", "entries": [], } def test_dir_identifier(self): self.assertEqual( identifiers.directory_identifier(self.directory), self.directory["id"] ) def test_dir_identifier_entry_order(self): # Reverse order of entries, check the id is still the same. 
directory = {"entries": reversed(self.directory["entries"])} self.assertEqual( identifiers.directory_identifier(directory), self.directory["id"] ) def test_dir_identifier_empty_directory(self): self.assertEqual( identifiers.directory_identifier(self.empty_directory), self.empty_directory["id"], ) linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) revision_example = { "id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590", "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds <torvalds@linux-foundation.org>", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds <torvalds@linux-foundation.org>", }, "committer_date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "message": b"Linux 4.2-rc2\n", "type": "git", "synthetic": False, } class RevisionIdentifier(unittest.TestCase): def setUp(self): gpgsig = b"""\ -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.13 (Darwin) iQIcBAABAgAGBQJVJcYsAAoJEBiY3kIkQRNJVAUQAJ8/XQIfMqqC5oYeEFfHOPYZ L7qy46bXHVBa9Qd8zAJ2Dou3IbI2ZoF6/Et89K/UggOycMlt5FKV/9toWyuZv4Po L682wonoxX99qvVTHo6+wtnmYO7+G0f82h+qHMErxjP+I6gzRNBvRr+SfY7VlGdK wikMKOMWC5smrScSHITnOq1Ews5pe3N7qDYMzK0XVZmgDoaem4RSWMJs4My/qVLN e0CqYWq2A22GX7sXl6pjneJYQvcAXUX+CAzp24QnPSb+Q22Guj91TcxLFcHCTDdn qgqMsEyMiisoglwrCbO+D+1xq9mjN9tNFWP66SQ48mrrHYTBV5sz9eJyDfroJaLP CWgbDTgq6GzRMehHT3hXfYS5NNatjnhkNISXR7pnVP/obIi/vpWh5ll6Gd8q26z+ a/O41UzOaLTeNI365MWT4/cnXohVLRG7iVJbAbCxoQmEgsYMRc/pBAzWJtLfcB2G jdTswYL6+MUdL8sB9pZ82D+BP/YAdHe69CyTu1lk9RT2pYtI/kkfjHubXBCYEJSG +VGllBbYG6idQJpyrOYNRJyrDi9yvDJ2W+S0iQrlZrxzGBVGTB/y65S8C+2WTBcE lf1Qb5GDsQrZWgD+jtWTywOYHtCBwyCKSAXxSARMbNPeak9WPlcW/Jmu+fUcMe2x dg1KdHOa34shrKDaOVzW =od6m -----END PGP SIGNATURE-----""" self.revision = revision_example self.revision_none_metadata = { "id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590", "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "metadata": None, } self.synthetic_revision = { "id": b"\xb2\xa7\xe1&\x04\x92\xe3D\xfa\xb3\xcb\xf9\x1b\xc1<\x91" b"\xe0T&\xfd", "author": { "name": b"Software Heritage", "email": b"robot@softwareheritage.org", }, "date": { "timestamp": {"seconds": 1437047495}, "offset": 0, "negative_utc": False, }, "type": "tar", "committer": { "name": b"Software Heritage", "email": b"robot@softwareheritage.org", }, "committer_date": 1437047495, "synthetic": True, "parents": [None], "message": b"synthetic revision message\n", "directory": b"\xd1\x1f\x00\xa6\xa0\xfe\xa6\x05SA\xd2U\x84\xb5\xa9" b"e\x16\xc0\xd2\xb8", "metadata": { "original_artifact": [ { "archive_type": "tar", "name": "gcc-5.2.0.tar.bz2", "sha1_git": "39d281aff934d44b439730057e55b055e206a586", "sha1": "fe3f5390949d47054b613edc36c557eb1d51c18e", "sha256": "5f835b04b5f7dd4f4d2dc96190ec1621b8d89f" "2dc6f638f9f8bc1b1014ba8cad", } ] }, } # cat commit.txt | git hash-object -t commit --stdin self.revision_with_extra_headers = { "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45",
"directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "extra_headers": ( (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"), (b"svn-revision", b"10"), ), } self.revision_with_gpgsig = { "id": "44cc742a8ca17b9c279be4cc195a93a6ef7a320e", "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "extra_headers": ((b"gpgsig", gpgsig),), "message": b"""Merge branch 'master' of git://github.com/alexhenrie/git-po * 'master' of git://github.com/alexhenrie/git-po: l10n: ca.po: update translation """, } self.revision_no_message = { "id": "4cfc623c9238fa92c832beed000ce2d003fd8333", "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "message": None, } self.revision_empty_message = { "id": "7442cd78bd3b4966921d6a7f7447417b7acb15eb", "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "message": b"", } self.revision_only_fullname = { "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45", "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": {"fullname": b"Linus Torvalds ",}, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "fullname": b"Linus Torvalds ", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "extra_headers": ( (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"), (b"svn-revision", b"10"), ), } def test_revision_identifier(self): self.assertEqual( identifiers.revision_identifier(self.revision), identifiers.identifier_to_str(self.revision["id"]), ) def test_revision_identifier_none_metadata(self): self.assertEqual( identifiers.revision_identifier(self.revision_none_metadata), identifiers.identifier_to_str(self.revision_none_metadata["id"]), ) def test_revision_identifier_synthetic(self): self.assertEqual( 
identifiers.revision_identifier(self.synthetic_revision), identifiers.identifier_to_str(self.synthetic_revision["id"]), ) def test_revision_identifier_with_extra_headers(self): self.assertEqual( identifiers.revision_identifier(self.revision_with_extra_headers), identifiers.identifier_to_str(self.revision_with_extra_headers["id"]), ) def test_revision_identifier_with_gpgsig(self): self.assertEqual( identifiers.revision_identifier(self.revision_with_gpgsig), identifiers.identifier_to_str(self.revision_with_gpgsig["id"]), ) def test_revision_identifier_no_message(self): self.assertEqual( identifiers.revision_identifier(self.revision_no_message), identifiers.identifier_to_str(self.revision_no_message["id"]), ) def test_revision_identifier_empty_message(self): self.assertEqual( identifiers.revision_identifier(self.revision_empty_message), identifiers.identifier_to_str(self.revision_empty_message["id"]), ) def test_revision_identifier_only_fullname(self): self.assertEqual( identifiers.revision_identifier(self.revision_only_fullname), identifiers.identifier_to_str(self.revision_only_fullname["id"]), ) release_example = { "id": "2b10839e32c4c476e9d94492756bb1a3e1ec4aa8", "target": b't\x1b"R\xa5\xe1Ml`\xa9\x13\xc7z`\x99\xab\xe7:\x85J', "target_type": "revision", "name": b"v2.6.14", "author": { "name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org", "fullname": b"Linus Torvalds <torvalds@g5.osdl.org>", }, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": b"""\ Linux 2.6.14 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.1 (GNU/Linux) iD8DBQBDYWq6F3YsRnbiHLsRAmaeAJ9RCez0y8rOBbhSv344h86l/VVcugCeIhO1 wdLOnvj91G4wxYqrvThthbE= =7VeT -----END PGP SIGNATURE----- """, "synthetic": False, } class ReleaseIdentifier(unittest.TestCase): def setUp(self): linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) self.release = release_example self.release_no_author = { "id": b"&y\x1a\x8b\xcf\x0em3\xf4:\xefv\x82\xbd\xb5U#mV\xde", "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", "target_type": "revision", "name": b"v2.6.12", "message": b"""\ This is the final 2.6.12 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.2.4 (GNU/Linux) iD8DBQBCsykyF3YsRnbiHLsRAvPNAJ482tCZwuxp/bJRz7Q98MHlN83TpACdHr37 o6X/3T+vm8K3bf3driRr34c= =sBHn -----END PGP SIGNATURE----- """, "synthetic": False, } self.release_no_message = { "id": "b6f4f446715f7d9543ef54e41b62982f0db40045", "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", "target_type": "revision", "name": b"v2.6.12", "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": None, } self.release_empty_message = { "id": "71a0aea72444d396575dc25ac37fec87ee3c6492", "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", "target_type": "revision", "name": b"v2.6.12", "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": b"", } self.release_negative_utc = { "id": "97c8d2573a001f88e72d75f596cf86b12b82fd01", "name": b"20081029", "target": "54e9abca4c77421e2921f5f156c9fe4a9f7441c7", "target_type": "revision", "date": { "timestamp": {"seconds": 1225281976}, "offset": 0, "negative_utc": True, }, "author": { "name": b"Otavio Salvador", "email": b"otavio@debian.org", "id": 17640, }, "synthetic": False, "message": b"tagging version 20081029\n\nr56558\n", } self.release_newline_in_author = { "author": { "email": b"esycat@gmail.com", "fullname": b"Eugene
Janusov\n<esycat@gmail.com>", "name": b"Eugene Janusov\n", }, "date": { "negative_utc": None, "offset": 600, "timestamp": {"microseconds": 0, "seconds": 1377480558,}, }, "id": b"\\\x98\xf5Y\xd04\x16-\xe2->\xbe\xb9T3\xe6\xf8\x88R1", "message": b"Release of v0.3.2.", "name": b"0.3.2", "synthetic": False, "target": (b"\xc0j\xa3\xd9;x\xa2\x86\\I5\x17" b"\x000\xf8\xc2\xd79o\xd3"), "target_type": "revision", } self.release_snapshot_target = dict(self.release) self.release_snapshot_target["target_type"] = "snapshot" self.release_snapshot_target["id"] = "c29c3ddcc6769a04e54dd69d63a6fdcbc566f850" def test_release_identifier(self): self.assertEqual( identifiers.release_identifier(self.release), identifiers.identifier_to_str(self.release["id"]), ) def test_release_identifier_no_author(self): self.assertEqual( identifiers.release_identifier(self.release_no_author), identifiers.identifier_to_str(self.release_no_author["id"]), ) def test_release_identifier_no_message(self): self.assertEqual( identifiers.release_identifier(self.release_no_message), identifiers.identifier_to_str(self.release_no_message["id"]), ) def test_release_identifier_empty_message(self): self.assertEqual( identifiers.release_identifier(self.release_empty_message), identifiers.identifier_to_str(self.release_empty_message["id"]), ) def test_release_identifier_negative_utc(self): self.assertEqual( identifiers.release_identifier(self.release_negative_utc), identifiers.identifier_to_str(self.release_negative_utc["id"]), ) def test_release_identifier_newline_in_author(self): self.assertEqual( identifiers.release_identifier(self.release_newline_in_author), identifiers.identifier_to_str(self.release_newline_in_author["id"]), ) def test_release_identifier_snapshot_target(self): self.assertEqual( identifiers.release_identifier(self.release_snapshot_target), identifiers.identifier_to_str(self.release_snapshot_target["id"]), ) snapshot_example = { "id": _x("6e65b86363953b780d92b0a928f3e8fcdd10db36"), "branches": { b"directory": { "target": _x("1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8"), "target_type": "directory", }, b"content": { "target": _x("fe95a46679d128ff167b7c55df5d02356c5a1ae1"), "target_type": "content", }, b"alias": {"target": b"revision", "target_type": "alias",}, b"revision": { "target": _x("aafb16d69fd30ff58afdd69036a26047f3aebdc6"), "target_type": "revision", }, b"release": { "target": _x("7045404f3d1c54e6473c71bbb716529fbad4be24"), "target_type": "release", }, b"snapshot": { "target": _x("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), "target_type": "snapshot", }, b"dangling": None, }, } class SnapshotIdentifier(unittest.TestCase): def setUp(self): super().setUp() self.empty = { "id": "1a8893e6a86f444e8be8e7bda6cb34fb1735a00e", "branches": {}, } self.dangling_branch = { "id": "c84502e821eb21ed84e9fd3ec40973abc8b32353", "branches": {b"HEAD": None,}, } self.unresolved = { "id": "84b4548ea486e4b0a7933fa541ff1503a0afe1e0", "branches": {b"foo": {"target": b"bar", "target_type": "alias",},}, } self.all_types = snapshot_example def test_empty_snapshot(self): self.assertEqual( identifiers.snapshot_identifier(self.empty), identifiers.identifier_to_str(self.empty["id"]), ) def test_dangling_branch(self): self.assertEqual( identifiers.snapshot_identifier(self.dangling_branch), identifiers.identifier_to_str(self.dangling_branch["id"]), ) def test_unresolved(self): with self.assertRaisesRegex(ValueError, "b'foo' -> b'bar'"): identifiers.snapshot_identifier(self.unresolved) def test_unresolved_force(self): self.assertEqual(
identifiers.snapshot_identifier(self.unresolved, ignore_unresolved=True,), identifiers.identifier_to_str(self.unresolved["id"]), ) def test_all_types(self): self.assertEqual( identifiers.snapshot_identifier(self.all_types), identifiers.identifier_to_str(self.all_types["id"]), ) def test_swhid(self): _snapshot_id = _x("c7c108084bc0bf3d81436bf980b46e98bd338453") _release_id = "22ece559cc7cc2364edc5e5593d63ae8bd229f9f" _revision_id = "309cf2674ee7a0749978cf8265ab91a60aea0f7d" _directory_id = "d198bc9d7a6bcf6db04f476d29314f157507d505" _content_id = "94a9ed024d3859793618152ea559a168bbcbb5e2" _snapshot = {"id": _snapshot_id} _release = {"id": _release_id} _revision = {"id": _revision_id} _directory = {"id": _directory_id} _content = {"sha1_git": _content_id} for full_type, _hash, expected_swhid, version, _meta in [ ( SNAPSHOT, _snapshot_id, "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", None, {}, ), ( RELEASE, _release_id, "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", 1, {}, ), ( REVISION, _revision_id, "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", None, {}, ), ( DIRECTORY, _directory_id, "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", None, {}, ), ( CONTENT, _content_id, "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", 1, {}, ), ( SNAPSHOT, _snapshot, "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", None, {}, ), ( RELEASE, _release, "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", 1, {}, ), ( REVISION, _revision, "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", None, {}, ), ( DIRECTORY, _directory, "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", None, {}, ), ( CONTENT, _content, "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", 1, {}, ), ( CONTENT, _content, "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2;origin=1", 1, {"origin": "1"}, ), ]: if version: actual_value = identifiers.swhid( full_type, _hash, version, metadata=_meta ) else: actual_value = identifiers.swhid(full_type, _hash, metadata=_meta) self.assertEqual(actual_value, expected_swhid) def test_swhid_wrong_input(self): _snapshot_id = "notahash4bc0bf3d81436bf980b46e98bd338453" _snapshot = {"id": _snapshot_id} for _type, _hash in [ (SNAPSHOT, _snapshot_id), (SNAPSHOT, _snapshot), - ("foo", ""), + ("lines", "42"), ]: with self.assertRaises(ValidationError): identifiers.swhid(_type, _hash) def test_parse_swhid(self): for swhid, _type, _version, _hash in [ ( "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", CONTENT, 1, "94a9ed024d3859793618152ea559a168bbcbb5e2", ), ( "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", DIRECTORY, 1, "d198bc9d7a6bcf6db04f476d29314f157507d505", ), ( "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", REVISION, 1, "309cf2674ee7a0749978cf8265ab91a60aea0f7d", ), ( "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", RELEASE, 1, "22ece559cc7cc2364edc5e5593d63ae8bd229f9f", ), ( "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", SNAPSHOT, 1, "c7c108084bc0bf3d81436bf980b46e98bd338453", ), ]: expected_result = SWHID( namespace="swh", scheme_version=_version, object_type=_type, object_id=_hash, metadata={}, ) actual_result = identifiers.parse_swhid(swhid) self.assertEqual(actual_result, expected_result) for swhid, _type, _version, _hash, _metadata in [ ( "swh:1:cnt:9c95815d9e9d91b8dae8e05d8bbc696fe19f796b;lines=1-18;origin=https://github.com/python/cpython", # noqa CONTENT, 1, "9c95815d9e9d91b8dae8e05d8bbc696fe19f796b", {"lines": "1-18", "origin": "https://github.com/python/cpython"}, ), ( 
"swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=deb://Debian/packages/linuxdoc-tools", # noqa DIRECTORY, 1, "0b6959356d30f1a4e9b7f6bca59b9a336464c03d", {"origin": "deb://Debian/packages/linuxdoc-tools"}, ), ]: expected_result = SWHID( namespace="swh", scheme_version=_version, object_type=_type, object_id=_hash, metadata=_metadata, ) actual_result = identifiers.parse_swhid(swhid) self.assertEqual(actual_result, expected_result) self.assertEqual( expected_result.to_dict(), { "namespace": "swh", "scheme_version": _version, "object_type": _type, "object_id": _hash, "metadata": _metadata, }, ) class OriginIdentifier(unittest.TestCase): def setUp(self): self.origin = { "url": "https://github.com/torvalds/linux", } def test_content_identifier(self): self.assertEqual( identifiers.origin_identifier(self.origin), "b63a575fe3faab7692c9f38fb09d4bb45651bb0f", ) TS_DICTS = [ ( {"timestamp": 12345, "offset": 0}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": False}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": False}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": {"seconds": 12345}, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": None, }, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( { "timestamp": {"seconds": 12345, "microseconds": 100}, "offset": 0, "negative_utc": None, }, { "timestamp": {"seconds": 12345, "microseconds": 100}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": True}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": True, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ] @pytest.mark.parametrize("dict_input,expected", TS_DICTS) def test_normalize_timestamp_dict(dict_input, expected): assert normalize_timestamp(dict_input) == expected TS_DICTS_INVALID_TIMESTAMP = [ {"timestamp": 1.2, "offset": 0}, {"timestamp": "1", "offset": 0}, # these below should really also trigger a ValueError... 
# {"timestamp": {"seconds": "1"}, "offset": 0}, # {"timestamp": {"seconds": 1.2}, "offset": 0}, # {"timestamp": {"seconds": 1.2}, "offset": 0}, ] @pytest.mark.parametrize("dict_input", TS_DICTS_INVALID_TIMESTAMP) def test_normalize_timestamp_dict_invalid_timestamp(dict_input): with pytest.raises(ValueError, match="non-integer timestamp"): normalize_timestamp(dict_input) @pytest.mark.parametrize( "invalid_swhid", [ "swh:1:cnt", "swh:1:", "swh:", "swh:1:cnt:", "foo:1:cnt:abc8bc9d7a6bcf6db04f476d29314f157507d505", "swh:2:dir:def8bc9d7a6bcf6db04f476d29314f157507d505", "swh:1:foo:fed8bc9d7a6bcf6db04f476d29314f157507d505", "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;invalid;malformed", "swh:1:snp:gh6959356d30f1a4e9b7f6bca59b9a336464c03d", "swh:1:snp:foo", # wrong qualifier: ori should be origin "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa # wrong qualifier: anc should be anchor "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anc=1;visit=1;path=/", # noqa # wrong qualifier: vis should be visit "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=1;vis=1;path=/", # noqa # wrong qualifier: pa should be path "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=1;visit=1;pa=/", # noqa # wrong qualifier: line should be lines "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;line=10;origin=something;anchor=1;visit=1;path=/", # noqa # wrong qualifier value: it contains space before of after "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin= https://some-url", # noqa "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=some-anchor ", # noqa "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=some-anchor ;visit=1", # noqa # invalid swhid: whitespaces "swh :1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa "swh: 1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa "swh: 1: dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa "swh:1: dir: 0b6959356d30f1a4e9b7f6bca59b9a336464c03d", "swh:1: dir: 0b6959356d30f1a4e9b7f6bca59b9a336464c03d; origin=blah", "swh:1: dir: 0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", # other whitespaces "swh\t:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", "swh:1\n:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", "swh:1:\rdir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d\f;lines=12", "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12\v", ], ) def test_parse_swhid_parsing_error(invalid_swhid): with pytest.raises(ValidationError): identifiers.parse_swhid(invalid_swhid) @pytest.mark.parametrize( "ns,version,type,id", [ ("foo", 1, CONTENT, "abc8bc9d7a6bcf6db04f476d29314f157507d505",), ("swh", 2, DIRECTORY, "def8bc9d7a6bcf6db04f476d29314f157507d505",), ("swh", 1, "foo", "fed8bc9d7a6bcf6db04f476d29314f157507d505",), ("swh", 1, SNAPSHOT, "gh6959356d30f1a4e9b7f6bca59b9a336464c03d",), ], ) def test_SWHID_class_validation_error(ns, version, type, id): with pytest.raises(ValidationError): SWHID( namespace=ns, scheme_version=version, object_type=type, object_id=id, ) def test_swhid_hash(): object_id = "94a9ed024d3859793618152ea559a168bbcbb5e2" assert hash(SWHID(object_type="directory", object_id=object_id)) == hash( SWHID(object_type="directory", object_id=object_id) ) assert hash( - SWHID( - 
object_type="directory", - object_id=object_id, - metadata={"foo": "bar", "baz": "qux"}, - ) + SWHID(object_type="directory", object_id=object_id, metadata=dummy_qualifiers,) ) == hash( - SWHID( - object_type="directory", - object_id=object_id, - metadata={"foo": "bar", "baz": "qux"}, - ) + SWHID(object_type="directory", object_id=object_id, metadata=dummy_qualifiers,) ) # Different order of the dictionary, so the underlying order of the tuple in # ImmutableDict is different. assert hash( SWHID( object_type="directory", object_id=object_id, - metadata={"foo": "bar", "baz": "qux"}, + metadata={"origin": "https://example.com", "lines": "42"}, ) ) == hash( SWHID( object_type="directory", object_id=object_id, - metadata={"baz": "qux", "foo": "bar"}, + metadata={"lines": "42", "origin": "https://example.com"}, ) ) def test_swhid_eq(): object_id = "94a9ed024d3859793618152ea559a168bbcbb5e2" assert SWHID(object_type="directory", object_id=object_id) == SWHID( object_type="directory", object_id=object_id ) assert SWHID( - object_type="directory", - object_id=object_id, - metadata={"foo": "bar", "baz": "qux"}, - ) == SWHID( - object_type="directory", - object_id=object_id, - metadata={"foo": "bar", "baz": "qux"}, - ) + object_type="directory", object_id=object_id, metadata=dummy_qualifiers, + ) == SWHID(object_type="directory", object_id=object_id, metadata=dummy_qualifiers,) assert SWHID( - object_type="directory", - object_id=object_id, - metadata={"foo": "bar", "baz": "qux"}, - ) == SWHID( - object_type="directory", - object_id=object_id, - metadata={"baz": "qux", "foo": "bar"}, - ) + object_type="directory", object_id=object_id, metadata=dummy_qualifiers, + ) == SWHID(object_type="directory", object_id=object_id, metadata=dummy_qualifiers,) diff --git a/swh/model/tests/test_model.py b/swh/model/tests/test_model.py index 902f0df..393dcfd 100644 --- a/swh/model/tests/test_model.py +++ b/swh/model/tests/test_model.py @@ -1,1225 +1,1226 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import attr from attrs_strict import AttributeTypeError from hypothesis import given from hypothesis.strategies import binary import pytest from swh.model.hashutil import MultiHash, hash_to_bytes import swh.model.hypothesis_strategies as strategies from swh.model.identifiers import ( SWHID, directory_identifier, parse_swhid, release_identifier, revision_identifier, snapshot_identifier, ) from swh.model.model import ( BaseModel, Content, Directory, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, MissingData, Origin, OriginVisit, OriginVisitStatus, Person, RawExtrinsicMetadata, Release, Revision, SkippedContent, Snapshot, Timestamp, TimestampWithTimezone, ) from swh.model.tests.test_identifiers import ( directory_example, release_example, revision_example, snapshot_example, ) @given(strategies.objects()) def test_todict_inverse_fromdict(objtype_and_obj): (obj_type, obj) = objtype_and_obj if obj_type in ("origin", "origin_visit"): return obj_as_dict = obj.to_dict() obj_as_dict_copy = copy.deepcopy(obj_as_dict) # Check the composition of to_dict and from_dict is the identity assert obj == type(obj).from_dict(obj_as_dict) # Check from_dict() does not change the input dict assert obj_as_dict == obj_as_dict_copy # Check the composition of from_dict and to_dict is the 
diff --git a/swh/model/tests/test_model.py b/swh/model/tests/test_model.py
index 902f0df..393dcfd 100644
--- a/swh/model/tests/test_model.py
+++ b/swh/model/tests/test_model.py
@@ -1,1225 +1,1226 @@
 # Copyright (C) 2019-2020 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import copy
 import datetime

 import attr
 from attrs_strict import AttributeTypeError
 from hypothesis import given
 from hypothesis.strategies import binary
 import pytest

 from swh.model.hashutil import MultiHash, hash_to_bytes
 import swh.model.hypothesis_strategies as strategies
 from swh.model.identifiers import (
     SWHID,
     directory_identifier,
     parse_swhid,
     release_identifier,
     revision_identifier,
     snapshot_identifier,
 )
 from swh.model.model import (
     BaseModel,
     Content,
     Directory,
     MetadataAuthority,
     MetadataAuthorityType,
     MetadataFetcher,
     MetadataTargetType,
     MissingData,
     Origin,
     OriginVisit,
     OriginVisitStatus,
     Person,
     RawExtrinsicMetadata,
     Release,
     Revision,
     SkippedContent,
     Snapshot,
     Timestamp,
     TimestampWithTimezone,
 )
 from swh.model.tests.test_identifiers import (
     directory_example,
     release_example,
     revision_example,
     snapshot_example,
 )


 @given(strategies.objects())
 def test_todict_inverse_fromdict(objtype_and_obj):
     (obj_type, obj) = objtype_and_obj

     if obj_type in ("origin", "origin_visit"):
         return

     obj_as_dict = obj.to_dict()
     obj_as_dict_copy = copy.deepcopy(obj_as_dict)

     # Check the composition of to_dict and from_dict is the identity
     assert obj == type(obj).from_dict(obj_as_dict)

     # Check from_dict() does not change the input dict
     assert obj_as_dict == obj_as_dict_copy

     # Check the composition of from_dict and to_dict is the identity
     assert obj_as_dict == type(obj).from_dict(obj_as_dict).to_dict()


 def test_unique_key():
     url = "http://example.org/"
     date = datetime.datetime.now(tz=datetime.timezone.utc)
     id_ = b"42" * 10
     assert Origin(url=url).unique_key() == {"url": url}
     assert OriginVisit(origin=url, date=date, type="git").unique_key() == {
         "origin": url,
         "date": str(date),
     }
     assert OriginVisitStatus(
         origin=url, visit=42, date=date, status="created", snapshot=None
     ).unique_key() == {"origin": url, "visit": "42", "date": str(date),}

     assert Snapshot.from_dict({**snapshot_example, "id": id_}).unique_key() == id_
     assert Release.from_dict({**release_example, "id": id_}).unique_key() == id_
     assert Revision.from_dict({**revision_example, "id": id_}).unique_key() == id_
     assert Directory.from_dict({**directory_example, "id": id_}).unique_key() == id_

     cont = Content.from_data(b"foo")
     assert cont.unique_key().hex() == "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"

     kwargs = {
         **cont.to_dict(),
         "reason": "foo",
         "status": "absent",
     }
     del kwargs["data"]
     assert SkippedContent(**kwargs).unique_key() == cont.hashes()


 # Anonymization


 @given(strategies.objects())
 def test_anonymization(objtype_and_obj):
     (obj_type, obj) = objtype_and_obj

     def check_person(p):
         if p is not None:
             assert p.name is None
             assert p.email is None
             assert len(p.fullname) == 32

     anon_obj = obj.anonymize()
     if obj_type == "person":
         assert anon_obj is not None
         check_person(anon_obj)
     elif obj_type == "release":
         assert anon_obj is not None
         check_person(anon_obj.author)
     elif obj_type == "revision":
         assert anon_obj is not None
         check_person(anon_obj.author)
         check_person(anon_obj.committer)
     else:
         assert anon_obj is None


 # Origin, OriginVisit, OriginVisitStatus


 @given(strategies.origins())
 def test_todict_origins(origin):
     obj = origin.to_dict()

     assert "type" not in obj
     assert type(origin)(url=origin.url) == type(origin).from_dict(obj)


 @given(strategies.origin_visits())
 def test_todict_origin_visits(origin_visit):
     obj = origin_visit.to_dict()

     assert origin_visit == type(origin_visit).from_dict(obj)


 def test_origin_visit_naive_datetime():
     with pytest.raises(ValueError, match="must be a timezone-aware datetime"):
         OriginVisit(
             origin="http://foo/", date=datetime.datetime.now(), type="git",
         )


 @given(strategies.origin_visit_statuses())
 def test_todict_origin_visit_statuses(origin_visit_status):
     obj = origin_visit_status.to_dict()

     assert origin_visit_status == type(origin_visit_status).from_dict(obj)


 def test_origin_visit_status_naive_datetime():
     with pytest.raises(ValueError, match="must be a timezone-aware datetime"):
         OriginVisitStatus(
             origin="http://foo/",
             visit=42,
             date=datetime.datetime.now(),
             status="ongoing",
             snapshot=None,
         )


 # Timestamp


 @given(strategies.timestamps())
 def test_timestamps_strategy(timestamp):
     attr.validate(timestamp)


 def test_timestamp_seconds():
     attr.validate(Timestamp(seconds=0, microseconds=0))
     with pytest.raises(AttributeTypeError):
         Timestamp(seconds="0", microseconds=0)
     attr.validate(Timestamp(seconds=2 ** 63 - 1, microseconds=0))
     with pytest.raises(ValueError):
         Timestamp(seconds=2 ** 63, microseconds=0)
     attr.validate(Timestamp(seconds=-(2 ** 63), microseconds=0))
     with pytest.raises(ValueError):
         Timestamp(seconds=-(2 ** 63) - 1, microseconds=0)


 def test_timestamp_microseconds():
     attr.validate(Timestamp(seconds=0, microseconds=0))
     with pytest.raises(AttributeTypeError):
         Timestamp(seconds=0, microseconds="0")
     attr.validate(Timestamp(seconds=0, microseconds=10 ** 6 - 1))
     with pytest.raises(ValueError):
         Timestamp(seconds=0, microseconds=10 ** 6)
     with pytest.raises(ValueError):
         Timestamp(seconds=0, microseconds=-1)
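A quick sketch of the boundaries the two tests above pin down: a Timestamp models a
git/PostgreSQL-compatible instant, so seconds must fit in a signed 64-bit integer and
microseconds must lie in [0, 10**6). The loop scaffolding below is illustrative only.

    from swh.model.model import Timestamp

    Timestamp(seconds=2 ** 63 - 1, microseconds=10 ** 6 - 1)  # both maxima: accepted
    for bad in [
        dict(seconds=2 ** 63, microseconds=0),          # seconds overflow
        dict(seconds=-(2 ** 63) - 1, microseconds=0),   # seconds underflow
        dict(seconds=0, microseconds=10 ** 6),          # microseconds too large
        dict(seconds=0, microseconds=-1),               # negative microseconds
    ]:
        try:
            Timestamp(**bad)
        except ValueError:
            pass  # rejected, as the tests above expect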
 def test_timestamp_from_dict():
     assert Timestamp.from_dict({"seconds": 10, "microseconds": 5})

     with pytest.raises(AttributeTypeError):
         Timestamp.from_dict({"seconds": "10", "microseconds": 5})

     with pytest.raises(AttributeTypeError):
         Timestamp.from_dict({"seconds": 10, "microseconds": "5"})

     with pytest.raises(ValueError):
         Timestamp.from_dict({"seconds": 0, "microseconds": -1})

     Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6 - 1})
     with pytest.raises(ValueError):
         Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6})


 # TimestampWithTimezone


 def test_timestampwithtimezone():
     ts = Timestamp(seconds=0, microseconds=0)
     tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=False)
     attr.validate(tstz)
     assert tstz.negative_utc is False

     attr.validate(TimestampWithTimezone(timestamp=ts, offset=10, negative_utc=False))
     attr.validate(TimestampWithTimezone(timestamp=ts, offset=-10, negative_utc=False))

     tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=True)
     attr.validate(tstz)
     assert tstz.negative_utc is True

     with pytest.raises(AttributeTypeError):
         TimestampWithTimezone(
             timestamp=datetime.datetime.now(), offset=0, negative_utc=False
         )

     with pytest.raises(AttributeTypeError):
         TimestampWithTimezone(timestamp=ts, offset="0", negative_utc=False)

     with pytest.raises(AttributeTypeError):
         TimestampWithTimezone(timestamp=ts, offset=1.0, negative_utc=False)

     with pytest.raises(AttributeTypeError):
         TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=0)

     with pytest.raises(ValueError):
         TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=True)

     with pytest.raises(ValueError):
         TimestampWithTimezone(timestamp=ts, offset=-1, negative_utc=True)


 def test_timestampwithtimezone_from_datetime():
     tz = datetime.timezone(datetime.timedelta(minutes=+60))
     date = datetime.datetime(2020, 2, 27, 14, 39, 19, tzinfo=tz)

     tstz = TimestampWithTimezone.from_datetime(date)

     assert tstz == TimestampWithTimezone(
         timestamp=Timestamp(seconds=1582810759, microseconds=0,),
         offset=60,
         negative_utc=False,
     )


 def test_timestampwithtimezone_from_naive_datetime():
     date = datetime.datetime(2020, 2, 27, 14, 39, 19)

     with pytest.raises(ValueError, match="datetime without timezone"):
         TimestampWithTimezone.from_datetime(date)


 def test_timestampwithtimezone_from_iso8601():
     date = "2020-02-27 14:39:19.123456+0100"

     tstz = TimestampWithTimezone.from_iso8601(date)

     assert tstz == TimestampWithTimezone(
         timestamp=Timestamp(seconds=1582810759, microseconds=123456,),
         offset=60,
         negative_utc=False,
     )


 def test_timestampwithtimezone_from_iso8601_negative_utc():
     date = "2020-02-27 13:39:19-0000"

     tstz = TimestampWithTimezone.from_iso8601(date)

     assert tstz == TimestampWithTimezone(
         timestamp=Timestamp(seconds=1582810759, microseconds=0,),
         offset=0,
         negative_utc=True,
     )
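The ISO8601 tests above boil down to this conversion: the wall-clock part becomes a
UTC epoch Timestamp, the numeric zone becomes an offset in minutes, and the special
-0000 zone is flagged via negative_utc. A minimal sketch mirroring the test data:

    from swh.model.model import TimestampWithTimezone

    tstz = TimestampWithTimezone.from_iso8601("2020-02-27 14:39:19.123456+0100")
    assert tstz.timestamp.seconds == 1582810759  # 2020-02-27 13:39:19 UTC
    assert tstz.timestamp.microseconds == 123456
    assert tstz.offset == 60                     # +0100, expressed in minutes
    assert tstz.negative_utc is False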
""" actual_person = Person.from_fullname(b"") assert actual_person == Person(fullname=b"", name=None, email=None,) def test_git_author_line_to_author(): # edge case out of the way with pytest.raises(TypeError): Person.from_fullname(None) tests = { b"a ": Person(name=b"a", email=b"b@c.com", fullname=b"a ",), b"": Person( name=None, email=b"foo@bar.com", fullname=b"", ), b"malformed ': Person( name=b"malformed", email=b'"', ), b"trailing ": Person( name=b"trailing", email=b"sp@c.e", fullname=b"trailing ", ), b"no": Person(name=b"no", email=b"sp@c.e", fullname=b"no",), b" more ": Person( name=b"more", email=b"sp@c.es", fullname=b" more ", ), b" <>": Person(name=None, email=None, fullname=b" <>",), } for person in sorted(tests): expected_person = tests[person] assert expected_person == Person.from_fullname(person) # Content def test_content_get_hash(): hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux") c = Content(length=42, status="visible", **hashes) for (hash_name, hash_) in hashes.items(): assert c.get_hash(hash_name) == hash_ def test_content_hashes(): hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux") c = Content(length=42, status="visible", **hashes) assert c.hashes() == hashes def test_content_data(): c = Content( length=42, status="visible", data=b"foo", sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) assert c.with_data() == c def test_content_data_missing(): c = Content( length=42, status="visible", sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) with pytest.raises(MissingData): c.with_data() @given(strategies.present_contents_d()) def test_content_from_dict(content_d): c = Content.from_data(**content_d) assert c assert c.ctime == content_d["ctime"] content_d2 = c.to_dict() c2 = Content.from_dict(content_d2) assert c2.ctime == c.ctime def test_content_from_dict_str_ctime(): # test with ctime as a string n = datetime.datetime(2020, 5, 6, 12, 34, tzinfo=datetime.timezone.utc) content_d = { "ctime": n.isoformat(), "data": b"", "length": 0, "sha1": b"\x00", "sha256": b"\x00", "sha1_git": b"\x00", "blake2s256": b"\x00", } c = Content.from_dict(content_d) assert c.ctime == n def test_content_from_dict_str_naive_ctime(): # test with ctime as a string n = datetime.datetime(2020, 5, 6, 12, 34) content_d = { "ctime": n.isoformat(), "data": b"", "length": 0, "sha1": b"\x00", "sha256": b"\x00", "sha1_git": b"\x00", "blake2s256": b"\x00", } with pytest.raises(ValueError, match="must be a timezone-aware datetime."): Content.from_dict(content_d) @given(binary(max_size=4096)) def test_content_from_data(data): c = Content.from_data(data) assert c.data == data assert c.length == len(data) assert c.status == "visible" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value @given(binary(max_size=4096)) def test_hidden_content_from_data(data): c = Content.from_data(data, status="hidden") assert c.data == data assert c.length == len(data) assert c.status == "hidden" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value def test_content_naive_datetime(): c = Content.from_data(b"foo") with pytest.raises(ValueError, match="must be a timezone-aware datetime"): Content( **c.to_dict(), ctime=datetime.datetime.now(), ) # SkippedContent @given(binary(max_size=4096)) def test_skipped_content_from_data(data): c = SkippedContent.from_data(data, reason="reason") assert c.reason == "reason" assert c.length == len(data) assert c.status == "absent" for 
 # Content


 def test_content_get_hash():
     hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
     c = Content(length=42, status="visible", **hashes)
     for (hash_name, hash_) in hashes.items():
         assert c.get_hash(hash_name) == hash_


 def test_content_hashes():
     hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
     c = Content(length=42, status="visible", **hashes)
     assert c.hashes() == hashes


 def test_content_data():
     c = Content(
         length=42,
         status="visible",
         data=b"foo",
         sha1=b"foo",
         sha1_git=b"bar",
         sha256=b"baz",
         blake2s256=b"qux",
     )
     assert c.with_data() == c


 def test_content_data_missing():
     c = Content(
         length=42,
         status="visible",
         sha1=b"foo",
         sha1_git=b"bar",
         sha256=b"baz",
         blake2s256=b"qux",
     )
     with pytest.raises(MissingData):
         c.with_data()


 @given(strategies.present_contents_d())
 def test_content_from_dict(content_d):
     c = Content.from_data(**content_d)
     assert c
     assert c.ctime == content_d["ctime"]

     content_d2 = c.to_dict()
     c2 = Content.from_dict(content_d2)
     assert c2.ctime == c.ctime


 def test_content_from_dict_str_ctime():
     # test with ctime as a string
     n = datetime.datetime(2020, 5, 6, 12, 34, tzinfo=datetime.timezone.utc)
     content_d = {
         "ctime": n.isoformat(),
         "data": b"",
         "length": 0,
         "sha1": b"\x00",
         "sha256": b"\x00",
         "sha1_git": b"\x00",
         "blake2s256": b"\x00",
     }
     c = Content.from_dict(content_d)
     assert c.ctime == n


 def test_content_from_dict_str_naive_ctime():
     # test with ctime as a string
     n = datetime.datetime(2020, 5, 6, 12, 34)
     content_d = {
         "ctime": n.isoformat(),
         "data": b"",
         "length": 0,
         "sha1": b"\x00",
         "sha256": b"\x00",
         "sha1_git": b"\x00",
         "blake2s256": b"\x00",
     }
     with pytest.raises(ValueError, match="must be a timezone-aware datetime."):
         Content.from_dict(content_d)


 @given(binary(max_size=4096))
 def test_content_from_data(data):
     c = Content.from_data(data)
     assert c.data == data
     assert c.length == len(data)
     assert c.status == "visible"
     for key, value in MultiHash.from_data(data).digest().items():
         assert getattr(c, key) == value


 @given(binary(max_size=4096))
 def test_hidden_content_from_data(data):
     c = Content.from_data(data, status="hidden")
     assert c.data == data
     assert c.length == len(data)
     assert c.status == "hidden"
     for key, value in MultiHash.from_data(data).digest().items():
         assert getattr(c, key) == value


 def test_content_naive_datetime():
     c = Content.from_data(b"foo")
     with pytest.raises(ValueError, match="must be a timezone-aware datetime"):
         Content(
             **c.to_dict(), ctime=datetime.datetime.now(),
         )


 # SkippedContent


 @given(binary(max_size=4096))
 def test_skipped_content_from_data(data):
     c = SkippedContent.from_data(data, reason="reason")
     assert c.reason == "reason"
     assert c.length == len(data)
     assert c.status == "absent"
     for key, value in MultiHash.from_data(data).digest().items():
         assert getattr(c, key) == value


 @given(strategies.skipped_contents_d())
 def test_skipped_content_origin_is_str(skipped_content_d):
     assert SkippedContent.from_dict(skipped_content_d)

     skipped_content_d["origin"] = "http://path/to/origin"
     assert SkippedContent.from_dict(skipped_content_d)

     skipped_content_d["origin"] = Origin(url="http://path/to/origin")
     with pytest.raises(ValueError, match="origin"):
         SkippedContent.from_dict(skipped_content_d)


 def test_skipped_content_naive_datetime():
     c = SkippedContent.from_data(b"foo", reason="reason")
     with pytest.raises(ValueError, match="must be a timezone-aware datetime"):
         SkippedContent(
             **c.to_dict(), ctime=datetime.datetime.now(),
         )
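A compact recap of the Content.from_data contract checked above, reusing the same
b"foo" payload as test_unique_key: all configured hashes are computed from the
payload and the status defaults to "visible" (SkippedContent.from_data instead
forces "absent" and keeps no data).

    from swh.model.model import Content

    c = Content.from_data(b"foo")
    assert c.status == "visible" and c.length == 3
    assert c.sha1.hex() == "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"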
 # Revision


 def test_revision_extra_headers_no_headers():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")
     rev = Revision.from_dict(rev_dict)
     rev_dict = attr.asdict(rev, recurse=False)

     rev_model = Revision(**rev_dict)
     assert rev_model.metadata is None
     assert rev_model.extra_headers == ()

     rev_dict["metadata"] = {
         "something": "somewhere",
         "some other thing": "stranger",
     }
     rev_model = Revision(**rev_dict)
     assert rev_model.metadata == rev_dict["metadata"]
     assert rev_model.extra_headers == ()


 def test_revision_extra_headers_with_headers():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")
     rev = Revision.from_dict(rev_dict)
     rev_dict = attr.asdict(rev, recurse=False)
     rev_dict["metadata"] = {
         "something": "somewhere",
         "some other thing": "stranger",
     }
     extra_headers = (
         (b"header1", b"value1"),
         (b"header2", b"42"),
         (b"header3", b"should I?\x00"),
         (b"header1", b"again"),
     )

     rev_dict["extra_headers"] = extra_headers
     rev_model = Revision(**rev_dict)
     assert "extra_headers" not in rev_model.metadata
     assert rev_model.extra_headers == extra_headers


 def test_revision_extra_headers_in_metadata():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")
     rev = Revision.from_dict(rev_dict)
     rev_dict = attr.asdict(rev, recurse=False)
     rev_dict["metadata"] = {
         "something": "somewhere",
         "some other thing": "stranger",
     }

     extra_headers = (
         (b"header1", b"value1"),
         (b"header2", b"42"),
         (b"header3", b"should I?\x00"),
         (b"header1", b"again"),
     )

     # check the bw-compat init hook does the job
     # ie. extra_headers are given in the metadata field
     rev_dict["metadata"]["extra_headers"] = extra_headers
     rev_model = Revision(**rev_dict)
     assert "extra_headers" not in rev_model.metadata
     assert rev_model.extra_headers == extra_headers


 def test_revision_extra_headers_as_lists():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")
     rev = Revision.from_dict(rev_dict)
     rev_dict = attr.asdict(rev, recurse=False)
     rev_dict["metadata"] = {}

     extra_headers = (
         (b"header1", b"value1"),
         (b"header2", b"42"),
         (b"header3", b"should I?\x00"),
         (b"header1", b"again"),
     )

     # check Revision.extra_headers tuplify does the job
     rev_dict["extra_headers"] = [list(x) for x in extra_headers]
     rev_model = Revision(**rev_dict)
     assert "extra_headers" not in rev_model.metadata
     assert rev_model.extra_headers == extra_headers


 def test_revision_extra_headers_type_error():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")
     rev = Revision.from_dict(rev_dict)
     orig_rev_dict = attr.asdict(rev, recurse=False)
     orig_rev_dict["metadata"] = {
         "something": "somewhere",
         "some other thing": "stranger",
     }
     extra_headers = (
         ("header1", b"value1"),
         (b"header2", 42),
         ("header1", "again"),
     )
     # check headers one at a time
     #   if given as extra_header
     for extra_header in extra_headers:
         rev_dict = copy.deepcopy(orig_rev_dict)
         rev_dict["extra_headers"] = (extra_header,)
         with pytest.raises(AttributeTypeError):
             Revision(**rev_dict)
     #   if given as metadata
     for extra_header in extra_headers:
         rev_dict = copy.deepcopy(orig_rev_dict)
         rev_dict["metadata"]["extra_headers"] = (extra_header,)
         with pytest.raises(AttributeTypeError):
             Revision(**rev_dict)


 def test_revision_extra_headers_from_dict():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")
     rev_model = Revision.from_dict(rev_dict)
     assert rev_model.metadata is None
     assert rev_model.extra_headers == ()

     rev_dict["metadata"] = {
         "something": "somewhere",
         "some other thing": "stranger",
     }
     rev_model = Revision.from_dict(rev_dict)
     assert rev_model.metadata == rev_dict["metadata"]
     assert rev_model.extra_headers == ()

     extra_headers = (
         (b"header1", b"value1"),
         (b"header2", b"42"),
         (b"header3", b"should I?\nmaybe\x00\xff"),
         (b"header1", b"again"),
     )
     rev_dict["extra_headers"] = extra_headers
     rev_model = Revision.from_dict(rev_dict)
     assert "extra_headers" not in rev_model.metadata
     assert rev_model.extra_headers == extra_headers


 def test_revision_extra_headers_in_metadata_from_dict():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")

     rev_dict["metadata"] = {
         "something": "somewhere",
         "some other thing": "stranger",
     }
     extra_headers = (
         (b"header1", b"value1"),
         (b"header2", b"42"),
         (b"header3", b"should I?\nmaybe\x00\xff"),
         (b"header1", b"again"),
     )
     # check the bw-compat init hook does the job
     rev_dict["metadata"]["extra_headers"] = extra_headers
     rev_model = Revision.from_dict(rev_dict)
     assert "extra_headers" not in rev_model.metadata
     assert rev_model.extra_headers == extra_headers


 def test_revision_extra_headers_as_lists_from_dict():
     rev_dict = revision_example.copy()
     rev_dict.pop("id")
     rev_model = Revision.from_dict(rev_dict)
     rev_dict["metadata"] = {
         "something": "somewhere",
         "some other thing": "stranger",
     }
     extra_headers = (
         (b"header1", b"value1"),
         (b"header2", b"42"),
         (b"header3", b"should I?\nmaybe\x00\xff"),
         (b"header1", b"again"),
     )
     # check Revision.extra_headers converter does the job
     rev_dict["extra_headers"] = [list(x) for x in extra_headers]
     rev_model = Revision.from_dict(rev_dict)
     assert "extra_headers" not in rev_model.metadata
     assert rev_model.extra_headers == extra_headers
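In short, the behaviour pinned down by the extra_headers tests: headers may arrive
as the extra_headers argument or, for backward compatibility, inside the legacy
metadata dict; either way they are tuplified and hoisted into Revision.extra_headers.
A sketch combining the two cases that are tested separately above:

    from swh.model.model import Revision
    from swh.model.tests.test_identifiers import revision_example

    rev_dict = revision_example.copy()
    rev_dict.pop("id")
    rev_dict["metadata"] = {
        "something": "somewhere",
        "extra_headers": [[b"header1", b"value1"]],  # legacy location, as lists
    }
    rev = Revision.from_dict(rev_dict)
    assert rev.extra_headers == ((b"header1", b"value1"),)  # tuplified
    assert "extra_headers" not in rev.metadata  # hoisted out of metadata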
 # ID computation


 def test_directory_model_id_computation():
     dir_dict = directory_example.copy()
     del dir_dict["id"]

     dir_id = hash_to_bytes(directory_identifier(dir_dict))
     dir_model = Directory.from_dict(dir_dict)
     assert dir_model.id == dir_id


 def test_revision_model_id_computation():
     rev_dict = revision_example.copy()
     del rev_dict["id"]

     rev_id = hash_to_bytes(revision_identifier(rev_dict))
     rev_model = Revision.from_dict(rev_dict)
     assert rev_model.id == rev_id


 def test_revision_model_id_computation_with_no_date():
     """We can have revision with date to None

     """
     rev_dict = revision_example.copy()
     rev_dict["date"] = None
     rev_dict["committer_date"] = None
     del rev_dict["id"]

     rev_id = hash_to_bytes(revision_identifier(rev_dict))
     rev_model = Revision.from_dict(rev_dict)
     assert rev_model.date is None
     assert rev_model.committer_date is None
     assert rev_model.id == rev_id


 def test_release_model_id_computation():
     rel_dict = release_example.copy()
     del rel_dict["id"]

     rel_id = hash_to_bytes(release_identifier(rel_dict))
     rel_model = Release.from_dict(rel_dict)
     assert isinstance(rel_model.date, TimestampWithTimezone)
     assert rel_model.id == hash_to_bytes(rel_id)


 def test_snapshot_model_id_computation():
     snp_dict = snapshot_example.copy()
     del snp_dict["id"]

     snp_id = hash_to_bytes(snapshot_identifier(snp_dict))
     snp_model = Snapshot.from_dict(snp_dict)
     assert snp_model.id == snp_id


 @given(strategies.objects(split_content=True))
 def test_object_type(objtype_and_obj):
     obj_type, obj = objtype_and_obj
     assert obj_type == obj.object_type


 def test_object_type_is_final():
     object_types = set()

     def check_final(cls):
         if hasattr(cls, "object_type"):
             assert cls.object_type not in object_types
             object_types.add(cls.object_type)
         if cls.__subclasses__():
             assert not hasattr(cls, "object_type")
         for subcls in cls.__subclasses__():
             check_final(subcls)

     check_final(BaseModel)
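The *_model_id_computation tests above all check one invariant: building a model
object from a dict that lacks an "id" recomputes the intrinsic identifier, and the
result matches the standalone *_identifier functions. A minimal sketch for the
directory case:

    from swh.model.hashutil import hash_to_bytes
    from swh.model.identifiers import directory_identifier
    from swh.model.model import Directory
    from swh.model.tests.test_identifiers import directory_example

    dir_dict = directory_example.copy()
    del dir_dict["id"]
    assert Directory.from_dict(dir_dict).id == hash_to_bytes(
        directory_identifier(dir_dict)
    )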
 _metadata_authority = MetadataAuthority(
     type=MetadataAuthorityType.FORGE, url="https://forge.softwareheritage.org",
 )
 _metadata_fetcher = MetadataFetcher(name="test-fetcher", version="0.0.1",)
 _content_swhid = parse_swhid("swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2")
 _origin_url = "https://forge.softwareheritage.org/source/swh-model.git"
+_dummy_qualifiers = {"origin": "https://example.com", "lines": "42"}

 _common_metadata_fields = dict(
     discovery_date=datetime.datetime.now(tz=datetime.timezone.utc),
     authority=_metadata_authority,
     fetcher=_metadata_fetcher,
     format="json",
-    metadata=b'{"foo": "bar"}',
+    metadata=b'{"origin": "https://example.com", "lines": "42"}',
 )


 def test_metadata_valid():
     """Checks valid RawExtrinsicMetadata objects don't raise an error."""

     # Simplest case
     RawExtrinsicMetadata(
         type=MetadataTargetType.ORIGIN, target=_origin_url, **_common_metadata_fields
     )

     # Object with an SWHID
     RawExtrinsicMetadata(
         type=MetadataTargetType.CONTENT,
         target=_content_swhid,
         **_common_metadata_fields,
     )


 def test_metadata_to_dict():
     """Checks valid RawExtrinsicMetadata objects don't raise an error."""

     common_fields = {
         "authority": {"type": "forge", "url": "https://forge.softwareheritage.org"},
         "fetcher": {"name": "test-fetcher", "version": "0.0.1",},
         "discovery_date": _common_metadata_fields["discovery_date"],
         "format": "json",
-        "metadata": b'{"foo": "bar"}',
+        "metadata": b'{"origin": "https://example.com", "lines": "42"}',
     }

     m = RawExtrinsicMetadata(
         type=MetadataTargetType.ORIGIN, target=_origin_url, **_common_metadata_fields,
     )
     assert m.to_dict() == {
         "type": "origin",
         "target": _origin_url,
         **common_fields,
     }
     assert RawExtrinsicMetadata.from_dict(m.to_dict()) == m

     m = RawExtrinsicMetadata(
         type=MetadataTargetType.CONTENT,
         target=_content_swhid,
         **_common_metadata_fields,
     )
     assert m.to_dict() == {
         "type": "content",
         "target": "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
         **common_fields,
     }
     assert RawExtrinsicMetadata.from_dict(m.to_dict()) == m


 def test_metadata_invalid_target():
     """Checks various invalid values for the 'target' field."""

     # SWHID for an origin
     with pytest.raises(ValueError, match="expected an URL"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target=_content_swhid,
             **_common_metadata_fields,
         )

     # SWHID for an origin (even when passed as string)
     with pytest.raises(ValueError, match="expected an URL"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
             **_common_metadata_fields,
         )

     # URL for a non-origin
     with pytest.raises(ValueError, match="Expected SWHID, got a string"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_origin_url,
             **_common_metadata_fields,
         )

     # SWHID passed as string instead of SWHID
     with pytest.raises(ValueError, match="Expected SWHID, got a string"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
             **_common_metadata_fields,
         )

     # Object type does not match the SWHID
     with pytest.raises(
         ValueError, match="Expected SWHID type 'revision', got 'content'"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.REVISION,
             target=_content_swhid,
             **_common_metadata_fields,
         )

     # Non-core SWHID
     with pytest.raises(ValueError, match="Expected core SWHID"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=SWHID(
                 object_type="content",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
-                metadata={"foo": "bar"},
+                metadata=_dummy_qualifiers,
             ),
             **_common_metadata_fields,
         )


 def test_metadata_naive_datetime():
     with pytest.raises(ValueError, match="must be a timezone-aware datetime"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target=_origin_url,
             **{**_common_metadata_fields, "discovery_date": datetime.datetime.now()},
         )


 def test_metadata_validate_context_origin():
     """Checks validation of RawExtrinsicMetadata.origin."""

     # Origins can't have an 'origin' context
     with pytest.raises(
         ValueError, match="Unexpected 'origin' context for origin object"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target=_origin_url,
             origin=_origin_url,
             **_common_metadata_fields,
         )

     # but all other types can
     RawExtrinsicMetadata(
         type=MetadataTargetType.CONTENT,
         target=_content_swhid,
         origin=_origin_url,
         **_common_metadata_fields,
     )

     # SWHIDs aren't valid origin URLs
     with pytest.raises(ValueError, match="SWHID used as context origin URL"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_content_swhid,
             origin="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
             **_common_metadata_fields,
         )


 def test_metadata_validate_context_visit():
     """Checks validation of RawExtrinsicMetadata.visit."""

     # Origins can't have a 'visit' context
     with pytest.raises(
         ValueError, match="Unexpected 'visit' context for origin object"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target=_origin_url,
             visit=42,
             **_common_metadata_fields,
         )

     # but all other types can
     RawExtrinsicMetadata(
         type=MetadataTargetType.CONTENT,
         target=_content_swhid,
         origin=_origin_url,
         visit=42,
         **_common_metadata_fields,
     )

     # Missing 'origin'
     with pytest.raises(ValueError, match="'origin' context must be set if 'visit' is"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_content_swhid,
             visit=42,
             **_common_metadata_fields,
         )
     # visit id must be positive
     with pytest.raises(ValueError, match="Nonpositive visit id"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_content_swhid,
             origin=_origin_url,
             visit=-42,
             **_common_metadata_fields,
         )


 def test_metadata_validate_context_snapshot():
     """Checks validation of RawExtrinsicMetadata.snapshot."""

     # Origins can't have a 'snapshot' context
     with pytest.raises(
         ValueError, match="Unexpected 'snapshot' context for origin object"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target=_origin_url,
             snapshot=SWHID(
                 object_type="snapshot",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
             ),
             **_common_metadata_fields,
         )

     # but content can
     RawExtrinsicMetadata(
         type=MetadataTargetType.CONTENT,
         target=_content_swhid,
         snapshot=SWHID(
             object_type="snapshot", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2"
         ),
         **_common_metadata_fields,
     )

     # Non-core SWHID
     with pytest.raises(ValueError, match="Expected core SWHID"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_content_swhid,
             snapshot=SWHID(
                 object_type="snapshot",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
-                metadata={"foo": "bar"},
+                metadata=_dummy_qualifiers,
             ),
             **_common_metadata_fields,
         )

     # SWHID type doesn't match the expected type of this context key
     with pytest.raises(
         ValueError, match="Expected SWHID type 'snapshot', got 'content'"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_content_swhid,
             snapshot=SWHID(
                 object_type="content",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
             ),
             **_common_metadata_fields,
         )


 def test_metadata_validate_context_release():
     """Checks validation of RawExtrinsicMetadata.release."""

     # Origins can't have a 'release' context
     with pytest.raises(
         ValueError, match="Unexpected 'release' context for origin object"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target=_origin_url,
             release=SWHID(
                 object_type="release",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
             ),
             **_common_metadata_fields,
         )

     # but content can
     RawExtrinsicMetadata(
         type=MetadataTargetType.CONTENT,
         target=_content_swhid,
         release=SWHID(
             object_type="release", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2"
         ),
         **_common_metadata_fields,
     )

     # Non-core SWHID
     with pytest.raises(ValueError, match="Expected core SWHID"):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_content_swhid,
             release=SWHID(
                 object_type="release",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
-                metadata={"foo": "bar"},
+                metadata=_dummy_qualifiers,
             ),
             **_common_metadata_fields,
         )

     # SWHID type doesn't match the expected type of this context key
     with pytest.raises(
         ValueError, match="Expected SWHID type 'release', got 'content'"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.CONTENT,
             target=_content_swhid,
             release=SWHID(
                 object_type="content",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
             ),
             **_common_metadata_fields,
         )


 def test_metadata_validate_context_revision():
     """Checks validation of RawExtrinsicMetadata.revision."""

     # Origins can't have a 'revision' context
     with pytest.raises(
         ValueError, match="Unexpected 'revision' context for origin object"
     ):
         RawExtrinsicMetadata(
             type=MetadataTargetType.ORIGIN,
             target=_origin_url,
             revision=SWHID(
                 object_type="revision",
                 object_id="94a9ed024d3859793618152ea559a168bbcbb5e2",
             ),
             **_common_metadata_fields,
         )
object_id="94a9ed024d3859793618152ea559a168bbcbb5e2" ), **_common_metadata_fields, ) # Non-core SWHID with pytest.raises(ValueError, match="Expected core SWHID"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, target=_content_swhid, revision=SWHID( object_type="revision", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", - metadata={"foo": "bar"}, + metadata=_dummy_qualifiers, ), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'revision', got 'content'" ): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, target=_content_swhid, revision=SWHID( object_type="content", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) def test_metadata_validate_context_path(): """Checks validation of RawExtrinsicMetadata.path.""" # Origins can't have a 'path' context with pytest.raises(ValueError, match="Unexpected 'path' context for origin object"): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, target=_origin_url, path=b"/foo/bar", **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, target=_content_swhid, path=b"/foo/bar", **_common_metadata_fields, ) def test_metadata_validate_context_directory(): """Checks validation of RawExtrinsicMetadata.directory.""" # Origins can't have a 'directory' context with pytest.raises( ValueError, match="Unexpected 'directory' context for origin object" ): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, target=_origin_url, directory=SWHID( object_type="directory", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, target=_content_swhid, directory=SWHID( object_type="directory", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) # Non-core SWHID with pytest.raises(ValueError, match="Expected core SWHID"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, target=_content_swhid, directory=SWHID( object_type="directory", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", - metadata={"foo": "bar"}, + metadata=_dummy_qualifiers, ), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'directory', got 'content'" ): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, target=_content_swhid, directory=SWHID( object_type="content", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, )