diff --git a/swh/model/hashutil.py b/swh/model/hashutil.py index 908b736..e332c23 100644 --- a/swh/model/hashutil.py +++ b/swh/model/hashutil.py @@ -1,369 +1,370 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Module in charge of hashing function definitions. This is the base module used to compute swh's hashes. Only a subset of hashing algorithms is supported, as defined in the ALGORITHMS set. Any provided algorithm not in that list will result in a ValueError explaining the error. This module defines a MultiHash class to ease the computation of the Software Heritage hashing algorithms. It allows computing hashes from a file object, a path, or data, using an interface similar to the one provided by the standard hashlib module. Basic usage examples: - file object: MultiHash.from_file( file_object, hash_names=DEFAULT_ALGORITHMS).digest() - path (filepath): MultiHash.from_path(b'foo').hexdigest() - data (bytes): MultiHash.from_data(b'foo').bytehexdigest() "Complex" usage, defining a swh hashlib instance first: - To compute the length as well, add 'length' to the set of algorithms to compute, for example: .. code-block:: python h = MultiHash(hash_names=set({'length'}).union(DEFAULT_ALGORITHMS)) with open(filepath, 'rb') as f: h.update(f.read(HASH_BLOCK_SIZE)) hashes = h.digest() # returns a dict of {hash_algo_name: hash_in_bytes} - Write to a file while hashing the stream being written, for example: .. code-block:: python h = MultiHash(length=length) with open(filepath, 'wb') as f: for chunk in r.iter_content(): # r is a stream of some sort h.update(chunk) f.write(chunk) hashes = h.hexdigest() # returns a dict of {hash_algo_name: hash_in_hex} """ import binascii import functools import hashlib from io import BytesIO import os from typing import Callable, Dict ALGORITHMS = set(["sha1", "sha256", "sha1_git", "blake2s256", "blake2b512"]) """Hashing algorithms supported by this module""" DEFAULT_ALGORITHMS = set(["sha1", "sha256", "sha1_git", "blake2s256"]) """Algorithms computed by default when calling the functions from this module. Subset of :const:`ALGORITHMS`. """ HASH_BLOCK_SIZE = 32768 """Block size for streaming hash computations made in this module""" _blake2_hash_cache = {} # type: Dict[str, Callable] class MultiHash: """Hashutil class to support the computation of multiple hashes at once. Args: hash_names (set): Set of hash algorithm names (+ optionally 'length') to compute (cf. DEFAULT_ALGORITHMS) length (int): Total length of the chunks that will be read If 'length' is provided among the algorithm names, the length is also computed and returned.
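For instance, a minimal sketch of hashing a small in-memory payload while also tracking its length (``b'foo'`` is an arbitrary example input):

.. code-block:: python

    h = MultiHash(hash_names={'sha1', 'length'})
    h.update(b'foo')
    h.hexdigest()
    # {'sha1': '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33', 'length': 3}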
""" def __init__(self, hash_names=DEFAULT_ALGORITHMS, length=None): self.state = {} self.track_length = False for name in hash_names: if name == "length": self.state["length"] = 0 self.track_length = True else: self.state[name] = _new_hash(name, length) @classmethod def from_state(cls, state, track_length): ret = cls([]) ret.state = state ret.track_length = track_length @classmethod def from_file(cls, fobj, hash_names=DEFAULT_ALGORITHMS, length=None): ret = cls(length=length, hash_names=hash_names) while True: chunk = fobj.read(HASH_BLOCK_SIZE) if not chunk: break ret.update(chunk) return ret @classmethod def from_path(cls, path, hash_names=DEFAULT_ALGORITHMS): length = os.path.getsize(path) with open(path, "rb") as f: ret = cls.from_file(f, hash_names=hash_names, length=length) return ret @classmethod def from_data(cls, data, hash_names=DEFAULT_ALGORITHMS): length = len(data) fobj = BytesIO(data) return cls.from_file(fobj, hash_names=hash_names, length=length) def update(self, chunk): for name, h in self.state.items(): if name == "length": continue h.update(chunk) if self.track_length: self.state["length"] += len(chunk) def digest(self): return { name: h.digest() if name != "length" else h for name, h in self.state.items() } def hexdigest(self): return { name: h.hexdigest() if name != "length" else h for name, h in self.state.items() } def bytehexdigest(self): return { name: hash_to_bytehex(h.digest()) if name != "length" else h for name, h in self.state.items() } def copy(self): copied_state = { name: h.copy() if name != "length" else h for name, h in self.state.items() } return self.from_state(copied_state, self.track_length) def _new_blake2_hash(algo): """Return a function that initializes a blake2 hash. """ if algo in _blake2_hash_cache: return _blake2_hash_cache[algo]() lalgo = algo.lower() if not lalgo.startswith("blake2"): raise ValueError("Algorithm %s is not a blake2 hash" % algo) blake_family = lalgo[:7] digest_size = None if lalgo[7:]: try: digest_size, remainder = divmod(int(lalgo[7:]), 8) except ValueError: raise ValueError("Unknown digest size for algo %s" % algo) from None if remainder: raise ValueError( "Digest size for algorithm %s must be a multiple of 8" % algo ) if lalgo in hashlib.algorithms_available: # Handle the case where OpenSSL ships the given algorithm # (e.g. Python 3.5 on Debian 9 stretch) _blake2_hash_cache[algo] = lambda: hashlib.new(lalgo) else: # Try using the built-in implementation for Python 3.6+ if blake_family in hashlib.algorithms_available: blake2 = getattr(hashlib, blake_family) else: import pyblake2 blake2 = getattr(pyblake2, blake_family) _blake2_hash_cache[algo] = lambda: blake2(digest_size=digest_size) return _blake2_hash_cache[algo]() def _new_hashlib_hash(algo): """Initialize a digest object from hashlib. Handle the swh-specific names for the blake2-related algorithms """ if algo.startswith("blake2"): return _new_blake2_hash(algo) else: return hashlib.new(algo) def _new_git_hash(base_algo, git_type, length): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm, and feed it with the header for a git object of the given type and length. 
The header for hashing a git object consists of: - The type of the object (encoded in ASCII) - One ASCII space (\x20) - The length of the object (decimal encoded in ASCII) - One NUL byte Args: base_algo (str from :const:`ALGORITHMS`): a hashlib-supported algorithm git_type: the type of the git object (supposedly one of 'blob', 'commit', 'tag', 'tree') length: the length of the git object you're encoding Returns: a hashutil.hash object """ h = _new_hashlib_hash(base_algo) git_header = "%s %d\0" % (git_type, length) h.update(git_header.encode("ascii")) return h def _new_hash(algo, length=None): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm. See the constant ALGORITHMS for the list of supported algorithms. If a git-specific hashing algorithm is requested (e.g., "sha1_git"), the hashing object will be pre-fed with the needed header; for this to work, length must be given. Args: algo (str): a hashing algorithm (one of ALGORITHMS) length (int): the length of the hashed payload (needed for git-specific algorithms) Returns: a hashutil.hash object Raises: ValueError if algo is unknown, or length is missing for a git-specific hash. """ if algo not in ALGORITHMS: raise ValueError( "Unexpected hashing algorithm %s, expected one of %s" % (algo, ", ".join(sorted(ALGORITHMS))) ) if algo.endswith("_git"): if length is None: raise ValueError("Missing length for git hashing algorithm") base_algo = algo[:-4] return _new_git_hash(base_algo, "blob", length) return _new_hashlib_hash(algo) def hash_git_data(data, git_type, base_algo="sha1"): """Hash the given data as a git object of type git_type. Args: data: a bytes object git_type: the git object type base_algo: the base hashing algorithm used (default: sha1) Returns: a dict mapping each algorithm to a bytes digest Raises: ValueError if the git_type is unexpected. 
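For instance, a minimal sketch of how this reproduces git's object hashing (the header construction is the one shown in :func:`_new_git_hash` above):

.. code-block:: python

    hash_git_data(b'foo', 'blob')
    # same result as hashlib.sha1(b'blob 3\x00foo').digest()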
""" git_object_types = { "blob", "tree", "commit", "tag", "snapshot", "raw_extrinsic_metadata", + "extid", } if git_type not in git_object_types: raise ValueError( "Unexpected git object type %s, expected one of %s" % (git_type, ", ".join(sorted(git_object_types))) ) h = _new_git_hash(base_algo, git_type, len(data)) h.update(data) return h.digest() @functools.lru_cache() def hash_to_hex(hash): """Converts a hash (in hex or bytes form) to its hexadecimal ascii form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: str: the hexadecimal form of the hash """ if isinstance(hash, str): return hash return binascii.hexlify(hash).decode("ascii") @functools.lru_cache() def hash_to_bytehex(hash): """Converts a hash to its hexadecimal bytes representation Args: hash (bytes): a :class:`bytes` hash Returns: bytes: the hexadecimal form of the hash, as :class:`bytes` """ return binascii.hexlify(hash) @functools.lru_cache() def hash_to_bytes(hash): """Converts a hash (in hex or bytes form) to its raw bytes form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: bytes: the :class:`bytes` form of the hash """ if isinstance(hash, bytes): return hash return bytes.fromhex(hash) @functools.lru_cache() def bytehex_to_hash(hex): """Converts a hexadecimal bytes representation of a hash to that hash Args: hash (bytes): a :class:`bytes` containing the hexadecimal form of the hash encoded in ascii Returns: bytes: the :class:`bytes` form of the hash """ return hash_to_bytes(hex.decode()) diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py index 434f3c8..c9f20dc 100644 --- a/swh/model/identifiers.py +++ b/swh/model/identifiers.py @@ -1,1210 +1,1241 @@ # Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from __future__ import annotations import binascii import datetime import enum from functools import lru_cache import hashlib import re from typing import ( Any, Dict, Generic, Iterable, List, Optional, Tuple, Type, TypeVar, Union, ) import urllib.parse import attr from attrs_strict import type_validator from .exceptions import ValidationError from .hashutil import MultiHash, hash_git_data, hash_to_bytes, hash_to_hex class ObjectType(enum.Enum): """Possible object types of a QualifiedSWHID or CoreSWHID. The values of each variant is what is used in the SWHID's string representation.""" SNAPSHOT = "snp" REVISION = "rev" RELEASE = "rel" DIRECTORY = "dir" CONTENT = "cnt" class ExtendedObjectType(enum.Enum): """Possible object types of an ExtendedSWHID. 
The variants are a superset of :class:`ObjectType`'s""" SNAPSHOT = "snp" REVISION = "rev" RELEASE = "rel" DIRECTORY = "dir" CONTENT = "cnt" ORIGIN = "ori" RAW_EXTRINSIC_METADATA = "emd" # The following are deprecated aliases of the variants defined in ObjectType # while transitioning from SWHID to QualifiedSWHID ORIGIN = "origin" SNAPSHOT = "snapshot" REVISION = "revision" RELEASE = "release" DIRECTORY = "directory" CONTENT = "content" RAW_EXTRINSIC_METADATA = "raw_extrinsic_metadata" SWHID_NAMESPACE = "swh" SWHID_VERSION = 1 SWHID_TYPES = ["snp", "rel", "rev", "dir", "cnt"] EXTENDED_SWHID_TYPES = SWHID_TYPES + ["ori", "emd"] SWHID_SEP = ":" SWHID_CTXT_SEP = ";" SWHID_QUALIFIERS = {"origin", "anchor", "visit", "path", "lines"} SWHID_RE_RAW = ( f"(?P<namespace>{SWHID_NAMESPACE})" f"{SWHID_SEP}(?P<scheme_version>{SWHID_VERSION})" f"{SWHID_SEP}(?P<object_type>{'|'.join(EXTENDED_SWHID_TYPES)})" f"{SWHID_SEP}(?P<object_id>[0-9a-f]{{40}})" f"({SWHID_CTXT_SEP}(?P<qualifiers>\\S+))?" ) SWHID_RE = re.compile(SWHID_RE_RAW) @lru_cache() def identifier_to_bytes(identifier): """Convert a text identifier to bytes. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 20 bytestring corresponding to the given identifier Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( "Wrong length for bytes identifier %s, expected 20" % len(identifier) ) return identifier if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( "Wrong length for str identifier %s, expected 40" % len(identifier) ) return bytes.fromhex(identifier) raise ValueError( "Wrong type for identifier %s, expected bytes or str" % identifier.__class__.__name__ ) @lru_cache() def identifier_to_str(identifier): """Convert an identifier to a hexadecimal string. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 40 string corresponding to the given identifier, hex encoded Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( "Wrong length for str identifier %s, expected 40" % len(identifier) ) return identifier if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( "Wrong length for bytes identifier %s, expected 20" % len(identifier) ) return binascii.hexlify(identifier).decode() raise ValueError( "Wrong type for identifier %s, expected bytes or str" % identifier.__class__.__name__ ) def content_identifier(content): """Return the intrinsic identifier for a content. A content's identifier is the sha1, sha1_git and sha256 checksums of its data. Args: content: a content conforming to the Software Heritage schema Returns: A dictionary with all the hashes for the data Raises: KeyError: if the content doesn't have a data member. """ return MultiHash.from_data(content["data"]).digest() def directory_entry_sort_key(entry): """The sorting key for tree entries""" if entry["type"] == "dir": return entry["name"] + b"/" else: return entry["name"] @lru_cache() def _perms_to_bytes(perms): """Convert the perms value to its bytes representation""" oc = oct(perms)[2:] return oc.encode("ascii") def escape_newlines(snippet): """Escape the newlines present in snippet according to git rules. Newlines in git manifests are escaped by indenting the next line by one space.
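For instance:

.. code-block:: python

    escape_newlines(b'first\nsecond')
    # == b'first\n second'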
""" if b"\n" in snippet: return b"\n ".join(snippet.split(b"\n")) else: return snippet def directory_identifier(directory): """Return the intrinsic identifier for a directory. A directory's identifier is the tree sha1 à la git of a directory listing, using the following algorithm, which is equivalent to the git algorithm for trees: 1. Entries of the directory are sorted using the name (or the name with '/' appended for directory entries) as key, in bytes order. 2. For each entry of the directory, the following bytes are output: - the octal representation of the permissions for the entry (stored in the 'perms' member), which is a representation of the entry type: - b'100644' (int 33188) for files - b'100755' (int 33261) for executable files - b'120000' (int 40960) for symbolic links - b'40000' (int 16384) for directories - b'160000' (int 57344) for references to revisions - an ascii space (b'\x20') - the entry's name (as raw bytes), stored in the 'name' member - a null byte (b'\x00') - the 20 byte long identifier of the object pointed at by the entry, stored in the 'target' member: - for files or executable files: their blob sha1_git - for symbolic links: the blob sha1_git of a file containing the link destination - for directories: their intrinsic identifier - for revisions: their intrinsic identifier (Note that there is no separator between entries) """ components = [] for entry in sorted(directory["entries"], key=directory_entry_sort_key): components.extend( [ _perms_to_bytes(entry["perms"]), b"\x20", entry["name"], b"\x00", identifier_to_bytes(entry["target"]), ] ) return identifier_to_str(hash_git_data(b"".join(components), "tree")) def format_date(date): """Convert a date object into an UTC timestamp encoded as ascii bytes. Git stores timestamps as an integer number of seconds since the UNIX epoch. However, Software Heritage stores timestamps as an integer number of microseconds (postgres type "datetime with timezone"). Therefore, we print timestamps with no microseconds as integers, and timestamps with microseconds as floating point values. We elide the trailing zeroes from microsecond values, to "future-proof" our representation if we ever need more precision in timestamps. """ if not isinstance(date, dict): raise ValueError("format_date only supports dicts, %r received" % date) seconds = date.get("seconds", 0) microseconds = date.get("microseconds", 0) if not microseconds: return str(seconds).encode() else: float_value = "%d.%06d" % (seconds, microseconds) return float_value.rstrip("0").encode() @lru_cache() def format_offset(offset, negative_utc=None): """Convert an integer number of minutes into an offset representation. The offset representation is [+-]hhmm where: - hh is the number of hours; - mm is the number of minutes. A null offset is represented as +0000. """ if offset < 0 or offset == 0 and negative_utc: sign = "-" else: sign = "+" hours = abs(offset) // 60 minutes = abs(offset) % 60 t = "%s%02d%02d" % (sign, hours, minutes) return t.encode() def normalize_timestamp(time_representation): """Normalize a time representation for processing by Software Heritage This function supports a numeric timestamp (representing a number of seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a :obj:`datetime.datetime` object (with timezone information), or a normalized Software Heritage time representation (idempotency). 
Args: time_representation: the representation of a timestamp Returns: dict: a normalized dictionary with three keys: - timestamp: a dict with two optional keys: - seconds: the integral number of seconds since the UNIX epoch - microseconds: the integral number of microseconds - offset: the timezone offset as a number of minutes relative to UTC - negative_utc: a boolean representing whether the offset is -0000 when offset = 0. """ if time_representation is None: return None negative_utc = False if isinstance(time_representation, dict): ts = time_representation["timestamp"] if isinstance(ts, dict): seconds = ts.get("seconds", 0) microseconds = ts.get("microseconds", 0) elif isinstance(ts, int): seconds = ts microseconds = 0 else: raise ValueError( "normalize_timestamp received non-integer timestamp member:" " %r" % ts ) offset = time_representation["offset"] if "negative_utc" in time_representation: negative_utc = time_representation["negative_utc"] if negative_utc is None: negative_utc = False elif isinstance(time_representation, datetime.datetime): seconds = int(time_representation.timestamp()) microseconds = time_representation.microsecond utcoffset = time_representation.utcoffset() if utcoffset is None: raise ValueError( "normalize_timestamp received datetime without timezone: %s" % time_representation ) # utcoffset is an integer number of minutes seconds_offset = utcoffset.total_seconds() offset = int(seconds_offset) // 60 elif isinstance(time_representation, int): seconds = time_representation microseconds = 0 offset = 0 else: raise ValueError( "normalize_timestamp received non-integer timestamp:" " %r" % time_representation ) return { "timestamp": {"seconds": seconds, "microseconds": microseconds,}, "offset": offset, "negative_utc": negative_utc, } def format_author(author): """Format the specification of an author. An author is either a byte string (passed unchanged), or a dict with three keys, fullname, name and email. If the fullname exists, return it; if it doesn't, we construct a fullname using the following heuristics: if the name value is None, we return the email in angle brackets, else, we return the name, a space, and the email in angle brackets. """ if isinstance(author, bytes) or author is None: return author if "fullname" in author: return author["fullname"] ret = [] if author["name"] is not None: ret.append(author["name"]) if author["email"] is not None: ret.append(b"".join([b"<", author["email"], b">"])) return b" ".join(ret) def format_manifest( headers: Iterable[Tuple[bytes, bytes]], message: Optional[bytes] = None, ) -> bytes: """Format a manifest comprised of a sequence of `headers` and an optional `message`. The manifest format, compatible with the git format for tag and commit objects, is as follows: - for each `key`, `value` in `headers`, emit: - the `key`, literally - an ascii space (``\\x20``) - the `value`, with newlines escaped using :func:`escape_newlines`, - an ascii newline (``\\x0a``) - if the `message` is not None, emit: - an ascii newline (``\\x0a``) - the `message`, literally Args: headers: a sequence of key/value headers stored in the manifest; message: an optional message used to trail the manifest. 
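For instance, a minimal sketch (arbitrary example header and message):

.. code-block:: python

    format_manifest([(b'key', b'value')], message=b'hello')
    # == b'key value\n\nhello'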
Returns: the formatted manifest as bytes """ entries: List[bytes] = [] for key, value in headers: entries.extend((key, b" ", escape_newlines(value), b"\n")) if message is not None: entries.extend((b"\n", message)) return b"".join(entries) def hash_manifest( type: str, headers: Iterable[Tuple[bytes, bytes]], message: Optional[bytes] = None, ): """Hash the manifest of an object of type `type`, comprised of a sequence of `headers` and an optional `message`. Before hashing, the manifest is serialized with the :func:`format_manifest` function. We then use the git "salted sha1" (:func:`swh.model.hashutil.hash_git_data`) with the given `type` to hash the manifest. Args: type: the type of object for which we're computing a manifest (e.g. "tag", "commit", ...) headers: a sequence of key/value headers stored in the manifest; message: an optional message used to trail the manifest. """ manifest = format_manifest(headers, message) return hash_git_data(manifest, type) def format_author_data(author, date_offset) -> bytes: """Format authorship data according to git standards. Git authorship data has two components: - an author specification, usually a name and email, but in practice an arbitrary bytestring - optionally, a timestamp with a UTC offset specification The authorship data is formatted thus:: `name and email`[ `timestamp` `utc_offset`] The timestamp is encoded as a (decimal) number of seconds since the UNIX epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we support fractional timestamps, using a dot as the separator for the decimal part. The utc offset is a number of minutes encoded as '[+-]HHMM'. Note that some tools can pass a negative offset corresponding to the UTC timezone ('-0000'), which is valid and is encoded as such. Args: author: an author specification (dict with two bytes values: name and email, or a bytes value) date_offset: a normalized date/time representation as returned by :func:`normalize_timestamp`. Returns: the byte string containing the authorship data """ ret = [format_author(author)] date_offset = normalize_timestamp(date_offset) if date_offset is not None: date_f = format_date(date_offset["timestamp"]) offset_f = format_offset(date_offset["offset"], date_offset["negative_utc"]) ret.extend([b" ", date_f, b" ", offset_f]) return b"".join(ret) def revision_identifier(revision): """Return the intrinsic identifier for a revision. The fields used for the revision identifier computation are: - directory - parents - author - author_date - committer - committer_date - extra_headers or metadata -> extra_headers - message A revision's identifier is the 'git'-checksum of a commit manifest constructed as follows (newlines are a single ASCII newline character):: tree <directory identifier> [for each parent in parents] parent <parent identifier> [end for each parents] author <author> <author_date> committer <committer> <committer_date> [for each key, value in extra_headers] <key> <encoded value> [end for each extra_headers] <message> The directory identifier is the ascii representation of its hexadecimal encoding. Author and committer are formatted with the :func:`format_author` function. Dates are formatted with the :func:`format_date` and :func:`format_offset` functions. Extra headers are an ordered list of [key, value] pairs. Keys are strings and get encoded to utf-8 for identifier computation. Values are either byte strings, unicode strings (that get encoded to utf-8), or integers (that get encoded to their utf-8 decimal representation). Multiline extra header values are escaped by indenting the continuation lines with one ascii space. If the message is None, the manifest ends with the last header.
Else, the message is appended to the headers after an empty line. The checksum of the full manifest is computed using the 'commit' git object type. """ headers = [(b"tree", identifier_to_str(revision["directory"]).encode())] for parent in revision["parents"]: if parent: headers.append((b"parent", identifier_to_str(parent).encode())) headers.append( (b"author", format_author_data(revision["author"], revision["date"])) ) headers.append( ( b"committer", format_author_data(revision["committer"], revision["committer_date"]), ) ) # Handle extra headers metadata = revision.get("metadata") or {} extra_headers = revision.get("extra_headers", ()) if not extra_headers and "extra_headers" in metadata: extra_headers = metadata["extra_headers"] headers.extend(extra_headers) return identifier_to_str(hash_manifest("commit", headers, revision["message"])) def target_type_to_git(target_type): """Convert a software heritage target type to a git object type""" return { "content": b"blob", "directory": b"tree", "revision": b"commit", "release": b"tag", "snapshot": b"refs", }[target_type] def release_identifier(release): """Return the intrinsic identifier for a release.""" headers = [ (b"object", identifier_to_str(release["target"]).encode()), (b"type", target_type_to_git(release["target_type"])), (b"tag", release["name"]), ] if "author" in release and release["author"]: headers.append( (b"tagger", format_author_data(release["author"], release["date"])) ) return identifier_to_str(hash_manifest("tag", headers, release["message"])) def snapshot_identifier(snapshot, *, ignore_unresolved=False): """Return the intrinsic identifier for a snapshot. Snapshots are a set of named branches, which are pointers to objects at any level of the Software Heritage DAG. As well as pointing to other objects in the Software Heritage DAG, branches can also be *alias*es, in which case their target is the name of another branch in the same snapshot, or *dangling*, in which case the target is unknown (and represented by the ``None`` value). A snapshot identifier is a salted sha1 (using the git hashing algorithm with the ``snapshot`` object type) of a manifest following the algorithm: 1. Branches are sorted using the name as key, in bytes order. 2. For each branch, the following bytes are output: - the type of the branch target: - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` for the corresponding entries in the DAG; - ``alias`` for branches referencing another branch; - ``dangling`` for dangling branches - an ascii space (``\\x20``) - the branch name (as raw bytes) - a null byte (``\\x00``) - the length of the target identifier, as an ascii-encoded decimal number (``20`` for current intrinsic identifiers, ``0`` for dangling branches, the length of the target branch name for branch aliases) - a colon (``:``) - the identifier of the target object pointed at by the branch, stored in the 'target' member: - for contents: their *sha1_git* - for directories, revisions, releases or snapshots: their intrinsic identifier - for branch aliases, the name of the target branch (as raw bytes) - for dangling branches, the empty string Note that, akin to directory manifests, there is no separator between entries. Because of symbolic branches, identifiers are of arbitrary length but are length-encoded to avoid ambiguity. Args: snapshot (dict): the snapshot of which to compute the identifier. 
A single entry is needed, ``'branches'``, which is itself a :class:`dict` mapping each branch to its target ignore_unresolved (bool): if `True`, ignore unresolved branch aliases. Returns: str: the intrinsic identifier for `snapshot` """ unresolved = [] lines = [] for name, target in sorted(snapshot["branches"].items()): if not target: target_type = b"dangling" target_id = b"" elif target["target_type"] == "alias": target_type = b"alias" target_id = target["target"] if target_id not in snapshot["branches"] or target_id == name: unresolved.append((name, target_id)) else: target_type = target["target_type"].encode() target_id = identifier_to_bytes(target["target"]) lines.extend( [ target_type, b"\x20", name, b"\x00", ("%d:" % len(target_id)).encode(), target_id, ] ) if unresolved and not ignore_unresolved: raise ValueError( "Branch aliases unresolved: %s" % ", ".join("%s -> %s" % x for x in unresolved), unresolved, ) return identifier_to_str(hash_git_data(b"".join(lines), "snapshot")) def origin_identifier(origin): """Return the intrinsic identifier for an origin. An origin's identifier is the sha1 checksum of the entire origin URL """ return hashlib.sha1(origin["url"].encode("utf-8")).hexdigest() def raw_extrinsic_metadata_identifier(metadata: Dict[str, Any]) -> str: """Return the intrinsic identifier for a RawExtrinsicMetadata object. A raw_extrinsic_metadata identifier is a salted sha1 (using the git hashing algorithm with the ``raw_extrinsic_metadata`` object type) of a manifest following the format: ``` target $ExtendedSwhid discovery_date $Timestamp authority $StrWithoutSpaces $IRI fetcher $Str $Version format $StrWithoutSpaces origin $IRI <- optional visit $IntInDecimal <- optional snapshot $CoreSwhid <- optional release $CoreSwhid <- optional revision $CoreSwhid <- optional path $Bytes <- optional directory $CoreSwhid <- optional $MetadataBytes ``` $IRI must be RFC 3987 IRIs (so they may contain newlines, that are escaped as described below) $StrWithoutSpaces and $Version are ASCII strings, and may not contain spaces. $Str is an UTF-8 string. $CoreSwhid are core SWHIDs, as defined in :ref:`persistent-identifiers`. $ExtendedSwhid is a core SWHID, with extra types allowed ('ori' for origins and 'emd' for raw extrinsic metadata) $Timestamp is a decimal representation of the rounded-down integer number of seconds since the UNIX epoch (1970-01-01 00:00:00 UTC), with no leading '0' (unless the timestamp value is zero) and no timezone. It may be negative by prefixing it with a '-', which must not be followed by a '0'. Newlines in $Bytes, $Str, and $Iri are escaped as with other git fields, ie. by adding a space after them. Returns: str: the intrinsic identifier for `metadata` """ # equivalent to using math.floor(dt.timestamp()) to round down, # as int(dt.timestamp()) rounds toward zero, # which would map two seconds on the 0 timestamp. # # This should never be an issue in practice as Software Heritage didn't # start collecting metadata before 2015. 
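# Illustrative example (arbitrary values): a discovery_date of
# 2021-01-01 12:00:00+02:00 becomes 2021-01-01 10:00:00 UTC, is truncated
# to whole seconds, and is encoded below as b"1609495200".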
timestamp = ( metadata["discovery_date"] .astimezone(datetime.timezone.utc) .replace(microsecond=0) .timestamp() ) assert timestamp.is_integer() headers = [ (b"target", str(metadata["target"]).encode()), (b"discovery_date", str(int(timestamp)).encode("ascii")), ( b"authority", f"{metadata['authority']['type']} {metadata['authority']['url']}".encode(), ), ( b"fetcher", f"{metadata['fetcher']['name']} {metadata['fetcher']['version']}".encode(), ), (b"format", metadata["format"].encode()), ] for key in ( "origin", "visit", "snapshot", "release", "revision", "path", "directory", ): if metadata.get(key) is not None: value: bytes if key == "path": value = metadata[key] else: value = str(metadata[key]).encode() headers.append((key.encode("ascii"), value)) return identifier_to_str( hash_manifest("raw_extrinsic_metadata", headers, metadata["metadata"]) ) +def extid_identifier(extid: Dict[str, Any]) -> str: + """Return the intrinsic identifier for an ExtID object. + + An ExtID identifier is a salted sha1 (using the git hashing algorithm with + the ``extid`` object type) of a manifest following the format: + + ``` + extid_type $StrWithoutSpaces + extid $Bytes + target $CoreSwhid + ``` + + $StrWithoutSpaces is an ASCII string, and may not contain spaces. + + Newlines in $Bytes are escaped as with other git fields, ie. by adding a + space after them. + + Returns: + str: the intrinsic identifier for `extid` + + """ + + headers = [ + (b"extid_type", extid["extid_type"].encode("ascii")), + (b"extid", extid["extid"]), + (b"target", str(extid["target"]).encode("ascii")), + ] + + return identifier_to_str(hash_manifest("extid", headers)) + + # type of the "object_type" attribute of the SWHID class; either # ObjectType or ExtendedObjectType _TObjectType = TypeVar("_TObjectType", ObjectType, ExtendedObjectType) # the SWHID class itself (this is used so that X.from_string() can return X # for all X subclass of _BaseSWHID) _TSWHID = TypeVar("_TSWHID", bound="_BaseSWHID") @attr.s(frozen=True, kw_only=True) class _BaseSWHID(Generic[_TObjectType]): """Common base class for CoreSWHID, QualifiedSWHID, and ExtendedSWHID. 
This is an "abstract" class and should not be instantiated directly; it only exists to deduplicate code between these three SWHID classes.""" namespace = attr.ib(type=str, default=SWHID_NAMESPACE) """the namespace of the identifier, defaults to ``swh``""" scheme_version = attr.ib(type=int, default=SWHID_VERSION) """the scheme version of the identifier, defaults to 1""" # overridden by subclasses object_type: _TObjectType """the type of object the identifier points to""" object_id = attr.ib(type=bytes, validator=type_validator()) """object's identifier""" @namespace.validator def check_namespace(self, attribute, value): if value != SWHID_NAMESPACE: raise ValidationError( "Invalid SWHID: invalid namespace: %(namespace)s", params={"namespace": value}, ) @scheme_version.validator def check_scheme_version(self, attribute, value): if value != SWHID_VERSION: raise ValidationError( "Invalid SWHID: invalid version: %(version)s", params={"version": value} ) @object_id.validator def check_object_id(self, attribute, value): if len(value) != 20: raise ValidationError( "Invalid SWHID: invalid checksum: %(object_id)s", params={"object_id": hash_to_hex(value)}, ) def __str__(self) -> str: return SWHID_SEP.join( [ self.namespace, str(self.scheme_version), self.object_type.value, hash_to_hex(self.object_id), ] ) @classmethod def from_string(cls: Type[_TSWHID], s: str) -> _TSWHID: parts = _parse_swhid(s) if parts.pop("qualifiers"): raise ValidationError(f"{cls.__name__} does not support qualifiers.") try: return cls(**parts) except ValueError as e: raise ValidationError( "ValueError: %(args)", params={"args": e.args} ) from None @attr.s(frozen=True, kw_only=True) class CoreSWHID(_BaseSWHID[ObjectType]): """ Dataclass holding the relevant info associated to a SoftWare Heritage persistent IDentifier (SWHID). Unlike `QualifiedSWHID`, it is restricted to core SWHIDs, ie. SWHIDs with no qualifiers. Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id To get the raw SWHID string from an instance of this class, use the :func:`str` function: >>> swhid = CoreSWHID( ... object_type=ObjectType.CONTENT, ... object_id=bytes.fromhex('8ff44f081d43176474b267de5451f2c2e88089d0'), ... ) >>> str(swhid) 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0' And vice-versa with :meth:`CoreSWHID.from_string`: >>> swhid == CoreSWHID.from_string( ... "swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0" ... ) True """ object_type = attr.ib( type=ObjectType, validator=type_validator(), converter=ObjectType ) """the type of object the identifier points to""" def to_extended(self) -> ExtendedSWHID: """Converts this CoreSWHID into an ExtendedSWHID. 
As ExtendedSWHID is a superset of CoreSWHID, this is lossless.""" return ExtendedSWHID( namespace=self.namespace, scheme_version=self.scheme_version, object_type=ExtendedObjectType(self.object_type.value), object_id=self.object_id, ) def _parse_core_swhid(swhid: Union[str, CoreSWHID, None]) -> Optional[CoreSWHID]: if swhid is None or isinstance(swhid, CoreSWHID): return swhid else: return CoreSWHID.from_string(swhid) def _parse_lines_qualifier( lines: Union[str, Tuple[int, Optional[int]], None] ) -> Optional[Tuple[int, Optional[int]]]: try: if lines is None or isinstance(lines, tuple): return lines elif "-" in lines: (from_, to) = lines.split("-", 2) return (int(from_), int(to)) else: return (int(lines), None) except ValueError: raise ValidationError( "Invalid format for the lines qualifier: %(lines)", params={"lines": lines} ) def _parse_path_qualifier(path: Union[str, bytes, None]) -> Optional[bytes]: if path is None or isinstance(path, bytes): return path else: return urllib.parse.unquote_to_bytes(path) @attr.s(frozen=True, kw_only=True) class QualifiedSWHID(_BaseSWHID[ObjectType]): """ Dataclass holding the relevant info associated to a SoftWare Heritage persistent IDentifier (SWHID) Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id To get the raw SWHID string from an instance of this class, use the :func:`str` function: >>> swhid = QualifiedSWHID( ... object_type=ObjectType.CONTENT, ... object_id=bytes.fromhex('8ff44f081d43176474b267de5451f2c2e88089d0'), ... lines=(5, 10), ... ) >>> str(swhid) 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0;lines=5-10' And vice-versa with :meth:`QualifiedSWHID.from_string`: >>> swhid == QualifiedSWHID.from_string( ... "swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0;lines=5-10" ... 
) True """ object_type = attr.ib( type=ObjectType, validator=type_validator(), converter=ObjectType ) """the type of object the identifier points to""" # qualifiers: origin = attr.ib(type=Optional[str], default=None, validator=type_validator()) """the software origin where an object has been found or observed in the wild, as an URI""" visit = attr.ib(type=Optional[CoreSWHID], default=None, converter=_parse_core_swhid) """the core identifier of a snapshot corresponding to a specific visit of a repository containing the designated object""" anchor = attr.ib( type=Optional[CoreSWHID], default=None, validator=type_validator(), converter=_parse_core_swhid, ) """a designated node in the Merkle DAG relative to which a path to the object is specified, as the core identifier of a directory, a revision, a release, or a snapshot""" path = attr.ib( type=Optional[bytes], default=None, validator=type_validator(), converter=_parse_path_qualifier, ) """the absolute file path, from the root directory associated to the anchor node, to the object; when the anchor denotes a directory or a revision, and almost always when it’s a release, the root directory is uniquely determined; when the anchor denotes a snapshot, the root directory is the one pointed to by HEAD (possibly indirectly), and undefined if such a reference is missing""" lines = attr.ib( type=Optional[Tuple[int, Optional[int]]], default=None, validator=type_validator(), converter=_parse_lines_qualifier, ) """lines: line number(s) of interest, usually within a content object""" @visit.validator def check_visit(self, attribute, value): if value and value.object_type != ObjectType.SNAPSHOT: raise ValidationError( "The 'visit' qualifier must be a 'snp' SWHID, not '%(type)s'", params={"type": value.object_type.value}, ) @anchor.validator def check_anchor(self, attribute, value): if value and value.object_type not in ( ObjectType.DIRECTORY, ObjectType.REVISION, ObjectType.RELEASE, ObjectType.SNAPSHOT, ): raise ValidationError( "The 'visit' qualifier must be a 'dir', 'rev', 'rel', or 'snp' SWHID, " "not '%s(type)s'", params={"type": value.object_type.value}, ) def qualifiers(self) -> Dict[str, str]: origin = self.origin if origin: unescaped_origin = origin origin = origin.replace(";", "%3B") assert urllib.parse.unquote_to_bytes( origin ) == urllib.parse.unquote_to_bytes( unescaped_origin ), "Escaping ';' in the origin qualifier corrupted the origin URL." 
d: Dict[str, Optional[str]] = { "origin": origin, "visit": str(self.visit) if self.visit else None, "anchor": str(self.anchor) if self.anchor else None, "path": ( urllib.parse.quote_from_bytes(self.path) if self.path is not None else None ), "lines": ( "-".join(str(line) for line in self.lines if line is not None) if self.lines else None ), } return {k: v for (k, v) in d.items() if v is not None} def __str__(self) -> str: swhid = SWHID_SEP.join( [ self.namespace, str(self.scheme_version), self.object_type.value, hash_to_hex(self.object_id), ] ) qualifiers = self.qualifiers() if qualifiers: for k, v in qualifiers.items(): swhid += "%s%s=%s" % (SWHID_CTXT_SEP, k, v) return swhid @classmethod def from_string(cls, s: str) -> QualifiedSWHID: parts = _parse_swhid(s) qualifiers = parts.pop("qualifiers") invalid_qualifiers = set(qualifiers) - SWHID_QUALIFIERS if invalid_qualifiers: raise ValidationError( "Invalid qualifier(s): %(qualifiers)", params={"qualifiers": ", ".join(invalid_qualifiers)}, ) try: return QualifiedSWHID(**parts, **qualifiers) except ValueError as e: raise ValidationError( "ValueError: %(args)s", params={"args": e.args} ) from None @attr.s(frozen=True, kw_only=True) class ExtendedSWHID(_BaseSWHID[ExtendedObjectType]): """ Dataclass holding the relevant info associated to a SoftWare Heritage persistent IDentifier (SWHID). It extends `CoreSWHID`, by allowing non-standard object types; and should only be used internally to Software Heritage. Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id To get the raw SWHID string from an instance of this class, use the :func:`str` function: >>> swhid = ExtendedSWHID( ... object_type=ExtendedObjectType.CONTENT, ... object_id=bytes.fromhex('8ff44f081d43176474b267de5451f2c2e88089d0'), ... ) >>> str(swhid) 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0' And vice-versa with :meth:`CoreSWHID.from_string`: >>> swhid == ExtendedSWHID.from_string( ... "swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0" ... ) True """ object_type = attr.ib( type=ExtendedObjectType, validator=type_validator(), converter=ExtendedObjectType, ) """the type of object the identifier points to""" def _parse_swhid(swhid: str) -> Dict[str, Any]: """Parse a Software Heritage identifier (SWHID) from string (see: :ref:`persistent-identifiers`.) This is for internal use; use :meth:`CoreSWHID.from_string`, :meth:`QualifiedSWHID.from_string`, or :meth:`ExtendedSWHID.from_string` instead, as they perform validation and build a dataclass. 
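For instance, a sketch of the returned mapping (the all-zeros hash is an arbitrary example):

.. code-block:: python

    _parse_swhid("swh:1:cnt:" + "00" * 20)
    # {'namespace': 'swh', 'scheme_version': 1, 'object_type': 'cnt',
    #  'object_id': b'\x00' * 20, 'qualifiers': {}}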
Args: swhid (str): A persistent identifier Raises: swh.model.exceptions.ValidationError: if passed string is not a valid SWHID """ m = SWHID_RE.fullmatch(swhid) if not m: raise ValidationError( "Invalid SWHID: invalid syntax: %(swhid)s", params={"swhid": swhid} ) parts: Dict[str, Any] = m.groupdict() qualifiers_raw = parts["qualifiers"] parts["qualifiers"] = {} if qualifiers_raw: for qualifier in qualifiers_raw.split(SWHID_CTXT_SEP): try: k, v = qualifier.split("=") except ValueError: raise ValidationError( "Invalid SWHID: invalid qualifier: %(qualifier)s", params={"qualifier": qualifier}, ) parts["qualifiers"][k] = v parts["scheme_version"] = int(parts["scheme_version"]) parts["object_id"] = hash_to_bytes(parts["object_id"]) return parts diff --git a/swh/model/model.py b/swh/model/model.py index 1c46f2a..da05471 100644 --- a/swh/model/model.py +++ b/swh/model/model.py @@ -1,1098 +1,1121 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from abc import ABCMeta, abstractmethod import datetime from enum import Enum from hashlib import sha256 from typing import Any, Dict, Iterable, Optional, Tuple, TypeVar, Union import attr from attrs_strict import type_validator import dateutil.parser import iso8601 from typing_extensions import Final from .collections import ImmutableDict from .hashutil import DEFAULT_ALGORITHMS, MultiHash, hash_to_bytes from .identifiers import ( directory_identifier, + extid_identifier, normalize_timestamp, origin_identifier, raw_extrinsic_metadata_identifier, release_identifier, revision_identifier, snapshot_identifier, ) from .identifiers import CoreSWHID from .identifiers import ExtendedObjectType as SwhidExtendedObjectType from .identifiers import ExtendedSWHID from .identifiers import ObjectType as SwhidObjectType class MissingData(Exception): """Raised by `Content.with_data` when it has no way of fetching the data (but not when fetching the data fails).""" pass KeyType = Union[Dict[str, str], Dict[str, bytes], bytes] """The type returned by BaseModel.unique_key().""" SHA1_SIZE = 20 # TODO: Limit this to 20 bytes Sha1Git = bytes Sha1 = bytes KT = TypeVar("KT") VT = TypeVar("VT") def freeze_optional_dict( d: Union[None, Dict[KT, VT], ImmutableDict[KT, VT]] # type: ignore ) -> Optional[ImmutableDict[KT, VT]]: if isinstance(d, dict): return ImmutableDict(d) else: return d def dictify(value): "Helper function used by BaseModel.to_dict()" if isinstance(value, BaseModel): return value.to_dict() elif isinstance(value, (CoreSWHID, ExtendedSWHID)): return str(value) elif isinstance(value, Enum): return value.value elif isinstance(value, (dict, ImmutableDict)): return {k: dictify(v) for k, v in value.items()} elif isinstance(value, tuple): return tuple(dictify(v) for v in value) else: return value ModelType = TypeVar("ModelType", bound="BaseModel") class BaseModel: """Base class for SWH model classes. 
Provides serialization/deserialization to/from Python dictionaries that are suitable for JSON/msgpack-like formats.""" __slots__ = () def to_dict(self): """Wrapper of `attr.asdict` that can be overridden by subclasses that have special handling of some of the fields.""" return dictify(attr.asdict(self, recurse=False)) @classmethod def from_dict(cls, d): """Takes a dictionary representing a tree of SWH objects, and recursively builds the corresponding objects.""" return cls(**d) def anonymize(self: ModelType) -> Optional[ModelType]: """Returns an anonymized version of the object, if needed. If the object model does not need/support anonymization, returns None. """ return None def unique_key(self) -> KeyType: """Returns a unique key for this object, that can be used for deduplication.""" raise NotImplementedError(f"unique_key for {self}") class HashableObject(metaclass=ABCMeta): """Mixin to automatically compute object identifier hash when the associated model is instantiated.""" __slots__ = () @abstractmethod def compute_hash(self) -> bytes: """Derived model classes must implement this to compute the object hash. This method is called by the object initialization if the `id` attribute is set to an empty value. """ pass def __attrs_post_init__(self): if not self.id: obj_id = self.compute_hash() object.__setattr__(self, "id", obj_id) def unique_key(self) -> KeyType: return self.id # type: ignore @attr.s(frozen=True, slots=True) class Person(BaseModel): """Represents the author/committer of a revision or release.""" object_type: Final = "person" fullname = attr.ib(type=bytes, validator=type_validator()) name = attr.ib(type=Optional[bytes], validator=type_validator()) email = attr.ib(type=Optional[bytes], validator=type_validator()) @classmethod def from_fullname(cls, fullname: bytes): """Returns a Person object, by guessing the name and email from the fullname, in the ``name <email>`` format. The fullname is left unchanged.""" if fullname is None: raise TypeError("fullname is None.") name: Optional[bytes] email: Optional[bytes] try: open_bracket = fullname.index(b"<") except ValueError: name = fullname email = None else: raw_name = fullname[:open_bracket] raw_email = fullname[open_bracket + 1 :] if not raw_name: name = None else: name = raw_name.strip() try: close_bracket = raw_email.rindex(b">") except ValueError: email = raw_email else: email = raw_email[:close_bracket] return Person(name=name or None, email=email or None, fullname=fullname,) def anonymize(self) -> "Person": """Returns an anonymized version of the Person object. Anonymization simply replaces the fullname with its sha256 hash, and unsets the name and email.
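For instance, a sketch (the fullname is an arbitrary example):

.. code-block:: python

    p = Person.from_fullname(b'Jane Doe <jd@example.org>')
    p.anonymize()
    # Person(fullname=<32-byte sha256 digest>, name=None, email=None)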
""" return Person(fullname=sha256(self.fullname).digest(), name=None, email=None,) @attr.s(frozen=True, slots=True) class Timestamp(BaseModel): """Represents a naive timestamp from a VCS.""" object_type: Final = "timestamp" seconds = attr.ib(type=int, validator=type_validator()) microseconds = attr.ib(type=int, validator=type_validator()) @seconds.validator def check_seconds(self, attribute, value): """Check that seconds fit in a 64-bits signed integer.""" if not (-(2 ** 63) <= value < 2 ** 63): raise ValueError("Seconds must be a signed 64-bits integer.") @microseconds.validator def check_microseconds(self, attribute, value): """Checks that microseconds are positive and < 1000000.""" if not (0 <= value < 10 ** 6): raise ValueError("Microseconds must be in [0, 1000000[.") @attr.s(frozen=True, slots=True) class TimestampWithTimezone(BaseModel): """Represents a TZ-aware timestamp from a VCS.""" object_type: Final = "timestamp_with_timezone" timestamp = attr.ib(type=Timestamp, validator=type_validator()) offset = attr.ib(type=int, validator=type_validator()) negative_utc = attr.ib(type=bool, validator=type_validator()) @offset.validator def check_offset(self, attribute, value): """Checks the offset is a 16-bits signed integer (in theory, it should always be between -14 and +14 hours).""" if not (-(2 ** 15) <= value < 2 ** 15): # max 14 hours offset in theory, but you never know what # you'll find in the wild... raise ValueError("offset too large: %d minutes" % value) @negative_utc.validator def check_negative_utc(self, attribute, value): if self.offset and value: raise ValueError("negative_utc can only be True is offset=0") @classmethod def from_dict(cls, obj: Union[Dict, datetime.datetime, int]): """Builds a TimestampWithTimezone from any of the formats accepted by :func:`swh.model.normalize_timestamp`.""" # TODO: this accept way more types than just dicts; find a better # name d = normalize_timestamp(obj) return cls( timestamp=Timestamp.from_dict(d["timestamp"]), offset=d["offset"], negative_utc=d["negative_utc"], ) @classmethod def from_datetime(cls, dt: datetime.datetime): return cls.from_dict(dt) @classmethod def from_iso8601(cls, s): """Builds a TimestampWithTimezone from an ISO8601-formatted string. 
""" dt = iso8601.parse_date(s) tstz = cls.from_datetime(dt) if dt.tzname() == "-00:00": tstz = attr.evolve(tstz, negative_utc=True) return tstz @attr.s(frozen=True, slots=True) class Origin(BaseModel): """Represents a software source: a VCS and an URL.""" object_type: Final = "origin" url = attr.ib(type=str, validator=type_validator()) def unique_key(self) -> KeyType: return {"url": self.url} def swhid(self) -> ExtendedSWHID: """Returns a SWHID representing this origin.""" return ExtendedSWHID( object_type=SwhidExtendedObjectType.ORIGIN, object_id=hash_to_bytes(origin_identifier(self.unique_key())), ) @attr.s(frozen=True, slots=True) class OriginVisit(BaseModel): """Represents an origin visit with a given type at a given point in time, by a SWH loader.""" object_type: Final = "origin_visit" origin = attr.ib(type=str, validator=type_validator()) date = attr.ib(type=datetime.datetime, validator=type_validator()) type = attr.ib(type=str, validator=type_validator()) """Should not be set before calling 'origin_visit_add()'.""" visit = attr.ib(type=Optional[int], validator=type_validator(), default=None) @date.validator def check_date(self, attribute, value): """Checks the date has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("date must be a timezone-aware datetime.") def to_dict(self): """Serializes the date as a string and omits the visit id if it is `None`.""" ov = super().to_dict() if ov["visit"] is None: del ov["visit"] return ov def unique_key(self) -> KeyType: return {"origin": self.origin, "date": str(self.date)} @attr.s(frozen=True, slots=True) class OriginVisitStatus(BaseModel): """Represents a visit update of an origin at a given point in time. """ object_type: Final = "origin_visit_status" origin = attr.ib(type=str, validator=type_validator()) visit = attr.ib(type=int, validator=type_validator()) date = attr.ib(type=datetime.datetime, validator=type_validator()) status = attr.ib( type=str, validator=attr.validators.in_( ["created", "ongoing", "full", "partial", "not_found", "failed"] ), ) snapshot = attr.ib(type=Optional[Sha1Git], validator=type_validator()) # Type is optional be to able to use it before adding it to the database model type = attr.ib(type=Optional[str], validator=type_validator(), default=None) metadata = attr.ib( type=Optional[ImmutableDict[str, object]], validator=type_validator(), converter=freeze_optional_dict, default=None, ) @date.validator def check_date(self, attribute, value): """Checks the date has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("date must be a timezone-aware datetime.") def unique_key(self) -> KeyType: return {"origin": self.origin, "visit": str(self.visit), "date": str(self.date)} class TargetType(Enum): """The type of content pointed to by a snapshot branch. Usually a revision or an alias.""" CONTENT = "content" DIRECTORY = "directory" REVISION = "revision" RELEASE = "release" SNAPSHOT = "snapshot" ALIAS = "alias" class ObjectType(Enum): """The type of content pointed to by a release. 
Usually a revision""" CONTENT = "content" DIRECTORY = "directory" REVISION = "revision" RELEASE = "release" SNAPSHOT = "snapshot" @attr.s(frozen=True, slots=True) class SnapshotBranch(BaseModel): """Represents one of the branches of a snapshot.""" object_type: Final = "snapshot_branch" target = attr.ib(type=bytes, validator=type_validator()) target_type = attr.ib(type=TargetType, validator=type_validator()) @target.validator def check_target(self, attribute, value): """Checks the target type is not an alias, checks the target is a valid sha1_git.""" if self.target_type != TargetType.ALIAS and self.target is not None: if len(value) != 20: raise ValueError("Wrong length for bytes identifier: %d" % len(value)) @classmethod def from_dict(cls, d): return cls(target=d["target"], target_type=TargetType(d["target_type"])) @attr.s(frozen=True, slots=True) class Snapshot(HashableObject, BaseModel): """Represents the full state of an origin at a given point in time.""" object_type: Final = "snapshot" branches = attr.ib( type=ImmutableDict[bytes, Optional[SnapshotBranch]], validator=type_validator(), converter=freeze_optional_dict, ) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") def compute_hash(self) -> bytes: return hash_to_bytes(snapshot_identifier(self.to_dict())) @classmethod def from_dict(cls, d): d = d.copy() return cls( branches=ImmutableDict( (name, SnapshotBranch.from_dict(branch) if branch else None) for (name, branch) in d.pop("branches").items() ), **d, ) def swhid(self) -> CoreSWHID: """Returns a SWHID representing this object.""" return CoreSWHID(object_type=SwhidObjectType.SNAPSHOT, object_id=self.id) @attr.s(frozen=True, slots=True) class Release(HashableObject, BaseModel): object_type: Final = "release" name = attr.ib(type=bytes, validator=type_validator()) message = attr.ib(type=Optional[bytes], validator=type_validator()) target = attr.ib(type=Optional[Sha1Git], validator=type_validator()) target_type = attr.ib(type=ObjectType, validator=type_validator()) synthetic = attr.ib(type=bool, validator=type_validator()) author = attr.ib(type=Optional[Person], validator=type_validator(), default=None) date = attr.ib( type=Optional[TimestampWithTimezone], validator=type_validator(), default=None ) metadata = attr.ib( type=Optional[ImmutableDict[str, object]], validator=type_validator(), converter=freeze_optional_dict, default=None, ) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") def compute_hash(self) -> bytes: return hash_to_bytes(release_identifier(self.to_dict())) @author.validator def check_author(self, attribute, value): """If the author is `None`, checks the date is `None` too.""" if self.author is None and self.date is not None: raise ValueError("release date must be None if author is None.") def to_dict(self): rel = super().to_dict() if rel["metadata"] is None: del rel["metadata"] return rel @classmethod def from_dict(cls, d): d = d.copy() if d.get("author"): d["author"] = Person.from_dict(d["author"]) if d.get("date"): d["date"] = TimestampWithTimezone.from_dict(d["date"]) return cls(target_type=ObjectType(d.pop("target_type")), **d) def swhid(self) -> CoreSWHID: """Returns a SWHID representing this object.""" return CoreSWHID(object_type=SwhidObjectType.RELEASE, object_id=self.id) def anonymize(self) -> "Release": """Returns an anonymized version of the Release object. Anonymization consists in replacing the author with an anonymized Person object. 
""" author = self.author and self.author.anonymize() return attr.evolve(self, author=author) class RevisionType(Enum): GIT = "git" TAR = "tar" DSC = "dsc" SUBVERSION = "svn" MERCURIAL = "hg" def tuplify_extra_headers(value: Iterable): return tuple((k, v) for k, v in value) @attr.s(frozen=True, slots=True) class Revision(HashableObject, BaseModel): object_type: Final = "revision" message = attr.ib(type=Optional[bytes], validator=type_validator()) author = attr.ib(type=Person, validator=type_validator()) committer = attr.ib(type=Person, validator=type_validator()) date = attr.ib(type=Optional[TimestampWithTimezone], validator=type_validator()) committer_date = attr.ib( type=Optional[TimestampWithTimezone], validator=type_validator() ) type = attr.ib(type=RevisionType, validator=type_validator()) directory = attr.ib(type=Sha1Git, validator=type_validator()) synthetic = attr.ib(type=bool, validator=type_validator()) metadata = attr.ib( type=Optional[ImmutableDict[str, object]], validator=type_validator(), converter=freeze_optional_dict, default=None, ) parents = attr.ib(type=Tuple[Sha1Git, ...], validator=type_validator(), default=()) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") extra_headers = attr.ib( type=Tuple[Tuple[bytes, bytes], ...], validator=type_validator(), converter=tuplify_extra_headers, default=(), ) def __attrs_post_init__(self): super().__attrs_post_init__() # ensure metadata is a deep copy of whatever was given, and if needed # extract extra_headers from there if self.metadata: metadata = self.metadata if not self.extra_headers and "extra_headers" in metadata: (extra_headers, metadata) = metadata.copy_pop("extra_headers") object.__setattr__( self, "extra_headers", tuplify_extra_headers(extra_headers), ) attr.validate(self) object.__setattr__(self, "metadata", metadata) def compute_hash(self) -> bytes: return hash_to_bytes(revision_identifier(self.to_dict())) @classmethod def from_dict(cls, d): d = d.copy() date = d.pop("date") if date: date = TimestampWithTimezone.from_dict(date) committer_date = d.pop("committer_date") if committer_date: committer_date = TimestampWithTimezone.from_dict(committer_date) return cls( author=Person.from_dict(d.pop("author")), committer=Person.from_dict(d.pop("committer")), date=date, committer_date=committer_date, type=RevisionType(d.pop("type")), parents=tuple(d.pop("parents")), # for BW compat **d, ) def swhid(self) -> CoreSWHID: """Returns a SWHID representing this object.""" return CoreSWHID(object_type=SwhidObjectType.REVISION, object_id=self.id) def anonymize(self) -> "Revision": """Returns an anonymized version of the Revision object. Anonymization consists in replacing the author and committer with an anonymized Person object. 
""" return attr.evolve( self, author=self.author.anonymize(), committer=self.committer.anonymize() ) @attr.s(frozen=True, slots=True) class DirectoryEntry(BaseModel): object_type: Final = "directory_entry" name = attr.ib(type=bytes, validator=type_validator()) type = attr.ib(type=str, validator=attr.validators.in_(["file", "dir", "rev"])) target = attr.ib(type=Sha1Git, validator=type_validator()) perms = attr.ib(type=int, validator=type_validator()) """Usually one of the values of `swh.model.from_disk.DentryPerms`.""" @attr.s(frozen=True, slots=True) class Directory(HashableObject, BaseModel): object_type: Final = "directory" entries = attr.ib(type=Tuple[DirectoryEntry, ...], validator=type_validator()) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") def compute_hash(self) -> bytes: return hash_to_bytes(directory_identifier(self.to_dict())) @classmethod def from_dict(cls, d): d = d.copy() return cls( entries=tuple( DirectoryEntry.from_dict(entry) for entry in d.pop("entries") ), **d, ) def swhid(self) -> CoreSWHID: """Returns a SWHID representing this object.""" return CoreSWHID(object_type=SwhidObjectType.DIRECTORY, object_id=self.id) @attr.s(frozen=True, slots=True) class BaseContent(BaseModel): status = attr.ib( type=str, validator=attr.validators.in_(["visible", "hidden", "absent"]) ) @staticmethod def _hash_data(data: bytes): """Hash some data, returning most of the fields of a content object""" d = MultiHash.from_data(data).digest() d["data"] = data d["length"] = len(data) return d @classmethod def from_dict(cls, d, use_subclass=True): if use_subclass: # Chooses a subclass to instantiate instead. if d["status"] == "absent": return SkippedContent.from_dict(d) else: return Content.from_dict(d) else: return super().from_dict(d) def get_hash(self, hash_name): if hash_name not in DEFAULT_ALGORITHMS: raise ValueError("{} is not a valid hash name.".format(hash_name)) return getattr(self, hash_name) def hashes(self) -> Dict[str, bytes]: """Returns a dictionary {hash_name: hash_value}""" return {algo: getattr(self, algo) for algo in DEFAULT_ALGORITHMS} @attr.s(frozen=True, slots=True) class Content(BaseContent): object_type: Final = "content" sha1 = attr.ib(type=bytes, validator=type_validator()) sha1_git = attr.ib(type=Sha1Git, validator=type_validator()) sha256 = attr.ib(type=bytes, validator=type_validator()) blake2s256 = attr.ib(type=bytes, validator=type_validator()) length = attr.ib(type=int, validator=type_validator()) status = attr.ib( type=str, validator=attr.validators.in_(["visible", "hidden"]), default="visible", ) data = attr.ib(type=Optional[bytes], validator=type_validator(), default=None) ctime = attr.ib( type=Optional[datetime.datetime], validator=type_validator(), default=None, eq=False, ) @length.validator def check_length(self, attribute, value): """Checks the length is positive.""" if value < 0: raise ValueError("Length must be positive.") @ctime.validator def check_ctime(self, attribute, value): """Checks the ctime has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("ctime must be a timezone-aware datetime.") def to_dict(self): content = super().to_dict() if content["data"] is None: del content["data"] if content["ctime"] is None: del content["ctime"] return content @classmethod def from_data(cls, data, status="visible", ctime=None) -> "Content": """Generate a Content from a given `data` byte string. This populates the Content with the hashes and length for the data passed as argument, as well as the data itself. 
""" d = cls._hash_data(data) d["status"] = status d["ctime"] = ctime return cls(**d) @classmethod def from_dict(cls, d): if isinstance(d.get("ctime"), str): d = d.copy() d["ctime"] = dateutil.parser.parse(d["ctime"]) return super().from_dict(d, use_subclass=False) def with_data(self) -> "Content": """Loads the `data` attribute; meaning that it is guaranteed not to be None after this call. This call is almost a no-op, but subclasses may overload this method to lazy-load data (eg. from disk or objstorage).""" if self.data is None: raise MissingData("Content data is None.") return self def unique_key(self) -> KeyType: return self.sha1 # TODO: use a dict of hashes def swhid(self) -> CoreSWHID: """Returns a SWHID representing this object.""" return CoreSWHID(object_type=SwhidObjectType.CONTENT, object_id=self.sha1_git) @attr.s(frozen=True, slots=True) class SkippedContent(BaseContent): object_type: Final = "skipped_content" sha1 = attr.ib(type=Optional[bytes], validator=type_validator()) sha1_git = attr.ib(type=Optional[Sha1Git], validator=type_validator()) sha256 = attr.ib(type=Optional[bytes], validator=type_validator()) blake2s256 = attr.ib(type=Optional[bytes], validator=type_validator()) length = attr.ib(type=Optional[int], validator=type_validator()) status = attr.ib(type=str, validator=attr.validators.in_(["absent"])) reason = attr.ib(type=Optional[str], validator=type_validator(), default=None) origin = attr.ib(type=Optional[str], validator=type_validator(), default=None) ctime = attr.ib( type=Optional[datetime.datetime], validator=type_validator(), default=None, eq=False, ) @reason.validator def check_reason(self, attribute, value): """Checks the reason is full if status != absent.""" assert self.reason == value if value is None: raise ValueError("Must provide a reason if content is absent.") @length.validator def check_length(self, attribute, value): """Checks the length is positive or -1.""" if value < -1: raise ValueError("Length must be positive or -1.") @ctime.validator def check_ctime(self, attribute, value): """Checks the ctime has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("ctime must be a timezone-aware datetime.") def to_dict(self): content = super().to_dict() if content["origin"] is None: del content["origin"] if content["ctime"] is None: del content["ctime"] return content @classmethod def from_data( cls, data: bytes, reason: str, ctime: Optional[datetime.datetime] = None ) -> "SkippedContent": """Generate a SkippedContent from a given `data` byte string. This populates the SkippedContent with the hashes and length for the data passed as argument. You can use `attr.evolve` on such a generated content to nullify some of its attributes, e.g. for tests. 
""" d = cls._hash_data(data) del d["data"] d["status"] = "absent" d["reason"] = reason d["ctime"] = ctime return cls(**d) @classmethod def from_dict(cls, d): d2 = d.copy() if d2.pop("data", None) is not None: raise ValueError('SkippedContent has no "data" attribute %r' % d) return super().from_dict(d2, use_subclass=False) def unique_key(self) -> KeyType: return self.hashes() class MetadataAuthorityType(Enum): DEPOSIT_CLIENT = "deposit_client" FORGE = "forge" REGISTRY = "registry" @attr.s(frozen=True, slots=True) class MetadataAuthority(BaseModel): """Represents an entity that provides metadata about an origin or software artifact.""" object_type: Final = "metadata_authority" type = attr.ib(type=MetadataAuthorityType, validator=type_validator()) url = attr.ib(type=str, validator=type_validator()) metadata = attr.ib( type=Optional[ImmutableDict[str, Any]], default=None, validator=type_validator(), converter=freeze_optional_dict, ) def to_dict(self): d = super().to_dict() if d["metadata"] is None: del d["metadata"] return d @classmethod def from_dict(cls, d): d = { **d, "type": MetadataAuthorityType(d["type"]), } return super().from_dict(d) def unique_key(self) -> KeyType: return {"type": self.type.value, "url": self.url} @attr.s(frozen=True, slots=True) class MetadataFetcher(BaseModel): """Represents a software component used to fetch metadata from a metadata authority, and ingest them into the Software Heritage archive.""" object_type: Final = "metadata_fetcher" name = attr.ib(type=str, validator=type_validator()) version = attr.ib(type=str, validator=type_validator()) metadata = attr.ib( type=Optional[ImmutableDict[str, Any]], default=None, validator=type_validator(), converter=freeze_optional_dict, ) def to_dict(self): d = super().to_dict() if d["metadata"] is None: del d["metadata"] return d def unique_key(self) -> KeyType: return {"name": self.name, "version": self.version} @attr.s(frozen=True, slots=True) class RawExtrinsicMetadata(HashableObject, BaseModel): object_type: Final = "raw_extrinsic_metadata" # target object target = attr.ib(type=ExtendedSWHID, validator=type_validator()) # source discovery_date = attr.ib(type=datetime.datetime, validator=type_validator()) authority = attr.ib(type=MetadataAuthority, validator=type_validator()) fetcher = attr.ib(type=MetadataFetcher, validator=type_validator()) # the metadata itself format = attr.ib(type=str, validator=type_validator()) metadata = attr.ib(type=bytes, validator=type_validator()) # context origin = attr.ib(type=Optional[str], default=None, validator=type_validator()) visit = attr.ib(type=Optional[int], default=None, validator=type_validator()) snapshot = attr.ib( type=Optional[CoreSWHID], default=None, validator=type_validator() ) release = attr.ib( type=Optional[CoreSWHID], default=None, validator=type_validator() ) revision = attr.ib( type=Optional[CoreSWHID], default=None, validator=type_validator() ) path = attr.ib(type=Optional[bytes], default=None, validator=type_validator()) directory = attr.ib( type=Optional[CoreSWHID], default=None, validator=type_validator() ) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") def compute_hash(self) -> bytes: return hash_to_bytes(raw_extrinsic_metadata_identifier(self.to_dict())) @discovery_date.validator def check_discovery_date(self, attribute, value): """Checks the discovery_date has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("discovery_date must be a timezone-aware datetime.") @origin.validator def check_origin(self, attribute, 
@attr.s(frozen=True, slots=True)
class RawExtrinsicMetadata(HashableObject, BaseModel):
    object_type: Final = "raw_extrinsic_metadata"

    # target object
    target = attr.ib(type=ExtendedSWHID, validator=type_validator())

    # source
    discovery_date = attr.ib(type=datetime.datetime, validator=type_validator())
    authority = attr.ib(type=MetadataAuthority, validator=type_validator())
    fetcher = attr.ib(type=MetadataFetcher, validator=type_validator())

    # the metadata itself
    format = attr.ib(type=str, validator=type_validator())
    metadata = attr.ib(type=bytes, validator=type_validator())

    # context
    origin = attr.ib(type=Optional[str], default=None, validator=type_validator())
    visit = attr.ib(type=Optional[int], default=None, validator=type_validator())
    snapshot = attr.ib(
        type=Optional[CoreSWHID], default=None, validator=type_validator()
    )
    release = attr.ib(
        type=Optional[CoreSWHID], default=None, validator=type_validator()
    )
    revision = attr.ib(
        type=Optional[CoreSWHID], default=None, validator=type_validator()
    )
    path = attr.ib(type=Optional[bytes], default=None, validator=type_validator())
    directory = attr.ib(
        type=Optional[CoreSWHID], default=None, validator=type_validator()
    )

    id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"")

    def compute_hash(self) -> bytes:
        return hash_to_bytes(raw_extrinsic_metadata_identifier(self.to_dict()))

    @discovery_date.validator
    def check_discovery_date(self, attribute, value):
        """Checks the discovery_date has a timezone."""
        if value is not None and value.tzinfo is None:
            raise ValueError("discovery_date must be a timezone-aware datetime.")

    @origin.validator
    def check_origin(self, attribute, value):
        if value is None:
            return
        if self.target.object_type not in (
            SwhidExtendedObjectType.SNAPSHOT,
            SwhidExtendedObjectType.RELEASE,
            SwhidExtendedObjectType.REVISION,
            SwhidExtendedObjectType.DIRECTORY,
            SwhidExtendedObjectType.CONTENT,
        ):
            raise ValueError(
                f"Unexpected 'origin' context for "
                f"{self.target.object_type.name.lower()} object: {value}"
            )
        if value.startswith("swh:"):
            # Technically this is valid; but:
            # 1. SWHIDs are URIs, not URLs
            # 2. if a SWHID gets here, it's very likely to be a mistake
            #    (and we can remove this check if it turns out there is a
            #    legitimate use for it).
            raise ValueError(f"SWHID used as context origin URL: {value}")

    @visit.validator
    def check_visit(self, attribute, value):
        if value is None:
            return
        if self.target.object_type not in (
            SwhidExtendedObjectType.SNAPSHOT,
            SwhidExtendedObjectType.RELEASE,
            SwhidExtendedObjectType.REVISION,
            SwhidExtendedObjectType.DIRECTORY,
            SwhidExtendedObjectType.CONTENT,
        ):
            raise ValueError(
                f"Unexpected 'visit' context for "
                f"{self.target.object_type.name.lower()} object: {value}"
            )
        if self.origin is None:
            raise ValueError("'origin' context must be set if 'visit' is.")
        if value <= 0:
            raise ValueError("Nonpositive visit id")

    @snapshot.validator
    def check_snapshot(self, attribute, value):
        if value is None:
            return
        if self.target.object_type not in (
            SwhidExtendedObjectType.RELEASE,
            SwhidExtendedObjectType.REVISION,
            SwhidExtendedObjectType.DIRECTORY,
            SwhidExtendedObjectType.CONTENT,
        ):
            raise ValueError(
                f"Unexpected 'snapshot' context for "
                f"{self.target.object_type.name.lower()} object: {value}"
            )
        self._check_swhid(SwhidObjectType.SNAPSHOT, value)

    @release.validator
    def check_release(self, attribute, value):
        if value is None:
            return
        if self.target.object_type not in (
            SwhidExtendedObjectType.REVISION,
            SwhidExtendedObjectType.DIRECTORY,
            SwhidExtendedObjectType.CONTENT,
        ):
            raise ValueError(
                f"Unexpected 'release' context for "
                f"{self.target.object_type.name.lower()} object: {value}"
            )
        self._check_swhid(SwhidObjectType.RELEASE, value)

    @revision.validator
    def check_revision(self, attribute, value):
        if value is None:
            return
        if self.target.object_type not in (
            SwhidExtendedObjectType.DIRECTORY,
            SwhidExtendedObjectType.CONTENT,
        ):
            raise ValueError(
                f"Unexpected 'revision' context for "
                f"{self.target.object_type.name.lower()} object: {value}"
            )
        self._check_swhid(SwhidObjectType.REVISION, value)

    @path.validator
    def check_path(self, attribute, value):
        if value is None:
            return
        if self.target.object_type not in (
            SwhidExtendedObjectType.DIRECTORY,
            SwhidExtendedObjectType.CONTENT,
        ):
            raise ValueError(
                f"Unexpected 'path' context for "
                f"{self.target.object_type.name.lower()} object: {value}"
            )

    @directory.validator
    def check_directory(self, attribute, value):
        if value is None:
            return
        if self.target.object_type not in (SwhidExtendedObjectType.CONTENT,):
            raise ValueError(
                f"Unexpected 'directory' context for "
                f"{self.target.object_type.name.lower()} object: {value}"
            )
        self._check_swhid(SwhidObjectType.DIRECTORY, value)

    def _check_swhid(self, expected_object_type, swhid):
        if isinstance(swhid, str):
            raise ValueError(f"Expected SWHID, got a string: {swhid}")
        if swhid.object_type != expected_object_type:
            raise ValueError(
                f"Expected SWHID type '{expected_object_type.name.lower()}', "
                f"got '{swhid.object_type.name.lower()}' in {swhid}"
            )

    def to_dict(self):
        d = super().to_dict()

        context_keys = (
            "origin",
            "visit",
            "snapshot",
            "release",
            "revision",
            "directory",
            "path",
        )
        for context_key in context_keys:
            if d[context_key] is None:
                del d[context_key]
        return d

    @classmethod
    def from_dict(cls, d):
        d = {
            **d,
            "target": ExtendedSWHID.from_string(d["target"]),
            "authority": MetadataAuthority.from_dict(d["authority"]),
            "fetcher": MetadataFetcher.from_dict(d["fetcher"]),
        }

        swhid_keys = ("snapshot", "release", "revision", "directory")
        for swhid_key in swhid_keys:
            if d.get(swhid_key):
                d[swhid_key] = CoreSWHID.from_string(d[swhid_key])

        return super().from_dict(d)
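# Hedged sketch (illustration only, not part of this diff): the validators
# above enforce a context hierarchy; e.g. a 'revision' context is only
# accepted when the metadata targets a directory or a content. The SWHIDs,
# path and format below are placeholders.
def _example_remd_context(
    authority: MetadataAuthority, fetcher: MetadataFetcher
) -> RawExtrinsicMetadata:
    return RawExtrinsicMetadata(
        target=ExtendedSWHID.from_string("swh:1:cnt:" + "00" * 20),
        discovery_date=datetime.datetime.now(tz=datetime.timezone.utc),
        authority=authority,
        fetcher=fetcher,
        format="json",
        metadata=b"{}",
        revision=CoreSWHID.from_string("swh:1:rev:" + "11" * 20),
        path=b"/src",
    )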
+
+
+@attr.s(frozen=True, slots=True)
+class ExtID(HashableObject, BaseModel):
+    object_type: Final = "extid"
+
+    extid_type = attr.ib(type=str, validator=type_validator())
+    extid = attr.ib(type=bytes, validator=type_validator())
+    target = attr.ib(type=CoreSWHID, validator=type_validator())
+
+    id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"")
+
+    @classmethod
+    def from_dict(cls, d):
+        return cls(
+            extid=d["extid"],
+            extid_type=d["extid_type"],
+            target=CoreSWHID.from_string(d["target"]),
+        )
+
+    def compute_hash(self) -> bytes:
+        return hash_to_bytes(extid_identifier(self.to_dict()))
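# Hedged usage sketch (illustration only, not part of this diff): an ExtID
# maps an external identifier to a SWHID; its own id is derived from
# compute_hash() via extid_identifier. The "hg-nodeid" type name and the
# identifiers below are made up.
def _example_extid() -> ExtID:
    return ExtID(
        extid_type="hg-nodeid",
        extid=b"\x04" * 20,
        target=CoreSWHID.from_string("swh:1:rev:" + "22" * 20),
    )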
directory=b"\x01" * 20, synthetic=False, metadata=None, parents=(), ), Revision( id=hash_to_bytes("677063f5c405d6fc1781fc56379c9a9adf43d3a0"), message=b"hello again", date=DATES[1], committer=COMMITTERS[1], author=COMMITTERS[1], committer_date=DATES[1], type=RevisionType.MERCURIAL, directory=b"\x02" * 20, synthetic=False, metadata=None, parents=(), extra_headers=((b"foo", b"bar"),), ), ] +EXTIDS = [ + ExtID(extid_type="git256", extid=b"\x03" * 32, target=REVISIONS[0].swhid(),), + ExtID(extid_type="hg", extid=b"\x04" * 20, target=REVISIONS[1].swhid(),), +] + RELEASES = [ Release( id=hash_to_bytes("8059dc4e17fcd0e51ca3bcd6b80f4577d281fd08"), name=b"v0.0.1", date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0,), offset=120, negative_utc=False, ), author=COMMITTERS[0], target_type=ObjectType.REVISION, target=b"\x04" * 20, message=b"foo", synthetic=False, ), ] ORIGINS = [ Origin(url="https://somewhere.org/den/fox",), Origin(url="https://overtherainbow.org/fox/den",), ] ORIGIN_VISITS = [ OriginVisit( origin=ORIGINS[0].url, date=datetime.datetime(2013, 5, 7, 4, 20, 39, 369271, tzinfo=UTC), visit=1, type="git", ), OriginVisit( origin=ORIGINS[1].url, date=datetime.datetime(2014, 11, 27, 17, 20, 39, tzinfo=UTC), visit=1, type="hg", ), OriginVisit( origin=ORIGINS[0].url, date=datetime.datetime(2018, 11, 27, 17, 20, 39, tzinfo=UTC), visit=2, type="git", ), OriginVisit( origin=ORIGINS[0].url, date=datetime.datetime(2018, 11, 27, 17, 20, 39, tzinfo=UTC), visit=3, type="git", ), OriginVisit( origin=ORIGINS[1].url, date=datetime.datetime(2015, 11, 27, 17, 20, 39, tzinfo=UTC), visit=2, type="hg", ), ] # The origin-visit-status dates needs to be shifted slightly in the future from their # visit dates counterpart. Otherwise, we are hitting storage-wise the "on conflict" # ignore policy (because origin-visit-add creates an origin-visit-status with the same # parameters from the origin-visit {origin, visit, date}... 
ORIGIN_VISIT_STATUSES = [
    OriginVisitStatus(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2013, 5, 7, 4, 20, 39, 432222, tzinfo=UTC),
        visit=1,
        type="git",
        status="ongoing",
        snapshot=None,
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[1].url,
        date=datetime.datetime(2014, 11, 27, 17, 21, 12, tzinfo=UTC),
        visit=1,
        type="hg",
        status="ongoing",
        snapshot=None,
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2018, 11, 27, 17, 20, 59, tzinfo=UTC),
        visit=2,
        type="git",
        status="ongoing",
        snapshot=None,
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2018, 11, 27, 17, 20, 49, tzinfo=UTC),
        visit=3,
        type="git",
        status="full",
        snapshot=hash_to_bytes("17d0066a4a80aba4a0e913532ee8ff2014f006a9"),
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[1].url,
        date=datetime.datetime(2015, 11, 27, 17, 22, 18, tzinfo=UTC),
        visit=2,
        type="hg",
        status="partial",
        snapshot=hash_to_bytes("8ce268b87faf03850693673c3eb5c9bb66e1ca38"),
        metadata=None,
    ),
]

DIRECTORIES = [
    Directory(id=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), entries=()),
    Directory(
        id=hash_to_bytes("21416d920e0ebf0df4a7888bed432873ed5cb3a7"),
        entries=(
            DirectoryEntry(
                name=b"file1.ext",
                perms=0o644,
                type="file",
                target=CONTENTS[0].sha1_git,
            ),
            DirectoryEntry(
                name=b"dir1",
                perms=0o755,
                type="dir",
                target=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"),
            ),
            DirectoryEntry(
                name=b"subprepo1", perms=0o160000, type="rev", target=REVISIONS[1].id,
            ),
        ),
    ),
]

SNAPSHOTS = [
    Snapshot(
        id=hash_to_bytes("17d0066a4a80aba4a0e913532ee8ff2014f006a9"),
        branches={
            b"master": SnapshotBranch(
                target_type=TargetType.REVISION, target=REVISIONS[0].id
            )
        },
    ),
    Snapshot(
        id=hash_to_bytes("8ce268b87faf03850693673c3eb5c9bb66e1ca38"),
        branches={
            b"target/revision": SnapshotBranch(
                target_type=TargetType.REVISION, target=REVISIONS[0].id,
            ),
            b"target/alias": SnapshotBranch(
                target_type=TargetType.ALIAS, target=b"target/revision"
            ),
            b"target/directory": SnapshotBranch(
                target_type=TargetType.DIRECTORY, target=DIRECTORIES[0].id,
            ),
            b"target/release": SnapshotBranch(
                target_type=TargetType.RELEASE, target=RELEASES[0].id
            ),
            b"target/snapshot": SnapshotBranch(
                target_type=TargetType.SNAPSHOT,
                target=hash_to_bytes("17d0066a4a80aba4a0e913532ee8ff2014f006a9"),
            ),
        },
    ),
]

METADATA_AUTHORITIES = [
    MetadataAuthority(
        type=MetadataAuthorityType.FORGE, url="http://example.org/", metadata={},
    ),
]

METADATA_FETCHERS = [
    MetadataFetcher(name="test-fetcher", version="1.0.0", metadata={},)
]

RAW_EXTRINSIC_METADATA = [
    RawExtrinsicMetadata(
        target=Origin("http://example.org/foo.git").swhid(),
        discovery_date=datetime.datetime(2020, 7, 30, 17, 8, 20, tzinfo=UTC),
        authority=attr.evolve(METADATA_AUTHORITIES[0], metadata=None),
        fetcher=attr.evolve(METADATA_FETCHERS[0], metadata=None),
        format="json",
        metadata=b'{"foo": "bar"}',
    ),
    RawExtrinsicMetadata(
        target=ExtendedSWHID.from_string(str(CONTENTS[0].swhid())),
        discovery_date=datetime.datetime(2020, 7, 30, 17, 8, 20, tzinfo=UTC),
        authority=attr.evolve(METADATA_AUTHORITIES[0], metadata=None),
        fetcher=attr.evolve(METADATA_FETCHERS[0], metadata=None),
        format="json",
        metadata=b'{"foo": "bar"}',
    ),
]

TEST_OBJECTS: Dict[str, Sequence[BaseModel]] = {
    "content": CONTENTS,
    "directory": DIRECTORIES,
+    "extid": EXTIDS,
    "metadata_authority": METADATA_AUTHORITIES,
    "metadata_fetcher": METADATA_FETCHERS,
    "origin": ORIGINS,
    "origin_visit": ORIGIN_VISITS,
    "origin_visit_status": ORIGIN_VISIT_STATUSES,
    "raw_extrinsic_metadata": RAW_EXTRINSIC_METADATA,
    "release": RELEASES,
    "revision": REVISIONS,
    "snapshot": SNAPSHOTS,
    "skipped_content": SKIPPED_CONTENTS,
}