diff --git a/swh/model/cli.py b/swh/model/cli.py index e547aeb..ede67e2 100644 --- a/swh/model/cli.py +++ b/swh/model/cli.py @@ -1,322 +1,311 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import sys from typing import Dict, Iterable, Optional # WARNING: do not import unnecessary things here to keep cli startup time under # control try: import click except ImportError: print( "Cannot run swh-identify; the Click package is not installed. " "Please install 'swh.model[cli]' for full functionality.", file=sys.stderr, ) exit(1) try: from swh.core.cli import swh as swh_cli_group except ImportError: # stub so that swh-identify can be used when swh-core isn't installed swh_cli_group = click # type: ignore from swh.model.from_disk import Directory -from swh.model.identifiers import CoreSWHID, ObjectType +from swh.model.swhids import CoreSWHID CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # Mapping between dulwich types and Software Heritage ones. Used by snapshot ID # computation. _DULWICH_TYPES = { b"blob": "content", b"tree": "directory", b"commit": "revision", b"tag": "release", } class CoreSWHIDParamType(click.ParamType): """Click argument that accepts core SWHIDs and returns them as - :class:`swh.model.identifiers.CoreSWHID` instances """ + :class:`swh.model.swhids.CoreSWHID` instances """ name = "SWHID" def convert(self, value, param, ctx) -> CoreSWHID: from swh.model.exceptions import ValidationError try: return CoreSWHID.from_string(value) except ValidationError as e: self.fail(f'"{value}" is not a valid core SWHID: {e}', param, ctx) def swhid_of_file(path) -> CoreSWHID: from swh.model.from_disk import Content object = Content.from_file(path=path) return object.swhid() def swhid_of_file_content(data) -> CoreSWHID: from swh.model.from_disk import Content object = Content.from_bytes(mode=644, data=data) return object.swhid() def model_of_dir(path: bytes, exclude_patterns: Optional[Iterable[bytes]] = None) -> Directory: from swh.model.from_disk import accept_all_directories, ignore_directories_patterns dir_filter = ( ignore_directories_patterns(path, exclude_patterns) if exclude_patterns else accept_all_directories ) return Directory.from_disk(path=path, dir_filter=dir_filter) def swhid_of_dir(path: bytes, exclude_patterns: Optional[Iterable[bytes]] = None) -> CoreSWHID: obj = model_of_dir(path, exclude_patterns) return obj.swhid() def swhid_of_origin(url): - from swh.model.hashutil import hash_to_bytes - from swh.model.identifiers import ( - ExtendedObjectType, - ExtendedSWHID, - origin_identifier, - ) + from swh.model.model import Origin - return ExtendedSWHID( - object_type=ExtendedObjectType.ORIGIN, - object_id=hash_to_bytes(origin_identifier({"url": url})), - ) + return Origin(url).swhid() def swhid_of_git_repo(path) -> CoreSWHID: try: import dulwich.repo except ImportError: raise click.ClickException( "Cannot compute snapshot identifier; the Dulwich package is not installed. 
" "Please install 'swh.model[cli]' for full functionality.", ) from swh.model import hashutil - from swh.model.identifiers import snapshot_identifier + from swh.model.model import Snapshot repo = dulwich.repo.Repo(path) branches: Dict[bytes, Optional[Dict]] = {} for ref, target in repo.refs.as_dict().items(): obj = repo[target] if obj: branches[ref] = { "target": hashutil.bytehex_to_hash(target), "target_type": _DULWICH_TYPES[obj.type_name], } else: branches[ref] = None for ref, target in repo.refs.get_symrefs().items(): branches[ref] = { "target": target, "target_type": "alias", } snapshot = {"branches": branches} - return CoreSWHID( - object_type=ObjectType.SNAPSHOT, - object_id=hashutil.hash_to_bytes(snapshot_identifier(snapshot)), - ) + return Snapshot.from_dict(snapshot).swhid() def identify_object( obj_type: str, follow_symlinks: bool, exclude_patterns: Iterable[bytes], obj ) -> str: from urllib.parse import urlparse if obj_type == "auto": if obj == "-" or os.path.isfile(obj): obj_type = "content" elif os.path.isdir(obj): obj_type = "directory" else: try: # URL parsing if urlparse(obj).scheme: obj_type = "origin" else: raise ValueError except ValueError: raise click.BadParameter("cannot detect object type for %s" % obj) if obj == "-": content = sys.stdin.buffer.read() swhid = str(swhid_of_file_content(content)) elif obj_type in ["content", "directory"]: path = obj.encode(sys.getfilesystemencoding()) if follow_symlinks and os.path.islink(obj): path = os.path.realpath(obj) if obj_type == "content": swhid = str(swhid_of_file(path)) elif obj_type == "directory": swhid = str(swhid_of_dir(path, exclude_patterns)) elif obj_type == "origin": swhid = str(swhid_of_origin(obj)) elif obj_type == "snapshot": swhid = str(swhid_of_git_repo(obj)) else: # shouldn't happen, due to option validation raise click.BadParameter("invalid object type: " + obj_type) # note: we return original obj instead of path here, to preserve user-given # file name in output return swhid @swh_cli_group.command(context_settings=CONTEXT_SETTINGS) @click.option( "--dereference/--no-dereference", "follow_symlinks", default=True, help="follow (or not) symlinks for OBJECTS passed as arguments " + "(default: follow)", ) @click.option( "--filename/--no-filename", "show_filename", default=True, help="show/hide file name (default: show)", ) @click.option( "--type", "-t", "obj_type", default="auto", type=click.Choice(["auto", "content", "directory", "origin", "snapshot"]), help="type of object to identify (default: auto)", ) @click.option( "--exclude", "-x", "exclude_patterns", metavar="PATTERN", multiple=True, help="Exclude directories using glob patterns \ (e.g., ``*.git`` to exclude all .git directories)", ) @click.option( "--verify", "-v", metavar="SWHID", type=CoreSWHIDParamType(), help="reference identifier to be compared with computed one", ) @click.option( "-r", "--recursive", is_flag=True, help="compute SWHID recursively", ) @click.argument("objects", nargs=-1, required=True) def identify( obj_type, verify, show_filename, follow_symlinks, objects, exclude_patterns, recursive, ): """Compute the Software Heritage persistent identifier (SWHID) for the given source code object(s). For more details about SWHIDs see: \b https://docs.softwareheritage.org/devel/swh-model/persistent-identifiers.html Tip: you can pass "-" to identify the content of standard input. 
\b Examples:: \b $ swh identify fork.c kmod.c sched/deadline.c swh:1:cnt:2e391c754ae730bd2d8520c2ab497c403220c6e3 fork.c swh:1:cnt:0277d1216f80ae1adeed84a686ed34c9b2931fc2 kmod.c swh:1:cnt:57b939c81bce5d06fa587df8915f05affbe22b82 sched/deadline.c \b $ swh identify --no-filename /usr/src/linux/kernel/ swh:1:dir:f9f858a48d663b3809c9e2f336412717496202ab \b $ git clone --mirror https://forge.softwareheritage.org/source/helloworld.git $ swh identify --type snapshot helloworld.git/ swh:1:snp:510aa88bdc517345d258c1fc2babcd0e1f905e93 helloworld.git """ # NoQA # overlong lines in shell examples are fine from functools import partial import logging if exclude_patterns: exclude_patterns = set(pattern.encode() for pattern in exclude_patterns) if verify and len(objects) != 1: raise click.BadParameter("verification requires a single object") if recursive and not os.path.isdir(objects[0]): recursive = False logging.warning("recursive option disabled, input is not a directory object") if recursive: if verify: raise click.BadParameter( "verification of recursive object identification is not supported" ) if obj_type not in ("auto", "directory"): raise click.BadParameter( "recursive identification is supported only for directories" ) path = os.fsencode(objects[0]) dir_obj = model_of_dir(path, exclude_patterns) for sub_obj in dir_obj.iter_tree(): path_name = "path" if "path" in sub_obj.data.keys() else "data" path = os.fsdecode(sub_obj.data[path_name]) swhid = str(sub_obj.swhid()) msg = f"{swhid}\t{path}" if show_filename else f"{swhid}" click.echo(msg) else: results = zip( objects, map( partial(identify_object, obj_type, follow_symlinks, exclude_patterns), objects, ), ) if verify: swhid = next(results)[1] if str(verify) == swhid: click.echo("SWHID match: %s" % swhid) sys.exit(0) else: click.echo("SWHID mismatch: %s != %s" % (verify, swhid)) sys.exit(1) else: for (obj, swhid) in results: msg = swhid if show_filename: msg = "%s\t%s" % (swhid, os.fsdecode(obj)) click.echo(msg) if __name__ == "__main__": identify() diff --git a/swh/model/from_disk.py b/swh/model/from_disk.py index 8fdd55e..2ae893f 100644 --- a/swh/model/from_disk.py +++ b/swh/model/from_disk.py @@ -1,553 +1,549 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import enum import fnmatch import glob import os import re import stat from typing import Any, Iterable, Iterator, List, Optional, Pattern, Tuple import attr from attrs_strict import type_validator from typing_extensions import Final from .
import model from .exceptions import InvalidDirectoryPath -from .hashutil import MultiHash, hash_to_bytes, hash_to_hex -from .identifiers import ( - CoreSWHID, - ObjectType, - directory_entry_sort_key, - directory_identifier, -) +from .git_objects import directory_entry_sort_key +from .hashutil import MultiHash, hash_to_hex from .merkle import MerkleLeaf, MerkleNode +from .swhids import CoreSWHID, ObjectType @attr.s(frozen=True, slots=True) class DiskBackedContent(model.BaseContent): """Content-like class, which allows lazy-loading data from the disk.""" object_type: Final = "content_file" sha1 = attr.ib(type=bytes, validator=type_validator()) sha1_git = attr.ib(type=model.Sha1Git, validator=type_validator()) sha256 = attr.ib(type=bytes, validator=type_validator()) blake2s256 = attr.ib(type=bytes, validator=type_validator()) length = attr.ib(type=int, validator=type_validator()) status = attr.ib( type=str, validator=attr.validators.in_(["visible", "hidden"]), default="visible", ) ctime = attr.ib( type=Optional[datetime.datetime], validator=type_validator(), default=None, eq=False, ) path = attr.ib(type=Optional[bytes], default=None) @classmethod def from_dict(cls, d): return cls(**d) def __attrs_post_init__(self): if self.path is None: raise TypeError("path must not be None.") def with_data(self) -> model.Content: args = self.to_dict() del args["path"] assert self.path is not None with open(self.path, "rb") as fd: return model.Content.from_dict({**args, "data": fd.read()}) class DentryPerms(enum.IntEnum): """Admissible permissions for directory entries.""" content = 0o100644 """Content""" executable_content = 0o100755 """Executable content (e.g. executable script)""" symlink = 0o120000 """Symbolic link""" directory = 0o040000 """Directory""" revision = 0o160000 """Revision (e.g. submodule)""" def mode_to_perms(mode): """Convert a file mode to a permission compatible with Software Heritage directory entries Args: mode (int): a file mode as returned by :func:`os.stat` in :attr:`os.stat_result.st_mode` Returns: DentryPerms: one of the following values: :const:`DentryPerms.content`: plain file :const:`DentryPerms.executable_content`: executable file :const:`DentryPerms.symlink`: symbolic link :const:`DentryPerms.directory`: directory """ if stat.S_ISLNK(mode): return DentryPerms.symlink if stat.S_ISDIR(mode): return DentryPerms.directory else: # file is executable in any way if mode & (0o111): return DentryPerms.executable_content else: return DentryPerms.content class Content(MerkleLeaf): """Representation of a Software Heritage content as a node in a Merkle tree. The current Merkle hash for the Content nodes is the `sha1_git`, which makes it consistent with what :class:`Directory` uses for its own hash computation. """ __slots__ = [] # type: List[str] object_type: Final = "content" @classmethod def from_bytes(cls, *, mode, data): """Convert data (raw :class:`bytes`) to a Software Heritage content entry Args: mode (int): a file mode (passed to :func:`mode_to_perms`) data (bytes): raw contents of the file """ ret = MultiHash.from_data(data).digest() ret["length"] = len(data) ret["perms"] = mode_to_perms(mode) ret["data"] = data ret["status"] = "visible" return cls(ret) @classmethod def from_symlink(cls, *, path, mode): """Convert a symbolic link to a Software Heritage content entry""" return cls.from_bytes(mode=mode, data=os.readlink(path)) @classmethod def from_file(cls, *, path, max_content_length=None): """Compute the Software Heritage content entry corresponding to an on-disk file. 
The returned dictionary contains keys useful for both: - loading the content in the archive (hashes, `length`) - using the content as a directory entry in a directory Args: save_path (bool): add the file path to the entry max_content_length (Optional[int]): if given, all contents larger than this will be skipped. """ file_stat = os.lstat(path) mode = file_stat.st_mode length = file_stat.st_size too_large = max_content_length is not None and length > max_content_length if stat.S_ISLNK(mode): # Symbolic link: return a file whose contents are the link target if too_large: # Unlike large contents, we can't stream symlinks to # MultiHash, and we don't want to fit them in memory if # they exceed max_content_length either. # Thankfully, this should not happen for reasonable values of # max_content_length because of OS/filesystem limitations, # so let's just raise an error. raise Exception(f"Symlink too large ({length} bytes)") return cls.from_symlink(path=path, mode=mode) elif not stat.S_ISREG(mode): # not a regular file: return the empty file instead return cls.from_bytes(mode=mode, data=b"") if too_large: skip_reason = "Content too large" else: skip_reason = None hashes = MultiHash.from_path(path).digest() if skip_reason: ret = { **hashes, "status": "absent", "reason": skip_reason, } else: ret = { **hashes, "status": "visible", } ret["path"] = path ret["perms"] = mode_to_perms(mode) ret["length"] = length obj = cls(ret) return obj def swhid(self) -> CoreSWHID: """Return node identifier as a SWHID """ return CoreSWHID(object_type=ObjectType.CONTENT, object_id=self.hash) def __repr__(self): return "Content(id=%s)" % hash_to_hex(self.hash) def compute_hash(self): return self.data["sha1_git"] def to_model(self) -> model.BaseContent: """Builds a `model.BaseContent` object based on this leaf.""" data = self.get_data().copy() data.pop("perms", None) if data["status"] == "absent": data.pop("path", None) return model.SkippedContent.from_dict(data) elif "data" in data: return model.Content.from_dict(data) else: return DiskBackedContent.from_dict(data) def accept_all_directories(dirpath: str, dirname: str, entries: Iterable[Any]) -> bool: """Default filter for :func:`Directory.from_disk` accepting all directories Args: dirname (bytes): directory name entries (list): directory entries """ return True def ignore_empty_directories( dirpath: str, dirname: str, entries: Iterable[Any] ) -> bool: """Filter for :func:`directory_to_objects` ignoring empty directories Args: dirname (bytes): directory name entries (list): directory entries Returns: True if the directory is not empty, false if the directory is empty """ return bool(entries) def ignore_named_directories(names, *, case_sensitive=True): """Filter for :func:`directory_to_objects` to ignore directories named one of names. Args: names (list of bytes): names to ignore case_sensitive (bool): whether to do the filtering in a case sensitive way Returns: a directory filter for :func:`directory_to_objects` """ if not case_sensitive: names = [name.lower() for name in names] def named_filter( dirpath: str, dirname: str, entries: Iterable[Any], names: Iterable[Any] = names, case_sensitive: bool = case_sensitive, ): if case_sensitive: return dirname not in names else: return dirname.lower() not in names return named_filter # TODO: `extract_regex_objs` has been copied and adapted from `swh.scanner`. # In the future `swh.scanner` should use the `swh.model` version and remove its own. 
def extract_regex_objs( root_path: bytes, patterns: Iterable[bytes] ) -> Iterator[Pattern[bytes]]: """Generates a regex object for each pattern given in input and checks if the path is a subdirectory or relative to the root path. Args: root_path (bytes): path to the root directory patterns (list of byte): patterns to match Yields: an SRE_Pattern object """ absolute_root_path = os.path.abspath(root_path) for pattern in patterns: for path in glob.glob(pattern): absolute_path = os.path.abspath(path) if not absolute_path.startswith(absolute_root_path): error_msg = ( b'The path "' + path + b'" is not a subdirectory or relative ' b'to the root directory path: "' + root_path + b'"' ) raise InvalidDirectoryPath(error_msg) regex = fnmatch.translate((pattern.decode())) yield re.compile(regex.encode()) def ignore_directories_patterns(root_path: bytes, patterns: Iterable[bytes]): """Filter for :func:`directory_to_objects` to ignore directories matching certain patterns. Args: root_path (bytes): path of the root directory patterns (list of byte): patterns to ignore Returns: a directory filter for :func:`directory_to_objects` """ sre_patterns = set(extract_regex_objs(root_path, patterns)) def pattern_filter( dirpath: bytes, dirname: bytes, entries: Iterable[Any], patterns: Iterable[Any] = sre_patterns, root_path: bytes = os.path.abspath(root_path), ): full_path = os.path.abspath(dirpath) relative_path = os.path.relpath(full_path, root_path) return not any([pattern.match(relative_path) for pattern in patterns]) return pattern_filter def iter_directory( directory, ) -> Tuple[List[model.Content], List[model.SkippedContent], List[model.Directory]]: """Return the directory listing from a disk-memory directory instance. Raises: TypeError in case an unexpected object type is listed. Returns: Tuple of respectively iterable of content, skipped content and directories. """ contents: List[model.Content] = [] skipped_contents: List[model.SkippedContent] = [] directories: List[model.Directory] = [] for obj in directory.iter_tree(): obj = obj.to_model() obj_type = obj.object_type if obj_type in (model.Content.object_type, DiskBackedContent.object_type): # FIXME: read the data from disk later (when the # storage buffer is flushed). obj = obj.with_data() contents.append(obj) elif obj_type == model.SkippedContent.object_type: skipped_contents.append(obj) elif obj_type == model.Directory.object_type: directories.append(obj) else: raise TypeError(f"Unexpected object type from disk: {obj}") return contents, skipped_contents, directories class Directory(MerkleNode): """Representation of a Software Heritage directory as a node in a Merkle Tree. This class can be used to generate, from an on-disk directory, all the objects that need to be sent to the Software Heritage archive. The :func:`from_disk` constructor allows you to generate the data structure from a directory on disk. The resulting :class:`Directory` can then be manipulated as a dictionary, using the path as key. The :func:`collect` method is used to retrieve all the objects that need to be added to the Software Heritage archive since the last collection, by class (contents and directories). When using the dict-like methods to update the contents of the directory, the affected levels of hierarchy are reset and can be collected again using the same method. This enables the efficient collection of updated nodes, for instance when the client is applying diffs. 
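A minimal usage sketch (the path and entry name are hypothetical)::

    from swh.model.from_disk import Directory

    d = Directory.from_disk(path=b"/tmp/project")
    print(d.swhid())          # SWHID of the whole tree
    print(d[b"src"].swhid())  # SWHID of one entry, addressed by its path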
""" __slots__ = ["__entries"] object_type: Final = "directory" @classmethod def from_disk( cls, *, path, dir_filter=accept_all_directories, max_content_length=None ): """Compute the Software Heritage objects for a given directory tree Args: path (bytes): the directory to traverse data (bool): whether to add the data to the content objects save_path (bool): whether to add the path to the content objects dir_filter (function): a filter to ignore some directories by name or contents. Takes two arguments: dirname and entries, and returns True if the directory should be added, False if the directory should be ignored. max_content_length (Optional[int]): if given, all contents larger than this will be skipped. """ top_path = path dirs = {} for root, dentries, fentries in os.walk(top_path, topdown=False): entries = {} # Join fentries and dentries in the same processing, as symbolic # links to directories appear in dentries... for name in fentries + dentries: path = os.path.join(root, name) if not os.path.isdir(path) or os.path.islink(path): content = Content.from_file( path=path, max_content_length=max_content_length ) entries[name] = content else: if dir_filter(path, name, dirs[path].entries): entries[name] = dirs[path] dirs[root] = cls({"name": os.path.basename(root), "path": root}) dirs[root].update(entries) return dirs[top_path] def __init__(self, data=None): super().__init__(data=data) self.__entries = None def invalidate_hash(self): self.__entries = None super().invalidate_hash() @staticmethod def child_to_directory_entry(name, child): if child.object_type == "directory": return { "type": "dir", "perms": DentryPerms.directory, "target": child.hash, "name": name, } elif child.object_type == "content": return { "type": "file", "perms": child.data["perms"], "target": child.hash, "name": name, } else: raise ValueError(f"unknown child {child}") def get_data(self, **kwargs): return { "id": self.hash, "entries": self.entries, } @property def entries(self): - """Child nodes, sorted by name in the same way `directory_identifier` - does.""" + """Child nodes, sorted by name in the same way + :func:`swh.model.git_objects.directory_git_object` does.""" if self.__entries is None: self.__entries = sorted( ( self.child_to_directory_entry(name, child) for name, child in self.items() ), key=directory_entry_sort_key, ) return self.__entries def swhid(self) -> CoreSWHID: """Return node identifier as a SWHID """ return CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=self.hash) def compute_hash(self): - return hash_to_bytes(directory_identifier({"entries": self.entries})) + return model.Directory.from_dict({"entries": self.entries}).id def to_model(self) -> model.Directory: """Builds a `model.Directory` object based on this node; ignoring its children.""" return model.Directory.from_dict(self.get_data()) def __getitem__(self, key): if not isinstance(key, bytes): raise ValueError("Can only get a bytes from Directory") # Convenience shortcut if key == b"": return self if b"/" not in key: return super().__getitem__(key) else: key1, key2 = key.split(b"/", 1) return self.__getitem__(key1)[key2] def __setitem__(self, key, value): if not isinstance(key, bytes): raise ValueError("Can only set a bytes Directory entry") if not isinstance(value, (Content, Directory)): raise ValueError( "Can only set a Directory entry to a Content or " "Directory" ) if key == b"": raise ValueError("Directory entry must have a name") if b"\x00" in key: raise ValueError("Directory entry name must not contain nul bytes") if b"/" not in 
key: return super().__setitem__(key, value) else: key1, key2 = key.rsplit(b"/", 1) self[key1].__setitem__(key2, value) def __delitem__(self, key): if not isinstance(key, bytes): raise ValueError("Can only delete a bytes Directory entry") if b"/" not in key: super().__delitem__(key) else: key1, key2 = key.rsplit(b"/", 1) del self[key1][key2] def __repr__(self): return "Directory(id=%s, entries=[%s])" % ( hash_to_hex(self.hash), ", ".join(str(entry) for entry in self), ) diff --git a/swh/model/git_objects.py b/swh/model/git_objects.py index 16e69e7..1fb668a 100644 --- a/swh/model/git_objects.py +++ b/swh/model/git_objects.py @@ -1,403 +1,583 @@ # Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from __future__ import annotations import datetime from functools import lru_cache from typing import Iterable, List, Optional, Tuple from . import model from .collections import ImmutableDict from .hashutil import git_object_header, hash_to_bytehex def directory_entry_sort_key(entry: model.DirectoryEntry): """The sorting key for tree entries""" if isinstance(entry, dict): # For backward compatibility entry = model.DirectoryEntry.from_dict(entry) if entry.type == "dir": return entry.name + b"/" else: return entry.name @lru_cache() def _perms_to_bytes(perms): - """Convert the perms value to its bytes representation""" + """Convert the perms value to its canonical bytes representation""" oc = oct(perms)[2:] return oc.encode("ascii") def escape_newlines(snippet): """Escape the newlines present in snippet according to git rules. New lines in git manifests are escaped by indenting the next line by one space. """ if b"\n" in snippet: return b"\n ".join(snippet.split(b"\n")) else: return snippet def format_date(date: model.Timestamp) -> bytes: """Convert a date object into an UTC timestamp encoded as ascii bytes. Git stores timestamps as an integer number of seconds since the UNIX epoch. However, Software Heritage stores timestamps as an integer number of microseconds (postgres type "datetime with timezone"). Therefore, we print timestamps with no microseconds as integers, and timestamps with microseconds as floating point values. We elide the trailing zeroes from microsecond values, to "future-proof" our representation if we ever need more precision in timestamps. """ if isinstance(date, dict): # For backward compatibility date = model.Timestamp.from_dict(date) if not date.microseconds: return str(date.seconds).encode() else: float_value = "%d.%06d" % (date.seconds, date.microseconds) return float_value.rstrip("0").encode() @lru_cache() def format_offset(offset: int, negative_utc: Optional[bool] = None) -> bytes: """Convert an integer number of minutes into an offset representation. The offset representation is [+-]hhmm where: - hh is the number of hours; - mm is the number of minutes. A null offset is represented as +0000. 
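For example, ``format_offset(90)`` returns ``b"+0130"``, ``format_offset(-150)`` returns ``b"-0230"``, and ``format_offset(0, negative_utc=True)`` returns ``b"-0000"``.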
""" if offset < 0 or offset == 0 and negative_utc: sign = "-" else: sign = "+" hours = abs(offset) // 60 minutes = abs(offset) % 60 t = "%s%02d%02d" % (sign, hours, minutes) return t.encode() def normalize_timestamp(time_representation): """Normalize a time representation for processing by Software Heritage This function supports a numeric timestamp (representing a number of seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a :obj:`datetime.datetime` object (with timezone information), or a normalized Software Heritage time representation (idempotency). Args: time_representation: the representation of a timestamp Returns: dict: a normalized dictionary with three keys: - timestamp: a dict with two optional keys: - seconds: the integral number of seconds since the UNIX epoch - microseconds: the integral number of microseconds - offset: the timezone offset as a number of minutes relative to UTC - negative_utc: a boolean representing whether the offset is -0000 when offset = 0. - """ if time_representation is None: return None else: return model.TimestampWithTimezone.from_dict(time_representation).to_dict() def directory_git_object(directory: model.Directory) -> bytes: + """Formats a directory as a git tree. + + A directory's identifier is the tree sha1 à la git of a directory listing, + using the following algorithm, which is equivalent to the git algorithm for + trees: + + 1. Entries of the directory are sorted using the name (or the name with '/' + appended for directory entries) as key, in bytes order. + + 2. For each entry of the directory, the following bytes are output: + + - the octal representation of the permissions for the entry (stored in + the 'perms' member), which is a representation of the entry type: + + - b'100644' (int 33188) for files + - b'100755' (int 33261) for executable files + - b'120000' (int 40960) for symbolic links + - b'40000' (int 16384) for directories + - b'160000' (int 57344) for references to revisions + + - an ascii space (b'\x20') + - the entry's name (as raw bytes), stored in the 'name' member + - a null byte (b'\x00') + - the 20 byte long identifier of the object pointed at by the entry, + stored in the 'target' member: + + - for files or executable files: their blob sha1_git + - for symbolic links: the blob sha1_git of a file containing the link + destination + - for directories: their intrinsic identifier + - for revisions: their intrinsic identifier + + (Note that there is no separator between entries) + + """ if isinstance(directory, dict): # For backward compatibility directory = model.Directory.from_dict(directory) components = [] for entry in sorted(directory.entries, key=directory_entry_sort_key): components.extend( [_perms_to_bytes(entry.perms), b"\x20", entry.name, b"\x00", entry.target,] ) return format_git_object_from_parts("tree", components) def format_git_object_from_headers( git_type: str, headers: Iterable[Tuple[bytes, bytes]], message: Optional[bytes] = None, ) -> bytes: """Format a git_object comprised of a git header and a manifest, which is itself a sequence of `headers`, and an optional `message`. 
The git_object format, compatible with the git format for tag and commit objects, is as follows: - for each `key`, `value` in `headers`, emit: - the `key`, literally - an ascii space (``\\x20``) - the `value`, with newlines escaped using :func:`escape_newlines`, - an ascii newline (``\\x0a``) - if the `message` is not None, emit: - an ascii newline (``\\x0a``) - the `message`, literally Args: headers: a sequence of key/value headers stored in the manifest; message: an optional message used to trail the manifest. Returns: the formatted git_object as bytes """ entries: List[bytes] = [] for key, value in headers: entries.extend((key, b" ", escape_newlines(value), b"\n")) if message is not None: entries.extend((b"\n", message)) concatenated_entries = b"".join(entries) header = git_object_header(git_type, len(concatenated_entries)) return header + concatenated_entries def format_git_object_from_parts(git_type: str, parts: Iterable[bytes]) -> bytes: """Similar to :func:`format_git_object_from_headers`, but for manifests made of a flat list of entries, instead of key-value + message, ie. trees and snapshots.""" concatenated_parts = b"".join(parts) header = git_object_header(git_type, len(concatenated_parts)) return header + concatenated_parts def format_author_data( author: model.Person, date_offset: Optional[model.TimestampWithTimezone] ) -> bytes: """Format authorship data according to git standards. Git authorship data has two components: - an author specification, usually a name and email, but in practice an arbitrary bytestring - optionally, a timestamp with a UTC offset specification The authorship data is formatted thus:: `name and email`[ `timestamp` `utc_offset`] The timestamp is encoded as a (decimal) number of seconds since the UNIX epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we support fractional timestamps, using a dot as the separator for the decimal part. The utc offset is a number of minutes encoded as '[+-]HHMM'. Note that some tools can pass a negative offset corresponding to the UTC timezone ('-0000'), which is valid and is encoded as such. Returns: the byte string containing the authorship data - """ ret = [author.fullname] if date_offset is not None: date_f = format_date(date_offset.timestamp) offset_f = format_offset(date_offset.offset, date_offset.negative_utc) ret.extend([b" ", date_f, b" ", offset_f]) return b"".join(ret) def revision_git_object(revision: model.Revision) -> bytes: - """Formats the git_object of a revision. See :func:`revision_identifier` for details - on the format.""" + """Formats a revision as a git commit. + + The fields used for the revision identifier computation are: + + - directory + - parents + - author + - author_date + - committer + - committer_date + - extra_headers or metadata -> extra_headers + - message + + A revision's identifier is the 'git'-checksum of a commit manifest + constructed as follows (newlines are a single ASCII newline character):: + + tree <directory identifier> + [for each parent in parents] + parent <parent identifier> + [end for each parents] + author <author> <author_date> + committer <committer> <committer_date> + [for each key, value in extra_headers] + <key> <encoded value> + [end for each extra_headers] + + <message> + + The directory identifier is the ascii representation of its hexadecimal + encoding. + + Author and committer are formatted using the :attr:`Person.fullname` attribute only. + Dates are formatted with the :func:`format_offset` function. + + Extra headers are an ordered list of [key, value] pairs. Keys are strings + and get encoded to utf-8 for identifier computation.
Values are either byte + strings, unicode strings (that get encoded to utf-8), or integers (that get + encoded to their utf-8 decimal representation). + + Multiline extra header values are escaped by indenting the continuation + lines with one ascii space. + + If the message is None, the manifest ends with the last header. Else, the + message is appended to the headers after an empty line. + + The checksum of the full manifest is computed using the 'commit' git object + type. + + """ if isinstance(revision, dict): # For backward compatibility revision = model.Revision.from_dict(revision) headers = [(b"tree", hash_to_bytehex(revision.directory))] for parent in revision.parents: if parent: headers.append((b"parent", hash_to_bytehex(parent))) headers.append((b"author", format_author_data(revision.author, revision.date))) headers.append( (b"committer", format_author_data(revision.committer, revision.committer_date),) ) # Handle extra headers metadata = revision.metadata or ImmutableDict() extra_headers = revision.extra_headers or () if not extra_headers and "extra_headers" in metadata: extra_headers = metadata["extra_headers"] headers.extend(extra_headers) return format_git_object_from_headers("commit", headers, revision.message) def target_type_to_git(target_type: model.ObjectType) -> bytes: """Convert a software heritage target type to a git object type""" return { model.ObjectType.CONTENT: b"blob", model.ObjectType.DIRECTORY: b"tree", model.ObjectType.REVISION: b"commit", model.ObjectType.RELEASE: b"tag", model.ObjectType.SNAPSHOT: b"refs", }[target_type] def release_git_object(release: model.Release) -> bytes: if isinstance(release, dict): # For backward compatibility release = model.Release.from_dict(release) headers = [ (b"object", hash_to_bytehex(release.target)), (b"type", target_type_to_git(release.target_type)), (b"tag", release.name), ] if release.author is not None: headers.append((b"tagger", format_author_data(release.author, release.date))) return format_git_object_from_headers("tag", headers, release.message) def snapshot_git_object(snapshot: model.Snapshot) -> bytes: - """Formats the git_object of a revision. See :func:`snapshot_identifier` for details - on the format.""" + """Formats a snapshot as a git-like object. + + Snapshots are a set of named branches, which are pointers to objects at any + level of the Software Heritage DAG. + + As well as pointing to other objects in the Software Heritage DAG, branches + can also be *alias*es, in which case their target is the name of another + branch in the same snapshot, or *dangling*, in which case the target is + unknown (and represented by the ``None`` value). + + A snapshot identifier is a salted sha1 (using the git hashing algorithm + with the ``snapshot`` object type) of a manifest following the algorithm: + + 1. Branches are sorted using the name as key, in bytes order. + + 2. 
For each branch, the following bytes are output: + + - the type of the branch target: + + - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` + for the corresponding entries in the DAG; + - ``alias`` for branches referencing another branch; + - ``dangling`` for dangling branches + + - an ascii space (``\\x20``) + - the branch name (as raw bytes) + - a null byte (``\\x00``) + - the length of the target identifier, as an ascii-encoded decimal number + (``20`` for current intrinsic identifiers, ``0`` for dangling + branches, the length of the target branch name for branch aliases) + - a colon (``:``) + - the identifier of the target object pointed at by the branch, + stored in the 'target' member: + + - for contents: their *sha1_git* + - for directories, revisions, releases or snapshots: their intrinsic + identifier + - for branch aliases, the name of the target branch (as raw bytes) + - for dangling branches, the empty string + + Note that, akin to directory manifests, there is no separator between + entries. Because of symbolic branches, identifiers are of arbitrary + length but are length-encoded to avoid ambiguity. + """ if isinstance(snapshot, dict): # For backward compatibility snapshot = model.Snapshot.from_dict(snapshot) unresolved = [] lines = [] for name, target in sorted(snapshot.branches.items()): if not target: target_type = b"dangling" target_id = b"" elif target.target_type == model.TargetType.ALIAS: target_type = b"alias" target_id = target.target if target_id not in snapshot.branches or target_id == name: unresolved.append((name, target_id)) else: target_type = target.target_type.value.encode() target_id = target.target lines.extend( [ target_type, b"\x20", name, b"\x00", ("%d:" % len(target_id)).encode(), target_id, ] ) if unresolved: raise ValueError( "Branch aliases unresolved: %s" % ", ".join("%r -> %r" % x for x in unresolved), unresolved, ) return format_git_object_from_parts("snapshot", lines) def raw_extrinsic_metadata_git_object(metadata: model.RawExtrinsicMetadata) -> bytes: - """Formats the git_object of a raw_extrinsic_metadata object. - See :func:`raw_extrinsic_metadata_identifier` for details - on the format.""" + """Formats RawExtrinsicMetadata as a git-like object. + + A raw_extrinsic_metadata identifier is a salted sha1 (using the git + hashing algorithm with the ``raw_extrinsic_metadata`` object type) of + a manifest following the format:: + + target $ExtendedSwhid + discovery_date $Timestamp + authority $StrWithoutSpaces $IRI + fetcher $Str $Version + format $StrWithoutSpaces + origin $IRI <- optional + visit $IntInDecimal <- optional + snapshot $CoreSwhid <- optional + release $CoreSwhid <- optional + revision $CoreSwhid <- optional + path $Bytes <- optional + directory $CoreSwhid <- optional + + $MetadataBytes + + $IRI must be RFC 3987 IRIs (so they may contain newlines, that are escaped as + described below) + + $StrWithoutSpaces and $Version are ASCII strings, and may not contain spaces. + + $Str is an UTF-8 string. + + $CoreSwhid are core SWHIDs, as defined in :ref:`persistent-identifiers`. + $ExtendedSwhid is a core SWHID, with extra types allowed ('ori' for + origins and 'emd' for raw extrinsic metadata) + + $Timestamp is a decimal representation of the rounded-down integer number of + seconds since the UNIX epoch (1970-01-01 00:00:00 UTC), + with no leading '0' (unless the timestamp value is zero) and no timezone. + It may be negative by prefixing it with a '-', which must not be followed + by a '0'. 
+ + Newlines in $Bytes, $Str, and $Iri are escaped as with other git fields, + ie. by adding a space after them. + """ if isinstance(metadata, dict): # For backward compatibility metadata = model.RawExtrinsicMetadata.from_dict(metadata) # equivalent to using math.floor(dt.timestamp()) to round down, # as int(dt.timestamp()) rounds toward zero, # which would map two seconds on the 0 timestamp. # # This should never be an issue in practice as Software Heritage didn't # start collecting metadata before 2015. timestamp = ( metadata.discovery_date.astimezone(datetime.timezone.utc) .replace(microsecond=0) .timestamp() ) assert timestamp.is_integer() headers = [ (b"target", str(metadata.target).encode()), (b"discovery_date", str(int(timestamp)).encode("ascii")), ( b"authority", f"{metadata.authority.type.value} {metadata.authority.url}".encode(), ), (b"fetcher", f"{metadata.fetcher.name} {metadata.fetcher.version}".encode(),), (b"format", metadata.format.encode()), ] for key in ( "origin", "visit", "snapshot", "release", "revision", "path", "directory", ): if getattr(metadata, key, None) is not None: value: bytes if key == "path": value = getattr(metadata, key) else: value = str(getattr(metadata, key)).encode() headers.append((key.encode("ascii"), value)) return format_git_object_from_headers( "raw_extrinsic_metadata", headers, metadata.metadata ) def extid_git_object(extid: model.ExtID) -> bytes: + """Formats an extid as a git-like object. + + An ExtID identifier is a salted sha1 (using the git hashing algorithm with + the ``extid`` object type) of a manifest following the format: + + ``` + extid_type $StrWithoutSpaces + [extid_version $Str] + extid $Bytes + target $CoreSwhid + ``` + + $StrWithoutSpaces is an ASCII string, and may not contain spaces. + + Newlines in $Bytes are escaped as with other git fields, ie. by adding a + space after them. + + The extid_version line is only generated if the version is non-zero.
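+
+     For example, a version-0 ExtID of type ``git`` targeting a revision would
+     produce a manifest of the form (illustrative, with placeholder values)::
+
+         extid_type git
+         extid $Bytes
+         target swh:1:rev:0000000000000000000000000000000000000000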
+ """ + headers = [ (b"extid_type", extid.extid_type.encode("ascii")), ] extid_version = extid.extid_version if extid_version != 0: headers.append((b"extid_version", str(extid_version).encode("ascii"))) headers.extend( [(b"extid", extid.extid), (b"target", str(extid.target).encode("ascii")),] ) return format_git_object_from_headers("extid", headers) diff --git a/swh/model/hypothesis_strategies.py b/swh/model/hypothesis_strategies.py index 67da0e0..c8644a3 100644 --- a/swh/model/hypothesis_strategies.py +++ b/swh/model/hypothesis_strategies.py @@ -1,516 +1,515 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import string from hypothesis import assume from hypothesis.extra.dateutil import timezones from hypothesis.strategies import ( binary, booleans, builds, characters, composite, datetimes, dictionaries, from_regex, integers, just, lists, none, one_of, sampled_from, sets, text, tuples, ) from .from_disk import DentryPerms -from .hashutil import hash_to_bytes -from .identifiers import ExtendedObjectType, ExtendedSWHID, snapshot_identifier from .model import ( BaseContent, Content, Directory, DirectoryEntry, MetadataAuthority, MetadataFetcher, ObjectType, Origin, OriginVisit, OriginVisitStatus, Person, RawExtrinsicMetadata, Release, Revision, RevisionType, SkippedContent, Snapshot, SnapshotBranch, TargetType, Timestamp, TimestampWithTimezone, ) +from .swhids import ExtendedObjectType, ExtendedSWHID pgsql_alphabet = characters( blacklist_categories=("Cs",), blacklist_characters=["\u0000"] ) # postgresql does not like these def optional(strategy): return one_of(none(), strategy) def pgsql_text(): return text(alphabet=pgsql_alphabet) def sha1_git(): return binary(min_size=20, max_size=20) def sha1(): return binary(min_size=20, max_size=20) @composite def extended_swhids(draw): object_type = draw(sampled_from(ExtendedObjectType)) object_id = draw(sha1_git()) return ExtendedSWHID(object_type=object_type, object_id=object_id) def aware_datetimes(): # datetimes in Software Heritage are not used for software artifacts # (which may be much older than 2000), but only for objects like scheduler # task runs, and origin visits, which were created by Software Heritage, # so at least in 2015. 
# We're forbidding old datetimes, because until 1956, many timezones had seconds # in their "UTC offsets" (see # ), which is not # encodable in ISO8601; and we need our datetimes to be ISO8601-encodable in the # RPC protocol min_value = datetime.datetime(2000, 1, 1, 0, 0, 0) return datetimes(min_value=min_value, timezones=timezones()) @composite def iris(draw): protocol = draw(sampled_from(["git", "http", "https", "deb"])) domain = draw(from_regex(r"\A([a-z]([a-z0-9é🏛️-]*)\.){1,3}([a-z0-9é])+\Z")) return "%s://%s" % (protocol, domain) @composite def persons_d(draw): fullname = draw(binary()) email = draw(optional(binary())) name = draw(optional(binary())) assume(not (len(fullname) == 32 and email is None and name is None)) return dict(fullname=fullname, name=name, email=email) def persons(): return persons_d().map(Person.from_dict) def timestamps_d(): max_seconds = datetime.datetime.max.replace( tzinfo=datetime.timezone.utc ).timestamp() min_seconds = datetime.datetime.min.replace( tzinfo=datetime.timezone.utc ).timestamp() return builds( dict, seconds=integers(min_seconds, max_seconds), microseconds=integers(0, 1000000), ) def timestamps(): return timestamps_d().map(Timestamp.from_dict) @composite def timestamps_with_timezone_d( draw, timestamp=timestamps_d(), offset=integers(min_value=-14 * 60, max_value=14 * 60), negative_utc=booleans(), ): timestamp = draw(timestamp) offset = draw(offset) negative_utc = draw(negative_utc) assume(not (negative_utc and offset)) return dict(timestamp=timestamp, offset=offset, negative_utc=negative_utc) timestamps_with_timezone = timestamps_with_timezone_d().map( TimestampWithTimezone.from_dict ) def origins_d(): return builds(dict, url=iris()) def origins(): return origins_d().map(Origin.from_dict) def origin_visits_d(): return builds( dict, visit=integers(1, 1000), origin=iris(), date=aware_datetimes(), type=pgsql_text(), ) def origin_visits(): return origin_visits_d().map(OriginVisit.from_dict) def metadata_dicts(): return dictionaries(pgsql_text(), pgsql_text()) def origin_visit_statuses_d(): return builds( dict, visit=integers(1, 1000), origin=iris(), type=optional(sampled_from(["git", "svn", "pypi", "debian"])), status=sampled_from( ["created", "ongoing", "full", "partial", "not_found", "failed"] ), date=aware_datetimes(), snapshot=optional(sha1_git()), metadata=optional(metadata_dicts()), ) def origin_visit_statuses(): return origin_visit_statuses_d().map(OriginVisitStatus.from_dict) @composite def releases_d(draw): target_type = sampled_from([x.value for x in ObjectType]) name = binary() message = optional(binary()) synthetic = booleans() target = sha1_git() metadata = optional(revision_metadata()) return draw( one_of( builds( dict, name=name, message=message, synthetic=synthetic, author=none(), date=none(), target=target, target_type=target_type, metadata=metadata, ), builds( dict, name=name, message=message, synthetic=synthetic, date=timestamps_with_timezone_d(), author=persons_d(), target=target, target_type=target_type, metadata=metadata, ), ) ) def releases(): return releases_d().map(Release.from_dict) revision_metadata = metadata_dicts def extra_headers(): return lists( tuples(binary(min_size=0, max_size=50), binary(min_size=0, max_size=500)) ).map(tuple) def revisions_d(): return builds( dict, message=optional(binary()), synthetic=booleans(), author=persons_d(), committer=persons_d(), date=timestamps_with_timezone_d(), committer_date=timestamps_with_timezone_d(), parents=tuples(sha1_git()), directory=sha1_git(), type=sampled_from([x.value 
for x in RevisionType]), metadata=optional(revision_metadata()), extra_headers=extra_headers(), ) # TODO: metadata['extra_headers'] can have binary keys and values def revisions(): return revisions_d().map(Revision.from_dict) def directory_entries_d(): return builds( dict, name=binary(), target=sha1_git(), type=sampled_from(["file", "dir", "rev"]), perms=sampled_from([perm.value for perm in DentryPerms]), ) def directory_entries(): return directory_entries_d().map(DirectoryEntry) def directories_d(): return builds(dict, entries=tuples(directory_entries_d())) def directories(): return directories_d().map(Directory.from_dict) def contents_d(): return one_of(present_contents_d(), skipped_contents_d()) def contents(): return one_of(present_contents(), skipped_contents()) def present_contents_d(): return builds( dict, data=binary(max_size=4096), ctime=optional(aware_datetimes()), status=one_of(just("visible"), just("hidden")), ) def present_contents(): return present_contents_d().map(lambda d: Content.from_data(**d)) @composite def skipped_contents_d(draw): result = BaseContent._hash_data(draw(binary(max_size=4096))) result.pop("data") nullify_attrs = draw( sets(sampled_from(["sha1", "sha1_git", "sha256", "blake2s256"])) ) for k in nullify_attrs: result[k] = None result["reason"] = draw(pgsql_text()) result["status"] = "absent" result["ctime"] = draw(optional(aware_datetimes())) return result def skipped_contents(): return skipped_contents_d().map(SkippedContent.from_dict) def branch_names(): return binary(min_size=1) def branch_targets_object_d(): return builds( dict, target=sha1_git(), target_type=sampled_from( [x.value for x in TargetType if x.value not in ("alias",)] ), ) def branch_targets_alias_d(): return builds( dict, target=sha1_git(), target_type=just("alias") ) # TargetType.ALIAS.value)) def branch_targets_d(*, only_objects=False): if only_objects: return branch_targets_object_d() else: return one_of(branch_targets_alias_d(), branch_targets_object_d()) def branch_targets(*, only_objects=False): return builds(SnapshotBranch.from_dict, branch_targets_d(only_objects=only_objects)) @composite def snapshots_d(draw, *, min_size=0, max_size=100, only_objects=False): branches = draw( dictionaries( keys=branch_names(), values=optional(branch_targets_d(only_objects=only_objects)), min_size=min_size, max_size=max_size, ) ) if not only_objects: # Make sure aliases point to actual branches unresolved_aliases = { branch: target["target"] for branch, target in branches.items() if ( target and target["target_type"] == "alias" and target["target"] not in branches ) } for alias_name, alias_target in unresolved_aliases.items(): # Override alias branch with one pointing to a real object # if max_size constraint is reached alias = alias_target if len(branches) < max_size else alias_name branches[alias] = draw(branch_targets_d(only_objects=True)) # Ensure no cycles between aliases while True: try: - id_ = snapshot_identifier( + snapshot = Snapshot.from_dict( { "branches": { name: branch or None for (name, branch) in branches.items() } } ) except ValueError as e: for (source, target) in e.args[1]: branches[source] = draw(branch_targets_d(only_objects=True)) else: break - return dict(id=hash_to_bytes(id_), branches=branches) + return snapshot.to_dict() def snapshots(*, min_size=0, max_size=100, only_objects=False): return snapshots_d( min_size=min_size, max_size=max_size, only_objects=only_objects ).map(Snapshot.from_dict) def metadata_authorities(): return builds(MetadataAuthority, url=iris(), 
metadata=just(None)) def metadata_fetchers(): return builds( MetadataFetcher, name=text(min_size=1, alphabet=string.printable), version=text( min_size=1, alphabet=string.ascii_letters + string.digits + string.punctuation, ), metadata=just(None), ) def raw_extrinsic_metadata(): return builds( RawExtrinsicMetadata, target=extended_swhids(), discovery_date=aware_datetimes(), authority=metadata_authorities(), fetcher=metadata_fetchers(), format=text(min_size=1, alphabet=string.printable), ) def raw_extrinsic_metadata_d(): return raw_extrinsic_metadata().map(RawExtrinsicMetadata.to_dict) def objects(blacklist_types=("origin_visit_status",), split_content=False): """generates a random couple (type, obj) which obj is an instance of the Model class corresponding to obj_type. `blacklist_types` is a list of obj_type to exclude from the strategy. If `split_content` is True, generates Content and SkippedContent under different obj_type, resp. "content" and "skipped_content". """ strategies = [ ("origin", origins), ("origin_visit", origin_visits), ("origin_visit_status", origin_visit_statuses), ("snapshot", snapshots), ("release", releases), ("revision", revisions), ("directory", directories), ("raw_extrinsic_metadata", raw_extrinsic_metadata), ] if split_content: strategies.append(("content", present_contents)) strategies.append(("skipped_content", skipped_contents)) else: strategies.append(("content", contents)) args = [ obj_gen().map(lambda x, obj_type=obj_type: (obj_type, x)) for (obj_type, obj_gen) in strategies if obj_type not in blacklist_types ] return one_of(*args) def object_dicts(blacklist_types=("origin_visit_status",), split_content=False): """generates a random couple (type, dict) which dict is suitable for .from_dict() factory methods. `blacklist_types` is a list of obj_type to exclude from the strategy. If `split_content` is True, generates Content and SkippedContent under different obj_type, resp. "content" and "skipped_content". """ strategies = [ ("origin", origins_d), ("origin_visit", origin_visits_d), ("origin_visit_status", origin_visit_statuses_d), ("snapshot", snapshots_d), ("release", releases_d), ("revision", revisions_d), ("directory", directories_d), ("raw_extrinsic_metadata", raw_extrinsic_metadata_d), ] if split_content: strategies.append(("content", present_contents_d)) strategies.append(("skipped_content", skipped_contents_d)) else: strategies.append(("content", contents_d)) args = [ obj_gen().map(lambda x, obj_type=obj_type: (obj_type, x)) for (obj_type, obj_gen) in strategies if obj_type not in blacklist_types ] return one_of(*args) diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py index 194bc25..6fa6366 100644 --- a/swh/model/identifiers.py +++ b/swh/model/identifiers.py @@ -1,287 +1,91 @@ # Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from __future__ import annotations - from typing import Any, Dict +import warnings from . import model # Reexport for backward compatibility from .git_objects import * # noqa from .hashutil import MultiHash, hash_to_hex # Reexport for backward compatibility from .swhids import * # noqa +warnings.warn( + "The swh.model.identifiers module is deprecated. 
" + "SWHID-related classes were moved to swh.model.swhids, and identifier " + "computation is now done directly with swh.model.model classes.", + DeprecationWarning, + stacklevel=2, +) + # The following are deprecated aliases of the variants defined in ObjectType # while transitioning from SWHID to QualifiedSWHID ORIGIN = "origin" SNAPSHOT = "snapshot" REVISION = "revision" RELEASE = "release" DIRECTORY = "directory" CONTENT = "content" RAW_EXTRINSIC_METADATA = "raw_extrinsic_metadata" def content_identifier(content: Dict[str, Any]) -> Dict[str, bytes]: - """Return the intrinsic identifier for a content. - - A content's identifier is the sha1, sha1_git and sha256 checksums of its - data. - - Args: - content: a content conforming to the Software Heritage schema - - Returns: - A dictionary with all the hashes for the data - - Raises: - KeyError: if the content doesn't have a data member. - + """Deprecated, use :class:`swh.model.Content` instead: + ``content_identifier(d)`` is equivalent to: + ``{k: hash_to_hex(v) for (k, v) in Content.from_data(d["data"]).hashes().items()}`` """ - return MultiHash.from_data(content["data"]).digest() def directory_identifier(directory: Dict[str, Any]) -> str: - """Return the intrinsic identifier for a directory. - - A directory's identifier is the tree sha1 à la git of a directory listing, - using the following algorithm, which is equivalent to the git algorithm for - trees: + """Deprecated, use :class:`swh.model.Directory` instead: + ``directory_identifier(d)`` is equivalent to: + ``hash_to_hex(Directory.from_dict(d).id)``. - 1. Entries of the directory are sorted using the name (or the name with '/' - appended for directory entries) as key, in bytes order. - - 2. For each entry of the directory, the following bytes are output: - - - the octal representation of the permissions for the entry (stored in - the 'perms' member), which is a representation of the entry type: - - - b'100644' (int 33188) for files - - b'100755' (int 33261) for executable files - - b'120000' (int 40960) for symbolic links - - b'40000' (int 16384) for directories - - b'160000' (int 57344) for references to revisions - - - an ascii space (b'\x20') - - the entry's name (as raw bytes), stored in the 'name' member - - a null byte (b'\x00') - - the 20 byte long identifier of the object pointed at by the entry, - stored in the 'target' member: - - - for files or executable files: their blob sha1_git - - for symbolic links: the blob sha1_git of a file containing the link - destination - - for directories: their intrinsic identifier - - for revisions: their intrinsic identifier - - (Note that there is no separator between entries) - - """ + See :func:`swh.model.git_objects.directory_git_object` for details of the + format used to generate this identifier.""" return hash_to_hex(model.Directory.from_dict(directory).id) def revision_identifier(revision: Dict[str, Any]) -> str: - """Return the intrinsic identifier for a revision. - - The fields used for the revision identifier computation are: + """Deprecated, use :class:`swh.model.Revision` instead: + ``revision_identifier(d)`` is equivalent to: + ``hash_to_hex(Revision.from_dict(d).id)``. 
- - directory - - parents - - author - - author_date - - committer - - committer_date - - extra_headers or metadata -> extra_headers - - message - - A revision's identifier is the 'git'-checksum of a commit manifest - constructed as follows (newlines are a single ASCII newline character):: - - tree - [for each parent in parents] - parent - [end for each parents] - author - committer - [for each key, value in extra_headers] - - [end for each extra_headers] - - - - The directory identifier is the ascii representation of its hexadecimal - encoding. - - Author and committer are formatted using the :attr:`Person.fullname` attribute only. - Dates are formatted with the :func:`format_offset` function. - - Extra headers are an ordered list of [key, value] pairs. Keys are strings - and get encoded to utf-8 for identifier computation. Values are either byte - strings, unicode strings (that get encoded to utf-8), or integers (that get - encoded to their utf-8 decimal representation). - - Multiline extra header values are escaped by indenting the continuation - lines with one ascii space. - - If the message is None, the manifest ends with the last header. Else, the - message is appended to the headers after an empty line. - - The checksum of the full manifest is computed using the 'commit' git object - type. - - """ + See :func:`swh.model.git_objects.revision_git_object` for details of the + format used to generate this identifier.""" return hash_to_hex(model.Revision.from_dict(revision).id) def release_identifier(release: Dict[str, Any]) -> str: - """Return the intrinsic identifier for a release.""" + """Deprecated, use :class:`swh.model.Release` instead: + ``release_identifier(d)`` is equivalent to: + ``hash_to_hex(Release.from_dict(d).id)``. + + See :func:`swh.model.git_objects.release_git_object` for details of the + format used to generate this identifier.""" return hash_to_hex(model.Release.from_dict(release).id) def snapshot_identifier(snapshot: Dict[str, Any]) -> str: - """Return the intrinsic identifier for a snapshot. - - Snapshots are a set of named branches, which are pointers to objects at any - level of the Software Heritage DAG. - - As well as pointing to other objects in the Software Heritage DAG, branches - can also be *alias*es, in which case their target is the name of another - branch in the same snapshot, or *dangling*, in which case the target is - unknown (and represented by the ``None`` value). - - A snapshot identifier is a salted sha1 (using the git hashing algorithm - with the ``snapshot`` object type) of a manifest following the algorithm: - - 1. Branches are sorted using the name as key, in bytes order. - - 2. For each branch, the following bytes are output: - - - the type of the branch target: + """Deprecated, use :class:`swh.model.Snapshot` instead: + ``snapshot_identifier(d)`` is equivalent to: + ``hash_to_hex(Snapshot.from_dict(d).id)``. 
- - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` - for the corresponding entries in the DAG; - - ``alias`` for branches referencing another branch; - - ``dangling`` for dangling branches - - - an ascii space (``\\x20``) - - the branch name (as raw bytes) - - a null byte (``\\x00``) - - the length of the target identifier, as an ascii-encoded decimal number - (``20`` for current intrinsic identifiers, ``0`` for dangling - branches, the length of the target branch name for branch aliases) - - a colon (``:``) - - the identifier of the target object pointed at by the branch, - stored in the 'target' member: - - - for contents: their *sha1_git* - - for directories, revisions, releases or snapshots: their intrinsic - identifier - - for branch aliases, the name of the target branch (as raw bytes) - - for dangling branches, the empty string - - Note that, akin to directory manifests, there is no separator between - entries. Because of symbolic branches, identifiers are of arbitrary - length but are length-encoded to avoid ambiguity. - - Args: - snapshot (dict): the snapshot of which to compute the identifier. A - single entry is needed, ``'branches'``, which is itself a :class:`dict` - mapping each branch to its target - - Returns: - str: the intrinsic identifier for `snapshot` - - """ + See :func:`swh.model.git_objects.snapshot_git_object` for details of the + format used to generate this identifier.""" return hash_to_hex(model.Snapshot.from_dict(snapshot).id) def origin_identifier(origin): - """Return the intrinsic identifier for an origin. - - An origin's identifier is the sha1 checksum of the entire origin URL - + """Deprecated, use :class:`swh.model.Origin` instead: + ``origin_identifier(url)`` is equivalent to: + ``hash_to_hex(Origin(url=url).id)``. """ - return hash_to_hex(model.Origin.from_dict(origin).id) - - -def raw_extrinsic_metadata_identifier(metadata: Dict[str, Any]) -> str: - """Return the intrinsic identifier for a RawExtrinsicMetadata object. - - A raw_extrinsic_metadata identifier is a salted sha1 (using the git - hashing algorithm with the ``raw_extrinsic_metadata`` object type) of - a manifest following the format:: - - target $ExtendedSwhid - discovery_date $Timestamp - authority $StrWithoutSpaces $IRI - fetcher $Str $Version - format $StrWithoutSpaces - origin $IRI <- optional - visit $IntInDecimal <- optional - snapshot $CoreSwhid <- optional - release $CoreSwhid <- optional - revision $CoreSwhid <- optional - path $Bytes <- optional - directory $CoreSwhid <- optional - - $MetadataBytes - - $IRI must be RFC 3987 IRIs (so they may contain newlines, that are escaped as - described below) - - $StrWithoutSpaces and $Version are ASCII strings, and may not contain spaces. - - $Str is an UTF-8 string. - $CoreSwhid are core SWHIDs, as defined in :ref:`persistent-identifiers`. - $ExtendedSwhid is a core SWHID, with extra types allowed ('ori' for - origins and 'emd' for raw extrinsic metadata) - - $Timestamp is a decimal representation of the rounded-down integer number of - seconds since the UNIX epoch (1970-01-01 00:00:00 UTC), - with no leading '0' (unless the timestamp value is zero) and no timezone. - It may be negative by prefixing it with a '-', which must not be followed - by a '0'. - - Newlines in $Bytes, $Str, and $Iri are escaped as with other git fields, - ie. by adding a space after them. 
- - Returns: - str: the intrinsic identifier for ``metadata`` - - """ - return hash_to_hex(model.RawExtrinsicMetadata.from_dict(metadata).id) - - -def extid_identifier(extid: Dict[str, Any]) -> str: - """Return the intrinsic identifier for an ExtID object. - - An ExtID identifier is a salted sha1 (using the git hashing algorithm with - the ``extid`` object type) of a manifest following the format: - - ``` - extid_type $StrWithoutSpaces - [extid_version $Str] - extid $Bytes - target $CoreSwhid - ``` - - $StrWithoutSpaces is an ASCII string, and may not contain spaces. - - Newlines in $Bytes are escaped as with other git fields, ie. by adding a - space after them. - - The extid_version line is only generated if the version is non-zero. - - Returns: - str: the intrinsic identifier for `extid` - - """ - - return hash_to_hex(model.ExtID.from_dict(extid).id) + return hash_to_hex(model.Origin.from_dict(origin).id) diff --git a/swh/model/tests/swh_model_data.py b/swh/model/tests/swh_model_data.py index 1f5dded..d920c1e 100644 --- a/swh/model/tests/swh_model_data.py +++ b/swh/model/tests/swh_model_data.py @@ -1,385 +1,385 @@ # Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from typing import Dict, Sequence import attr from swh.model.hashutil import MultiHash, hash_to_bytes -from swh.model.identifiers import ExtendedSWHID from swh.model.model import ( BaseModel, Content, Directory, DirectoryEntry, ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, ObjectType, Origin, OriginVisit, OriginVisitStatus, Person, RawExtrinsicMetadata, Release, Revision, RevisionType, SkippedContent, Snapshot, SnapshotBranch, TargetType, Timestamp, TimestampWithTimezone, ) +from swh.model.swhids import ExtendedSWHID UTC = datetime.timezone.utc CONTENTS = [ Content( length=4, data=f"foo{i}".encode(), status="visible", **MultiHash.from_data(f"foo{i}".encode()).digest(), ) for i in range(10) ] + [ Content( length=14, data=f"forbidden foo{i}".encode(), status="hidden", **MultiHash.from_data(f"forbidden foo{i}".encode()).digest(), ) for i in range(10) ] SKIPPED_CONTENTS = [ SkippedContent( length=4, status="absent", reason=f"because chr({i}) != '*'", **MultiHash.from_data(f"bar{i}".encode()).digest(), ) for i in range(2) ] duplicate_content1 = Content( length=4, sha1=hash_to_bytes("44973274ccef6ab4dfaaf86599792fa9c3fe4689"), sha1_git=b"another-foo", blake2s256=b"another-bar", sha256=b"another-baz", status="visible", ) # Craft a sha1 collision sha1_array = bytearray(duplicate_content1.sha1_git) sha1_array[0] += 1 duplicate_content2 = attr.evolve(duplicate_content1, sha1_git=bytes(sha1_array)) DUPLICATE_CONTENTS = [duplicate_content1, duplicate_content2] COMMITTERS = [ Person(fullname=b"foo", name=b"foo", email=b""), Person(fullname=b"bar", name=b"bar", email=b""), ] DATES = [ TimestampWithTimezone( timestamp=Timestamp(seconds=1234567891, microseconds=0,), offset=120, negative_utc=False, ), TimestampWithTimezone( timestamp=Timestamp(seconds=1234567892, microseconds=0,), offset=120, negative_utc=False, ), ] REVISIONS = [ Revision( id=hash_to_bytes("66c7c1cd9673275037140f2abff7b7b11fc9439c"), message=b"hello", date=DATES[0], committer=COMMITTERS[0], author=COMMITTERS[0], committer_date=DATES[0], type=RevisionType.GIT, directory=b"\x01" * 20, synthetic=False, metadata=None, parents=( 
hash_to_bytes("9b918dd063cec85c2bc63cc7f167e29f5894dcbc"), hash_to_bytes("757f38bdcd8473aaa12df55357f5e2f1a318e672"), ), ), Revision( id=hash_to_bytes("c7f96242d73c267adc77c2908e64e0c1cb6a4431"), message=b"hello again", date=DATES[1], committer=COMMITTERS[1], author=COMMITTERS[1], committer_date=DATES[1], type=RevisionType.MERCURIAL, directory=b"\x02" * 20, synthetic=False, metadata=None, parents=(), extra_headers=((b"foo", b"bar"),), ), ] EXTIDS = [ ExtID(extid_type="git256", extid=b"\x03" * 32, target=REVISIONS[0].swhid(),), ExtID(extid_type="hg", extid=b"\x04" * 20, target=REVISIONS[1].swhid(),), ExtID( extid_type="hg-nodeid", extid=b"\x05" * 20, target=REVISIONS[1].swhid(), extid_version=1, ), ] RELEASES = [ Release( id=hash_to_bytes("8059dc4e17fcd0e51ca3bcd6b80f4577d281fd08"), name=b"v0.0.1", date=TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0,), offset=120, negative_utc=False, ), author=COMMITTERS[0], target_type=ObjectType.REVISION, target=b"\x04" * 20, message=b"foo", synthetic=False, ), Release( id=hash_to_bytes("ee4d20e80af850cc0f417d25dc5073792c5010d2"), name=b"this-is-a/tag/1.0", date=None, author=None, target_type=ObjectType.DIRECTORY, target=b"\x05" * 20, message=b"bar", synthetic=False, ), ] ORIGINS = [ Origin(url="https://somewhere.org/den/fox",), Origin(url="https://overtherainbow.org/fox/den",), ] ORIGIN_VISITS = [ OriginVisit( origin=ORIGINS[0].url, date=datetime.datetime(2013, 5, 7, 4, 20, 39, 369271, tzinfo=UTC), visit=1, type="git", ), OriginVisit( origin=ORIGINS[1].url, date=datetime.datetime(2014, 11, 27, 17, 20, 39, tzinfo=UTC), visit=1, type="hg", ), OriginVisit( origin=ORIGINS[0].url, date=datetime.datetime(2018, 11, 27, 17, 20, 39, tzinfo=UTC), visit=2, type="git", ), OriginVisit( origin=ORIGINS[0].url, date=datetime.datetime(2018, 11, 27, 17, 20, 39, tzinfo=UTC), visit=3, type="git", ), OriginVisit( origin=ORIGINS[1].url, date=datetime.datetime(2015, 11, 27, 17, 20, 39, tzinfo=UTC), visit=2, type="hg", ), ] # The origin-visit-status dates needs to be shifted slightly in the future from their # visit dates counterpart. Otherwise, we are hitting storage-wise the "on conflict" # ignore policy (because origin-visit-add creates an origin-visit-status with the same # parameters from the origin-visit {origin, visit, date}... 
ORIGIN_VISIT_STATUSES = [ OriginVisitStatus( origin=ORIGINS[0].url, date=datetime.datetime(2013, 5, 7, 4, 20, 39, 432222, tzinfo=UTC), visit=1, type="git", status="ongoing", snapshot=None, metadata=None, ), OriginVisitStatus( origin=ORIGINS[1].url, date=datetime.datetime(2014, 11, 27, 17, 21, 12, tzinfo=UTC), visit=1, type="hg", status="ongoing", snapshot=None, metadata=None, ), OriginVisitStatus( origin=ORIGINS[0].url, date=datetime.datetime(2018, 11, 27, 17, 20, 59, tzinfo=UTC), visit=2, type="git", status="ongoing", snapshot=None, metadata=None, ), OriginVisitStatus( origin=ORIGINS[0].url, date=datetime.datetime(2018, 11, 27, 17, 20, 49, tzinfo=UTC), visit=3, type="git", status="full", snapshot=hash_to_bytes("9e78d7105c5e0f886487511e2a92377b4ee4c32a"), metadata=None, ), OriginVisitStatus( origin=ORIGINS[1].url, date=datetime.datetime(2015, 11, 27, 17, 22, 18, tzinfo=UTC), visit=2, type="hg", status="partial", snapshot=hash_to_bytes("0e7f84ede9a254f2cd55649ad5240783f557e65f"), metadata=None, ), ] DIRECTORIES = [ Directory(id=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), entries=()), Directory( id=hash_to_bytes("87b339104f7dc2a8163dec988445e3987995545f"), entries=( DirectoryEntry( name=b"file1.ext", perms=0o644, type="file", target=CONTENTS[0].sha1_git, ), DirectoryEntry( name=b"dir1", perms=0o755, type="dir", target=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), ), DirectoryEntry( name=b"subprepo1", perms=0o160000, type="rev", target=REVISIONS[1].id, ), ), ), ] SNAPSHOTS = [ Snapshot( id=hash_to_bytes("9e78d7105c5e0f886487511e2a92377b4ee4c32a"), branches={ b"master": SnapshotBranch( target_type=TargetType.REVISION, target=REVISIONS[0].id ) }, ), Snapshot( id=hash_to_bytes("0e7f84ede9a254f2cd55649ad5240783f557e65f"), branches={ b"target/revision": SnapshotBranch( target_type=TargetType.REVISION, target=REVISIONS[0].id, ), b"target/alias": SnapshotBranch( target_type=TargetType.ALIAS, target=b"target/revision" ), b"target/directory": SnapshotBranch( target_type=TargetType.DIRECTORY, target=DIRECTORIES[0].id, ), b"target/release": SnapshotBranch( target_type=TargetType.RELEASE, target=RELEASES[0].id ), b"target/snapshot": SnapshotBranch( target_type=TargetType.SNAPSHOT, target=hash_to_bytes("9e78d7105c5e0f886487511e2a92377b4ee4c32a"), ), }, ), ] METADATA_AUTHORITIES = [ MetadataAuthority( type=MetadataAuthorityType.FORGE, url="http://example.org/", metadata={}, ), ] METADATA_FETCHERS = [ MetadataFetcher(name="test-fetcher", version="1.0.0", metadata={},) ] RAW_EXTRINSIC_METADATA = [ RawExtrinsicMetadata( target=Origin("http://example.org/foo.git").swhid(), discovery_date=datetime.datetime(2020, 7, 30, 17, 8, 20, tzinfo=UTC), authority=attr.evolve(METADATA_AUTHORITIES[0], metadata=None), fetcher=attr.evolve(METADATA_FETCHERS[0], metadata=None), format="json", metadata=b'{"foo": "bar"}', ), RawExtrinsicMetadata( target=ExtendedSWHID.from_string(str(CONTENTS[0].swhid())), discovery_date=datetime.datetime(2020, 7, 30, 17, 8, 20, tzinfo=UTC), authority=attr.evolve(METADATA_AUTHORITIES[0], metadata=None), fetcher=attr.evolve(METADATA_FETCHERS[0], metadata=None), format="json", metadata=b'{"foo": "bar"}', ), ] TEST_OBJECTS: Dict[str, Sequence[BaseModel]] = { "content": CONTENTS, "directory": DIRECTORIES, "extid": EXTIDS, "metadata_authority": METADATA_AUTHORITIES, "metadata_fetcher": METADATA_FETCHERS, "origin": ORIGINS, "origin_visit": ORIGIN_VISITS, "origin_visit_status": ORIGIN_VISIT_STATUSES, "raw_extrinsic_metadata": RAW_EXTRINSIC_METADATA, "release": RELEASES, 
"revision": REVISIONS, "snapshot": SNAPSHOTS, "skipped_content": SKIPPED_CONTENTS, } SAMPLE_FOLDER_SWHIDS = [ "swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759", "swh:1:cnt:7d5c08111e21c8a9f71540939998551683375fad", "swh:1:cnt:68769579c3eaadbe555379b9c3538e6628bae1eb", "swh:1:cnt:e86b45e538d9b6888c969c89fbd22a85aa0e0366", "swh:1:dir:3c1f578394f4623f74a0ba7fe761729f59fc6ec4", "swh:1:dir:c3020f6bf135a38c6df3afeb5fb38232c5e07087", "swh:1:cnt:133693b125bad2b4ac318535b84901ebb1f6b638", "swh:1:dir:4b825dc642cb6eb9a060e54bf8d69288fbee4904", "swh:1:cnt:19102815663d23f8b75a47e7a01965dcdc96468c", "swh:1:dir:2b41c40f0d1fbffcba12497db71fba83fcca96e5", "swh:1:cnt:8185dfb2c0c2c597d16f75a8a0c37668567c3d7e", "swh:1:cnt:7c4c57ba9ff496ad179b8f65b1d286edbda34c9a", "swh:1:cnt:acac326ddd63b0bc70840659d4ac43619484e69f", ] diff --git a/swh/model/tests/test_identifiers.py b/swh/model/tests/test_identifiers.py index c065b0d..f5da0a9 100644 --- a/swh/model/tests/test_identifiers.py +++ b/swh/model/tests/test_identifiers.py @@ -1,1796 +1,1824 @@ # Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import hashlib import itertools from typing import Dict import unittest import attr import pytest -from swh.model import hashutil, identifiers +from swh.model import git_objects, hashutil from swh.model.exceptions import ValidationError from swh.model.hashutil import hash_to_bytes as _x -from swh.model.hashutil import hash_to_hex -from swh.model.identifiers import ( +from swh.model.model import ( + Content, + Directory, + ExtID, + Origin, + RawExtrinsicMetadata, + Release, + Revision, + Snapshot, + TimestampWithTimezone, +) +from swh.model.swhids import ( SWHID_QUALIFIERS, CoreSWHID, ExtendedObjectType, ExtendedSWHID, ObjectType, QualifiedSWHID, - normalize_timestamp, ) def remove_id(d: Dict) -> Dict: """Returns a (shallow) copy of a dict with the 'id' key removed.""" d = d.copy() if "id" in d: del d["id"] return d class UtilityFunctionsDateOffset(unittest.TestCase): def setUp(self): self.dates = { b"1448210036": {"seconds": 1448210036, "microseconds": 0,}, b"1448210036.002342": {"seconds": 1448210036, "microseconds": 2342,}, b"1448210036.12": {"seconds": 1448210036, "microseconds": 120000,}, } self.offsets = { 0: b"+0000", -630: b"-1030", 800: b"+1320", } def test_format_date(self): for date_repr, date in self.dates.items(): - self.assertEqual(identifiers.format_date(date), date_repr) + self.assertEqual(git_objects.format_date(date), date_repr) def test_format_offset(self): for offset, res in self.offsets.items(): - self.assertEqual(identifiers.format_offset(offset), res) + self.assertEqual(git_objects.format_offset(offset), res) content_example = { "status": "visible", "length": 5, "data": b"1984\n", "ctime": datetime.datetime(2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc), } class ContentIdentifier(unittest.TestCase): def setUp(self): self.content_id = hashutil.MultiHash.from_data(content_example["data"]).digest() def test_content_identifier(self): self.assertEqual( - identifiers.content_identifier(content_example), self.content_id + Content.from_data(content_example["data"]).hashes(), self.content_id ) directory_example = { "id": _x("d7ed3d2c31d608823be58b1cbe57605310615231"), "entries": [ { "type": "file", "perms": 33188, "name": b"README", "target": _x("37ec8ea2110c0b7a32fbb0e872f6e7debbf95e21"), 
}, { "type": "file", "perms": 33188, "name": b"Rakefile", "target": _x("3bb0e8592a41ae3185ee32266c860714980dbed7"), }, { "type": "dir", "perms": 16384, "name": b"app", "target": _x("61e6e867f5d7ba3b40540869bc050b0c4fed9e95"), }, { "type": "file", "perms": 33188, "name": b"1.megabyte", "target": _x("7c2b2fbdd57d6765cdc9d84c2d7d333f11be7fb3"), }, { "type": "dir", "perms": 16384, "name": b"config", "target": _x("591dfe784a2e9ccc63aaba1cb68a765734310d98"), }, { "type": "dir", "perms": 16384, "name": b"public", "target": _x("9588bf4522c2b4648bfd1c61d175d1f88c1ad4a5"), }, { "type": "file", "perms": 33188, "name": b"development.sqlite3", "target": _x("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"), }, { "type": "dir", "perms": 16384, "name": b"doc", "target": _x("154705c6aa1c8ead8c99c7915373e3c44012057f"), }, { "type": "dir", "perms": 16384, "name": b"db", "target": _x("85f157bdc39356b7bc7de9d0099b4ced8b3b382c"), }, { "type": "dir", "perms": 16384, "name": b"log", "target": _x("5e3d3941c51cce73352dff89c805a304ba96fffe"), }, { "type": "dir", "perms": 16384, "name": b"script", "target": _x("1b278423caf176da3f3533592012502aa10f566c"), }, { "type": "dir", "perms": 16384, "name": b"test", "target": _x("035f0437c080bfd8711670b3e8677e686c69c763"), }, { "type": "dir", "perms": 16384, "name": b"vendor", "target": _x("7c0dc9ad978c1af3f9a4ce061e50f5918bd27138"), }, { "type": "rev", "perms": 57344, "name": b"will_paginate", "target": _x("3d531e169db92a16a9a8974f0ae6edf52e52659e"), }, # in git order, the dir named "order" should be between the files # named "order." and "order0" { "type": "dir", "perms": 16384, "name": b"order", "target": _x("62cdb7020ff920e5aa642c3d4066950dd1f01f4d"), }, { "type": "file", "perms": 16384, "name": b"order.", "target": _x("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), }, { "type": "file", "perms": 16384, "name": b"order0", "target": _x("bbe960a25ea311d21d40669e93df2003ba9b90a2"), }, ], } dummy_qualifiers = {"origin": "https://example.com", "lines": "42"} class DirectoryIdentifier(unittest.TestCase): def setUp(self): self.directory = directory_example self.empty_directory = { "id": "4b825dc642cb6eb9a060e54bf8d69288fbee4904", "entries": [], } def test_dir_identifier(self): + self.assertEqual(Directory.from_dict(self.directory).id, self.directory["id"]) self.assertEqual( - _x(identifiers.directory_identifier(self.directory)), self.directory["id"] - ) - self.assertEqual( - _x(identifiers.directory_identifier(remove_id(self.directory))), - self.directory["id"], + Directory.from_dict(remove_id(self.directory)).id, self.directory["id"], ) def test_dir_identifier_entry_order(self): # Reverse order of entries, check the id is still the same. 
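# Editorial sketch (hedged, not part of the original test): order-insensitivity
# holds because directory manifests are hashed over entries sorted by name
# (with '/' appended for directory entries), so for any well-formed `entries`
# list (hypothetical name):
#
#     d1 = Directory.from_dict({"entries": entries})
#     d2 = Directory.from_dict({"entries": list(reversed(entries))})
#     assert d1.id == d2.id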
directory = {"entries": reversed(self.directory["entries"])} self.assertEqual( - _x(identifiers.directory_identifier(remove_id(directory))), - self.directory["id"], + Directory.from_dict(remove_id(directory)).id, self.directory["id"], ) def test_dir_identifier_empty_directory(self): self.assertEqual( - identifiers.directory_identifier(remove_id(self.empty_directory)), - self.empty_directory["id"], + Directory.from_dict(remove_id(self.empty_directory)).id, + _x(self.empty_directory["id"]), ) linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) revision_example = { "id": _x("bc0195aad0daa2ad5b0d76cce22b167bc3435590"), "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "committer_date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "message": b"Linux 4.2-rc2\n", "type": "git", "synthetic": False, } class RevisionIdentifier(unittest.TestCase): def setUp(self): gpgsig = b"""\ -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.13 (Darwin) iQIcBAABAgAGBQJVJcYsAAoJEBiY3kIkQRNJVAUQAJ8/XQIfMqqC5oYeEFfHOPYZ L7qy46bXHVBa9Qd8zAJ2Dou3IbI2ZoF6/Et89K/UggOycMlt5FKV/9toWyuZv4Po L682wonoxX99qvVTHo6+wtnmYO7+G0f82h+qHMErxjP+I6gzRNBvRr+SfY7VlGdK wikMKOMWC5smrScSHITnOq1Ews5pe3N7qDYMzK0XVZmgDoaem4RSWMJs4My/qVLN e0CqYWq2A22GX7sXl6pjneJYQvcAXUX+CAzp24QnPSb+Q22Guj91TcxLFcHCTDdn qgqMsEyMiisoglwrCbO+D+1xq9mjN9tNFWP66SQ48mrrHYTBV5sz9eJyDfroJaLP CWgbDTgq6GzRMehHT3hXfYS5NNatjnhkNISXR7pnVP/obIi/vpWh5ll6Gd8q26z+ a/O41UzOaLTeNI365MWT4/cnXohVLRG7iVJbAbCxoQmEgsYMRc/pBAzWJtLfcB2G jdTswYL6+MUdL8sB9pZ82D+BP/YAdHe69CyTu1lk9RT2pYtI/kkfjHubXBCYEJSG +VGllBbYG6idQJpyrOYNRJyrDi9yvDJ2W+S0iQrlZrxzGBVGTB/y65S8C+2WTBcE lf1Qb5GDsQrZWgD+jtWTywOYHtCBwyCKSAXxSARMbNPeak9WPlcW/Jmu+fUcMe2x dg1KdHOa34shrKDaOVzW =od6m -----END PGP SIGNATURE-----""" self.revision = revision_example self.revision_none_metadata = { - "id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590", + "id": _x("bc0195aad0daa2ad5b0d76cce22b167bc3435590"), "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "type": "git", "synthetic": False, "metadata": None, } self.synthetic_revision = { "id": _x("b2a7e1260492e344fab3cbf91bc13c91e05426fd"), "author": { "name": b"Software Heritage", "email": b"robot@softwareheritage.org", }, "date": { "timestamp": {"seconds": 1437047495}, "offset": 0, "negative_utc": False, }, "type": "tar", "committer": { "name": b"Software Heritage", "email": b"robot@softwareheritage.org", }, "committer_date": 1437047495, "synthetic": True, "parents": [], "message": b"synthetic revision message\n", "directory": _x("d11f00a6a0fea6055341d25584b5a96516c0d2b8"), "metadata": { "original_artifact": [ { "archive_type": "tar", "name": "gcc-5.2.0.tar.bz2", "sha1_git": "39d281aff934d44b439730057e55b055e206a586", "sha1": "fe3f5390949d47054b613edc36c557eb1d51c18e", 
"sha256": "5f835b04b5f7dd4f4d2dc96190ec1621b8d89f" "2dc6f638f9f8bc1b1014ba8cad", } ] }, } # cat commit.txt | git hash-object -t commit --stdin self.revision_with_extra_headers = { - "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45", + "id": _x("010d34f384fa99d047cdd5e2f41e56e5c2feee45"), "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "type": "git", "synthetic": False, "extra_headers": ( (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"), (b"svn-revision", b"10"), ), } self.revision_with_gpgsig = { - "id": "44cc742a8ca17b9c279be4cc195a93a6ef7a320e", + "id": _x("44cc742a8ca17b9c279be4cc195a93a6ef7a320e"), "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "extra_headers": ((b"gpgsig", gpgsig),), "message": b"""Merge branch 'master' of git://github.com/alexhenrie/git-po * 'master' of git://github.com/alexhenrie/git-po: l10n: ca.po: update translation """, "type": "git", "synthetic": False, } self.revision_no_message = { - "id": "4cfc623c9238fa92c832beed000ce2d003fd8333", + "id": _x("4cfc623c9238fa92c832beed000ce2d003fd8333"), "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "message": None, "type": "git", "synthetic": False, } self.revision_empty_message = { - "id": "7442cd78bd3b4966921d6a7f7447417b7acb15eb", + "id": _x("7442cd78bd3b4966921d6a7f7447417b7acb15eb"), "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "message": b"", "type": "git", "synthetic": False, } self.revision_only_fullname = { - "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45", + "id": _x("010d34f384fa99d047cdd5e2f41e56e5c2feee45"), "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": {"fullname": b"Linus Torvalds ",}, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "fullname": b"Linus Torvalds ", }, 
"committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "type": "git", "synthetic": False, "extra_headers": ( (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"), (b"svn-revision", b"10"), ), } def test_revision_identifier(self): self.assertEqual( - identifiers.revision_identifier(self.revision), - hash_to_hex(self.revision["id"]), + Revision.from_dict(self.revision).id, self.revision["id"], ) self.assertEqual( - identifiers.revision_identifier(remove_id(self.revision)), - hash_to_hex(self.revision["id"]), + Revision.from_dict(remove_id(self.revision)).id, self.revision["id"], ) def test_revision_identifier_none_metadata(self): self.assertEqual( - identifiers.revision_identifier(remove_id(self.revision_none_metadata)), - hash_to_hex(self.revision_none_metadata["id"]), + Revision.from_dict(remove_id(self.revision_none_metadata)).id, + self.revision_none_metadata["id"], ) def test_revision_identifier_synthetic(self): self.assertEqual( - identifiers.revision_identifier(remove_id(self.synthetic_revision)), - hash_to_hex(self.synthetic_revision["id"]), + Revision.from_dict(remove_id(self.synthetic_revision)).id, + self.synthetic_revision["id"], ) def test_revision_identifier_with_extra_headers(self): self.assertEqual( - identifiers.revision_identifier( - remove_id(self.revision_with_extra_headers) - ), - hash_to_hex(self.revision_with_extra_headers["id"]), + Revision.from_dict(remove_id(self.revision_with_extra_headers)).id, + self.revision_with_extra_headers["id"], ) def test_revision_identifier_with_gpgsig(self): self.assertEqual( - identifiers.revision_identifier(remove_id(self.revision_with_gpgsig)), - hash_to_hex(self.revision_with_gpgsig["id"]), + Revision.from_dict(remove_id(self.revision_with_gpgsig)).id, + self.revision_with_gpgsig["id"], ) def test_revision_identifier_no_message(self): self.assertEqual( - identifiers.revision_identifier(remove_id(self.revision_no_message)), - hash_to_hex(self.revision_no_message["id"]), + Revision.from_dict(remove_id(self.revision_no_message)).id, + self.revision_no_message["id"], ) def test_revision_identifier_empty_message(self): self.assertEqual( - identifiers.revision_identifier(remove_id(self.revision_empty_message)), - hash_to_hex(self.revision_empty_message["id"]), + Revision.from_dict(remove_id(self.revision_empty_message)).id, + self.revision_empty_message["id"], ) def test_revision_identifier_only_fullname(self): self.assertEqual( - identifiers.revision_identifier(remove_id(self.revision_only_fullname)), - hash_to_hex(self.revision_only_fullname["id"]), + Revision.from_dict(remove_id(self.revision_only_fullname)).id, + self.revision_only_fullname["id"], ) release_example = { "id": _x("2b10839e32c4c476e9d94492756bb1a3e1ec4aa8"), "target": _x("741b2252a5e14d6c60a913c77a6099abe73a854a"), "target_type": "revision", "name": b"v2.6.14", "author": { "name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org", "fullname": b"Linus Torvalds ", }, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": b"""\ Linux 2.6.14 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.1 (GNU/Linux) iD8DBQBDYWq6F3YsRnbiHLsRAmaeAJ9RCez0y8rOBbhSv344h86l/VVcugCeIhO1 wdLOnvj91G4wxYqrvThthbE= =7VeT -----END PGP SIGNATURE----- """, "synthetic": False, } class ReleaseIdentifier(unittest.TestCase): def setUp(self): linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) self.release = release_example self.release_no_author = { "id": 
_x("26791a8bcf0e6d33f43aef7682bdb555236d56de"), "target": _x("9ee1c939d1cb936b1f98e8d81aeffab57bae46ab"), "target_type": "revision", "name": b"v2.6.12", "message": b"""\ This is the final 2.6.12 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.2.4 (GNU/Linux) iD8DBQBCsykyF3YsRnbiHLsRAvPNAJ482tCZwuxp/bJRz7Q98MHlN83TpACdHr37 o6X/3T+vm8K3bf3driRr34c= =sBHn -----END PGP SIGNATURE----- """, "synthetic": False, } self.release_no_message = { "id": _x("b6f4f446715f7d9543ef54e41b62982f0db40045"), "target": _x("9ee1c939d1cb936b1f98e8d81aeffab57bae46ab"), "target_type": "revision", "name": b"v2.6.12", "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": None, "synthetic": False, } self.release_empty_message = { "id": _x("71a0aea72444d396575dc25ac37fec87ee3c6492"), "target": _x("9ee1c939d1cb936b1f98e8d81aeffab57bae46ab"), "target_type": "revision", "name": b"v2.6.12", "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": b"", "synthetic": False, } self.release_negative_utc = { "id": _x("97c8d2573a001f88e72d75f596cf86b12b82fd01"), "name": b"20081029", "target": _x("54e9abca4c77421e2921f5f156c9fe4a9f7441c7"), "target_type": "revision", "date": { "timestamp": {"seconds": 1225281976}, "offset": 0, "negative_utc": True, }, "author": {"name": b"Otavio Salvador", "email": b"otavio@debian.org",}, "synthetic": False, "message": b"tagging version 20081029\n\nr56558\n", } self.release_newline_in_author = { "author": { "email": b"esycat@gmail.com", "fullname": b"Eugene Janusov\n", "name": b"Eugene Janusov\n", }, "date": { "negative_utc": None, "offset": 600, "timestamp": {"microseconds": 0, "seconds": 1377480558,}, }, "id": _x("5c98f559d034162de22d3ebeb95433e6f8885231"), "message": b"Release of v0.3.2.", "name": b"0.3.2", "synthetic": False, "target": _x("c06aa3d93b78a2865c4935170030f8c2d7396fd3"), "target_type": "revision", } self.release_snapshot_target = dict(self.release) self.release_snapshot_target["target_type"] = "snapshot" self.release_snapshot_target["id"] = _x( "c29c3ddcc6769a04e54dd69d63a6fdcbc566f850" ) def test_release_identifier(self): self.assertEqual( - identifiers.release_identifier(self.release), - hash_to_hex(self.release["id"]), + Release.from_dict(self.release).id, self.release["id"], ) self.assertEqual( - identifiers.release_identifier(remove_id(self.release)), - hash_to_hex(self.release["id"]), + Release.from_dict(remove_id(self.release)).id, self.release["id"], ) def test_release_identifier_no_author(self): self.assertEqual( - identifiers.release_identifier(remove_id(self.release_no_author)), - hash_to_hex(self.release_no_author["id"]), + Release.from_dict(remove_id(self.release_no_author)).id, + self.release_no_author["id"], ) def test_release_identifier_no_message(self): self.assertEqual( - identifiers.release_identifier(remove_id(self.release_no_message)), - hash_to_hex(self.release_no_message["id"]), + Release.from_dict(remove_id(self.release_no_message)).id, + self.release_no_message["id"], ) def test_release_identifier_empty_message(self): self.assertEqual( - identifiers.release_identifier(remove_id(self.release_empty_message)), - hash_to_hex(self.release_empty_message["id"]), + Release.from_dict(remove_id(self.release_empty_message)).id, + self.release_empty_message["id"], ) def test_release_identifier_negative_utc(self): self.assertEqual( - 
identifiers.release_identifier(remove_id(self.release_negative_utc)), - hash_to_hex(self.release_negative_utc["id"]), + Release.from_dict(remove_id(self.release_negative_utc)).id, + self.release_negative_utc["id"], ) def test_release_identifier_newline_in_author(self): self.assertEqual( - identifiers.release_identifier(remove_id(self.release_newline_in_author)), - hash_to_hex(self.release_newline_in_author["id"]), + Release.from_dict(remove_id(self.release_newline_in_author)).id, + self.release_newline_in_author["id"], ) def test_release_identifier_snapshot_target(self): self.assertEqual( - identifiers.release_identifier(self.release_snapshot_target), - hash_to_hex(self.release_snapshot_target["id"]), + Release.from_dict(self.release_snapshot_target).id, + self.release_snapshot_target["id"], ) snapshot_example = { "id": _x("6e65b86363953b780d92b0a928f3e8fcdd10db36"), "branches": { b"directory": { "target": _x("1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8"), "target_type": "directory", }, b"content": { "target": _x("fe95a46679d128ff167b7c55df5d02356c5a1ae1"), "target_type": "content", }, b"alias": {"target": b"revision", "target_type": "alias",}, b"revision": { "target": _x("aafb16d69fd30ff58afdd69036a26047f3aebdc6"), "target_type": "revision", }, b"release": { "target": _x("7045404f3d1c54e6473c71bbb716529fbad4be24"), "target_type": "release", }, b"snapshot": { "target": _x("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), "target_type": "snapshot", }, b"dangling": None, }, } class SnapshotIdentifier(unittest.TestCase): def setUp(self): super().setUp() self.empty = { - "id": "1a8893e6a86f444e8be8e7bda6cb34fb1735a00e", + "id": _x("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), "branches": {}, } self.dangling_branch = { - "id": "c84502e821eb21ed84e9fd3ec40973abc8b32353", + "id": _x("c84502e821eb21ed84e9fd3ec40973abc8b32353"), "branches": {b"HEAD": None,}, } self.unresolved = { - "id": "84b4548ea486e4b0a7933fa541ff1503a0afe1e0", + "id": _x("84b4548ea486e4b0a7933fa541ff1503a0afe1e0"), "branches": {b"foo": {"target": b"bar", "target_type": "alias",},}, } self.all_types = snapshot_example def test_empty_snapshot(self): self.assertEqual( - identifiers.snapshot_identifier(remove_id(self.empty)), - hash_to_hex(self.empty["id"]), + Snapshot.from_dict(remove_id(self.empty)).id, self.empty["id"], ) def test_dangling_branch(self): self.assertEqual( - identifiers.snapshot_identifier(remove_id(self.dangling_branch)), - hash_to_hex(self.dangling_branch["id"]), + Snapshot.from_dict(remove_id(self.dangling_branch)).id, + self.dangling_branch["id"], ) def test_unresolved(self): with self.assertRaisesRegex(ValueError, "b'foo' -> b'bar'"): - identifiers.snapshot_identifier(remove_id(self.unresolved)) + Snapshot.from_dict(remove_id(self.unresolved)) def test_all_types(self): self.assertEqual( - identifiers.snapshot_identifier(remove_id(self.all_types)), - hash_to_hex(self.all_types["id"]), + Snapshot.from_dict(remove_id(self.all_types)).id, self.all_types["id"], ) authority_example = { "type": "forge", "url": "https://forge.softwareheritage.org/", } fetcher_example = { "name": "swh-phabricator-metadata-fetcher", "version": "0.0.1", } metadata_example = { "target": "swh:1:cnt:568aaf43d83b2c3df8067f3bedbb97d83260be6d", "discovery_date": datetime.datetime( 2021, 1, 25, 11, 27, 51, tzinfo=datetime.timezone.utc ), "authority": authority_example, "fetcher": fetcher_example, "format": "json", "metadata": b'{"foo": "bar"}', } class RawExtrinsicMetadataIdentifier(unittest.TestCase): def setUp(self): super().setUp() self.minimal = 
metadata_example self.maximal = { **self.minimal, "origin": "https://forge.softwareheritage.org/source/swh-model/", "visit": 42, "snapshot": "swh:1:snp:" + "00" * 20, "release": "swh:1:rel:" + "01" * 20, "revision": "swh:1:rev:" + "02" * 20, "path": b"/abc/def", "directory": "swh:1:dir:" + "03" * 20, } def test_minimal(self): git_object = ( b"raw_extrinsic_metadata 210\0" b"target swh:1:cnt:568aaf43d83b2c3df8067f3bedbb97d83260be6d\n" b"discovery_date 1611574071\n" b"authority forge https://forge.softwareheritage.org/\n" b"fetcher swh-phabricator-metadata-fetcher 0.0.1\n" b"format json\n" b"\n" b'{"foo": "bar"}' ) self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(self.minimal), git_object, + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(self.minimal) + ), + git_object, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(self.minimal), - hashlib.sha1(git_object).hexdigest(), + RawExtrinsicMetadata.from_dict(self.minimal).id, + hashlib.sha1(git_object).digest(), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(self.minimal), - "5c13f20ba336e44549baf3d7b9305b027ec9f43d", + RawExtrinsicMetadata.from_dict(self.minimal).id, + _x("5c13f20ba336e44549baf3d7b9305b027ec9f43d"), ) def test_maximal(self): git_object = ( b"raw_extrinsic_metadata 533\0" b"target swh:1:cnt:568aaf43d83b2c3df8067f3bedbb97d83260be6d\n" b"discovery_date 1611574071\n" b"authority forge https://forge.softwareheritage.org/\n" b"fetcher swh-phabricator-metadata-fetcher 0.0.1\n" b"format json\n" b"origin https://forge.softwareheritage.org/source/swh-model/\n" b"visit 42\n" b"snapshot swh:1:snp:0000000000000000000000000000000000000000\n" b"release swh:1:rel:0101010101010101010101010101010101010101\n" b"revision swh:1:rev:0202020202020202020202020202020202020202\n" b"path /abc/def\n" b"directory swh:1:dir:0303030303030303030303030303030303030303\n" b"\n" b'{"foo": "bar"}' ) self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(self.maximal), git_object, + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(self.maximal) + ), + git_object, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(self.maximal), - hashlib.sha1(git_object).hexdigest(), + RawExtrinsicMetadata.from_dict(self.maximal).id, + hashlib.sha1(git_object).digest(), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(self.maximal), - "f96966e1093d15236a31fde07e47d5b1c9428049", + RawExtrinsicMetadata.from_dict(self.maximal).id, + _x("f96966e1093d15236a31fde07e47d5b1c9428049"), ) def test_nonascii_path(self): metadata = { **self.minimal, "path": b"/ab\nc/d\xf0\x9f\xa4\xb7e\x00f", } git_object = ( b"raw_extrinsic_metadata 231\0" b"target swh:1:cnt:568aaf43d83b2c3df8067f3bedbb97d83260be6d\n" b"discovery_date 1611574071\n" b"authority forge https://forge.softwareheritage.org/\n" b"fetcher swh-phabricator-metadata-fetcher 0.0.1\n" b"format json\n" b"path /ab\n" b" c/d\xf0\x9f\xa4\xb7e\x00f\n" b"\n" b'{"foo": "bar"}' ) self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(metadata), git_object, + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(metadata) + ), + git_object, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - hashlib.sha1(git_object).hexdigest(), + RawExtrinsicMetadata.from_dict(metadata).id, + hashlib.sha1(git_object).digest(), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - 
"7cc83fd1912176510c083f5df43f01b09af4b333", + RawExtrinsicMetadata.from_dict(metadata).id, + _x("7cc83fd1912176510c083f5df43f01b09af4b333"), ) def test_timezone_insensitive(self): """Checks the timezone of the datetime.datetime does not affect the hashed git_object.""" utc_plus_one = datetime.timezone(datetime.timedelta(hours=1)) metadata = { **self.minimal, "discovery_date": datetime.datetime( 2021, 1, 25, 12, 27, 51, tzinfo=utc_plus_one, ), } self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(self.minimal), - identifiers.raw_extrinsic_metadata_git_object(metadata), + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(self.minimal) + ), + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(metadata) + ), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(self.minimal), - identifiers.raw_extrinsic_metadata_identifier(metadata), + RawExtrinsicMetadata.from_dict(self.minimal).id, + RawExtrinsicMetadata.from_dict(metadata).id, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - "5c13f20ba336e44549baf3d7b9305b027ec9f43d", + RawExtrinsicMetadata.from_dict(metadata).id, + _x("5c13f20ba336e44549baf3d7b9305b027ec9f43d"), ) def test_microsecond_insensitive(self): """Checks the microseconds of the datetime.datetime does not affect the hashed manifest.""" metadata = { **self.minimal, "discovery_date": datetime.datetime( 2021, 1, 25, 11, 27, 51, 123456, tzinfo=datetime.timezone.utc, ), } self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(self.minimal), - identifiers.raw_extrinsic_metadata_git_object(metadata), + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(self.minimal) + ), + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(metadata) + ), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(self.minimal), - identifiers.raw_extrinsic_metadata_identifier(metadata), + RawExtrinsicMetadata.from_dict(self.minimal).id, + RawExtrinsicMetadata.from_dict(metadata).id, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - "5c13f20ba336e44549baf3d7b9305b027ec9f43d", + RawExtrinsicMetadata.from_dict(metadata).id, + _x("5c13f20ba336e44549baf3d7b9305b027ec9f43d"), ) def test_noninteger_timezone(self): """Checks the discovery_date is translated to UTC before truncating microseconds""" tz = datetime.timezone(datetime.timedelta(microseconds=-42)) metadata = { **self.minimal, "discovery_date": datetime.datetime( 2021, 1, 25, 11, 27, 50, 1_000_000 - 42, tzinfo=tz, ), } self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(self.minimal), - identifiers.raw_extrinsic_metadata_git_object(metadata), + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(self.minimal) + ), + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(metadata) + ), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(self.minimal), - identifiers.raw_extrinsic_metadata_identifier(metadata), + RawExtrinsicMetadata.from_dict(self.minimal).id, + RawExtrinsicMetadata.from_dict(metadata).id, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - "5c13f20ba336e44549baf3d7b9305b027ec9f43d", + RawExtrinsicMetadata.from_dict(metadata).id, + _x("5c13f20ba336e44549baf3d7b9305b027ec9f43d"), ) def test_negative_timestamp(self): metadata = { **self.minimal, "discovery_date": datetime.datetime( 1960, 1, 25, 11, 27, 51, 
tzinfo=datetime.timezone.utc, ), } git_object = ( b"raw_extrinsic_metadata 210\0" b"target swh:1:cnt:568aaf43d83b2c3df8067f3bedbb97d83260be6d\n" b"discovery_date -313504329\n" b"authority forge https://forge.softwareheritage.org/\n" b"fetcher swh-phabricator-metadata-fetcher 0.0.1\n" b"format json\n" b"\n" b'{"foo": "bar"}' ) self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(metadata), git_object, + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(metadata) + ), + git_object, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - hashlib.sha1(git_object).hexdigest(), + RawExtrinsicMetadata.from_dict(metadata).id, + hashlib.sha1(git_object).digest(), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - "895d0821a2991dd376ddc303424aceb7c68280f9", + RawExtrinsicMetadata.from_dict(metadata).id, + _x("895d0821a2991dd376ddc303424aceb7c68280f9"), ) def test_epoch(self): metadata = { **self.minimal, "discovery_date": datetime.datetime( 1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc, ), } git_object = ( b"raw_extrinsic_metadata 201\0" b"target swh:1:cnt:568aaf43d83b2c3df8067f3bedbb97d83260be6d\n" b"discovery_date 0\n" b"authority forge https://forge.softwareheritage.org/\n" b"fetcher swh-phabricator-metadata-fetcher 0.0.1\n" b"format json\n" b"\n" b'{"foo": "bar"}' ) self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(metadata), git_object, + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(metadata) + ), + git_object, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - hashlib.sha1(git_object).hexdigest(), + RawExtrinsicMetadata.from_dict(metadata).id, + hashlib.sha1(git_object).digest(), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - "27a53df54ace35ebd910493cdc70b334d6b7cb88", + RawExtrinsicMetadata.from_dict(metadata).id, + _x("27a53df54ace35ebd910493cdc70b334d6b7cb88"), ) def test_negative_epoch(self): metadata = { **self.minimal, "discovery_date": datetime.datetime( 1969, 12, 31, 23, 59, 59, 1, tzinfo=datetime.timezone.utc, ), } git_object = ( b"raw_extrinsic_metadata 202\0" b"target swh:1:cnt:568aaf43d83b2c3df8067f3bedbb97d83260be6d\n" b"discovery_date -1\n" b"authority forge https://forge.softwareheritage.org/\n" b"fetcher swh-phabricator-metadata-fetcher 0.0.1\n" b"format json\n" b"\n" b'{"foo": "bar"}' ) self.assertEqual( - identifiers.raw_extrinsic_metadata_git_object(metadata), git_object, + git_objects.raw_extrinsic_metadata_git_object( + RawExtrinsicMetadata.from_dict(metadata) + ), + git_object, ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - hashlib.sha1(git_object).hexdigest(), + RawExtrinsicMetadata.from_dict(metadata).id, + hashlib.sha1(git_object).digest(), ) self.assertEqual( - identifiers.raw_extrinsic_metadata_identifier(metadata), - "be7154a8fd49d87f81547ea634d1e2152907d089", + RawExtrinsicMetadata.from_dict(metadata).id, + _x("be7154a8fd49d87f81547ea634d1e2152907d089"), ) origin_example = { "url": "https://github.com/torvalds/linux", } class OriginIdentifier(unittest.TestCase): def test_origin_identifier(self): self.assertEqual( - identifiers.origin_identifier(origin_example), - "b63a575fe3faab7692c9f38fb09d4bb45651bb0f", + Origin.from_dict(origin_example).id, + _x("b63a575fe3faab7692c9f38fb09d4bb45651bb0f"), ) TS_DICTS = [ ( {"timestamp": 12345, "offset": 0}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0,
"negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": False}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": False}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": {"seconds": 12345}, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": None, }, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( { "timestamp": {"seconds": 12345, "microseconds": 100}, "offset": 0, "negative_utc": None, }, { "timestamp": {"seconds": 12345, "microseconds": 100}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": True}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": True, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ] @pytest.mark.parametrize("dict_input,expected", TS_DICTS) def test_normalize_timestamp_dict(dict_input, expected): - assert normalize_timestamp(dict_input) == expected + assert TimestampWithTimezone.from_dict(dict_input).to_dict() == expected TS_DICTS_INVALID_TIMESTAMP = [ {"timestamp": 1.2, "offset": 0}, {"timestamp": "1", "offset": 0}, # these below should really also trigger a ValueError... # {"timestamp": {"seconds": "1"}, "offset": 0}, # {"timestamp": {"seconds": 1.2}, "offset": 0}, # {"timestamp": {"seconds": 1.2}, "offset": 0}, ] @pytest.mark.parametrize("dict_input", TS_DICTS_INVALID_TIMESTAMP) def test_normalize_timestamp_dict_invalid_timestamp(dict_input): with pytest.raises(ValueError, match="non-integer timestamp"): - normalize_timestamp(dict_input) + TimestampWithTimezone.from_dict(dict_input) UTC = datetime.timezone.utc TS_TIMEZONES = [ datetime.timezone.min, datetime.timezone(datetime.timedelta(hours=-1)), UTC, datetime.timezone(datetime.timedelta(minutes=+60)), datetime.timezone.max, ] TS_TZ_EXPECTED = [-1439, -60, 0, 60, 1439] TS_DATETIMES = [ datetime.datetime(2020, 2, 27, 14, 39, 19, tzinfo=UTC), datetime.datetime(2120, 12, 31, 23, 59, 59, tzinfo=UTC), datetime.datetime(1610, 5, 14, 15, 43, 0, tzinfo=UTC), ] TS_DT_EXPECTED = [1582814359, 4765132799, -11348929020] @pytest.mark.parametrize("date, seconds", zip(TS_DATETIMES, TS_DT_EXPECTED)) @pytest.mark.parametrize("tz, offset", zip(TS_TIMEZONES, TS_TZ_EXPECTED)) @pytest.mark.parametrize("microsecond", [0, 1, 10, 100, 1000, 999999]) def test_normalize_timestamp_datetime(date, seconds, tz, offset, microsecond): date = date.astimezone(tz).replace(microsecond=microsecond) - assert normalize_timestamp(date) == { + assert TimestampWithTimezone.from_dict(date).to_dict() == { "timestamp": {"seconds": seconds, "microseconds": microsecond}, "offset": offset, "negative_utc": False, } # SWHIDs that are outright invalid, no matter the context INVALID_SWHIDS = [ "swh:1:cnt", "swh:1:", "swh:", "swh:1:cnt:", "foo:1:cnt:abc8bc9d7a6bcf6db04f476d29314f157507d505", "swh:2:dir:def8bc9d7a6bcf6db04f476d29314f157507d505", "swh:1:foo:fed8bc9d7a6bcf6db04f476d29314f157507d505", 
"swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;invalid;malformed", "swh:1:snp:gh6959356d30f1a4e9b7f6bca59b9a336464c03d", "swh:1:snp:foo", # wrong qualifier: ori should be origin "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa # wrong qualifier: anc should be anchor "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anc=1;visit=1;path=/", # noqa # wrong qualifier: vis should be visit "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=1;vis=1;path=/", # noqa # wrong qualifier: pa should be path "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=1;visit=1;pa=/", # noqa # wrong qualifier: line should be lines "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;line=10;origin=something;anchor=1;visit=1;path=/", # noqa # wrong qualifier value: it contains space before of after "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin= https://some-url", # noqa "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=some-anchor ", # noqa "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=something;anchor=some-anchor ;visit=1", # noqa # invalid swhid: whitespaces "swh :1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa "swh: 1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa "swh: 1: dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;ori=something;anchor=1;visit=1;path=/", # noqa "swh:1: dir: 0b6959356d30f1a4e9b7f6bca59b9a336464c03d", "swh:1: dir: 0b6959356d30f1a4e9b7f6bca59b9a336464c03d; origin=blah", "swh:1: dir: 0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", # other whitespaces "swh\t:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", "swh:1\n:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", "swh:1:\rdir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12", "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d\f;lines=12", "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;lines=12\v", ] SWHID_CLASSES = [CoreSWHID, QualifiedSWHID, ExtendedSWHID] @pytest.mark.parametrize( "invalid_swhid,swhid_class", itertools.product(INVALID_SWHIDS, SWHID_CLASSES) ) def test_swhid_parsing_error(invalid_swhid, swhid_class): """Tests SWHID strings that are invalid for all SWHID classes do raise a ValidationError""" with pytest.raises(ValidationError): swhid_class.from_string(invalid_swhid) # string SWHIDs, and how they should be parsed by each of the classes, # or None if the class does not support it HASH = "94a9ed024d3859793618152ea559a168bbcbb5e2" VALID_SWHIDS = [ ( f"swh:1:cnt:{HASH}", CoreSWHID(object_type=ObjectType.CONTENT, object_id=_x(HASH),), QualifiedSWHID(object_type=ObjectType.CONTENT, object_id=_x(HASH),), ExtendedSWHID(object_type=ExtendedObjectType.CONTENT, object_id=_x(HASH),), ), ( f"swh:1:dir:{HASH}", CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=_x(HASH),), QualifiedSWHID(object_type=ObjectType.DIRECTORY, object_id=_x(HASH),), ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=_x(HASH),), ), ( f"swh:1:rev:{HASH}", CoreSWHID(object_type=ObjectType.REVISION, object_id=_x(HASH),), QualifiedSWHID(object_type=ObjectType.REVISION, object_id=_x(HASH),), ExtendedSWHID(object_type=ExtendedObjectType.REVISION, object_id=_x(HASH),), ), ( f"swh:1:rel:{HASH}", CoreSWHID(object_type=ObjectType.RELEASE, object_id=_x(HASH),), QualifiedSWHID(object_type=ObjectType.RELEASE, object_id=_x(HASH),), 
ExtendedSWHID(object_type=ExtendedObjectType.RELEASE, object_id=_x(HASH),), ), ( f"swh:1:snp:{HASH}", CoreSWHID(object_type=ObjectType.SNAPSHOT, object_id=_x(HASH),), QualifiedSWHID(object_type=ObjectType.SNAPSHOT, object_id=_x(HASH),), ExtendedSWHID(object_type=ExtendedObjectType.SNAPSHOT, object_id=_x(HASH),), ), ( f"swh:1:cnt:{HASH};origin=https://github.com/python/cpython;lines=1-18", None, # CoreSWHID does not allow qualifiers QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), origin="https://github.com/python/cpython", lines=(1, 18), ), None, # Neither does ExtendedSWHID ), ( f"swh:1:cnt:{HASH};origin=https://github.com/python/cpython;lines=1-18/", None, # likewise None, None, # likewise ), ( f"swh:1:cnt:{HASH};origin=https://github.com/python/cpython;lines=18", None, # likewise QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), origin="https://github.com/python/cpython", lines=(18, None), ), None, # likewise ), ( f"swh:1:dir:{HASH};origin=deb://Debian/packages/linuxdoc-tools", None, # likewise QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=_x(HASH), origin="deb://Debian/packages/linuxdoc-tools", ), None, # likewise ), ( f"swh:1:ori:{HASH}", None, # CoreSWHID does not allow origin pseudo-SWHIDs None, # Neither does QualifiedSWHID ExtendedSWHID(object_type=ExtendedObjectType.ORIGIN, object_id=_x(HASH),), ), ( f"swh:1:emd:{HASH}", None, # likewise for metadata pseudo-SWHIDs None, # Neither does QualifiedSWHID ExtendedSWHID( object_type=ExtendedObjectType.RAW_EXTRINSIC_METADATA, object_id=_x(HASH), ), ), ( f"swh:1:emd:{HASH};origin=https://github.com/python/cpython", None, # CoreSWHID does not allow metadata pseudo-SWHIDs or qualifiers None, # QualifiedSWHID does not allow metadata pseudo-SWHIDs None, # ExtendedSWHID does not allow qualifiers ), ] @pytest.mark.parametrize( "string,core,qualified,extended", [ pytest.param(string, core, qualified, extended, id=string) for (string, core, qualified, extended) in VALID_SWHIDS ], ) def test_parse_unparse_swhids(string, core, qualified, extended): """Tests parsing and serializing valid SWHIDs with the various SWHID classes.""" classes = [CoreSWHID, QualifiedSWHID, ExtendedSWHID] for (cls, parsed_swhid) in zip(classes, [core, qualified, extended]): if parsed_swhid is None: # This class should not accept this SWHID with pytest.raises(ValidationError) as excinfo: cls.from_string(string) # Check string serialization for exception assert str(excinfo.value) is not None else: # This class should assert cls.from_string(string) == parsed_swhid # Also check serialization assert string == str(parsed_swhid) @pytest.mark.parametrize( "core,extended", [ pytest.param(core, extended, id=string) for (string, core, qualified, extended) in VALID_SWHIDS if core is not None ], ) def test_core_to_extended(core, extended): assert core.to_extended() == extended @pytest.mark.parametrize( "ns,version,type,id,qualifiers", [ ("foo", 1, ObjectType.CONTENT, "abc8bc9d7a6bcf6db04f476d29314f157507d505", {}), ("swh", 2, ObjectType.CONTENT, "def8bc9d7a6bcf6db04f476d29314f157507d505", {}), ("swh", 1, ObjectType.DIRECTORY, "aaaa", {}), ], ) def test_QualifiedSWHID_validation_error(ns, version, type, id, qualifiers): with pytest.raises(ValidationError): QualifiedSWHID( namespace=ns, scheme_version=version, object_type=type, object_id=_x(id), **qualifiers, ) @pytest.mark.parametrize( "object_type,qualifiers,expected", [ # No qualifier: (ObjectType.CONTENT, {}, f"swh:1:cnt:{HASH}"), # origin: (ObjectType.CONTENT, {"origin": None}, 
f"swh:1:cnt:{HASH}"), (ObjectType.CONTENT, {"origin": 42}, ValueError), # visit: ( ObjectType.CONTENT, {"visit": f"swh:1:snp:{HASH}"}, f"swh:1:cnt:{HASH};visit=swh:1:snp:{HASH}", ), ( ObjectType.CONTENT, {"visit": CoreSWHID(object_type=ObjectType.SNAPSHOT, object_id=_x(HASH))}, f"swh:1:cnt:{HASH};visit=swh:1:snp:{HASH}", ), (ObjectType.CONTENT, {"visit": 42}, TypeError), (ObjectType.CONTENT, {"visit": f"swh:1:rel:{HASH}"}, ValidationError,), ( ObjectType.CONTENT, {"visit": CoreSWHID(object_type=ObjectType.RELEASE, object_id=_x(HASH))}, ValidationError, ), # anchor: ( ObjectType.CONTENT, {"anchor": f"swh:1:snp:{HASH}"}, f"swh:1:cnt:{HASH};anchor=swh:1:snp:{HASH}", ), ( ObjectType.CONTENT, {"anchor": CoreSWHID(object_type=ObjectType.SNAPSHOT, object_id=_x(HASH))}, f"swh:1:cnt:{HASH};anchor=swh:1:snp:{HASH}", ), ( ObjectType.CONTENT, {"anchor": f"swh:1:dir:{HASH}"}, f"swh:1:cnt:{HASH};anchor=swh:1:dir:{HASH}", ), ( ObjectType.CONTENT, {"anchor": CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=_x(HASH))}, f"swh:1:cnt:{HASH};anchor=swh:1:dir:{HASH}", ), (ObjectType.CONTENT, {"anchor": 42}, TypeError), (ObjectType.CONTENT, {"anchor": f"swh:1:cnt:{HASH}"}, ValidationError,), ( ObjectType.CONTENT, {"anchor": CoreSWHID(object_type=ObjectType.CONTENT, object_id=_x(HASH))}, ValidationError, ), # path: (ObjectType.CONTENT, {"path": b"/foo"}, f"swh:1:cnt:{HASH};path=/foo",), ( ObjectType.CONTENT, {"path": b"/foo;bar"}, f"swh:1:cnt:{HASH};path=/foo%3Bbar", ), (ObjectType.CONTENT, {"path": "/foo"}, f"swh:1:cnt:{HASH};path=/foo",), ( ObjectType.CONTENT, {"path": "/foo;bar"}, f"swh:1:cnt:{HASH};path=/foo%3Bbar", ), (ObjectType.CONTENT, {"path": 42}, Exception), # lines: (ObjectType.CONTENT, {"lines": (42, None)}, f"swh:1:cnt:{HASH};lines=42",), (ObjectType.CONTENT, {"lines": (21, 42)}, f"swh:1:cnt:{HASH};lines=21-42",), (ObjectType.CONTENT, {"lines": 42}, TypeError,), (ObjectType.CONTENT, {"lines": (None, 42)}, ValueError,), (ObjectType.CONTENT, {"lines": ("42", None)}, ValueError,), ], ) def test_QualifiedSWHID_init(object_type, qualifiers, expected): """Tests validation and converters of qualifiers""" if isinstance(expected, type): assert issubclass(expected, Exception) with pytest.raises(expected): QualifiedSWHID(object_type=object_type, object_id=_x(HASH), **qualifiers) else: assert isinstance(expected, str) swhid = QualifiedSWHID( object_type=object_type, object_id=_x(HASH), **qualifiers ) # Check the build object has the right serialization assert expected == str(swhid) # Check the internal state of the object is the same as if parsed from a string assert QualifiedSWHID.from_string(expected) == swhid def test_QualifiedSWHID_hash(): object_id = _x("94a9ed024d3859793618152ea559a168bbcbb5e2") assert hash( QualifiedSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id) ) == hash(QualifiedSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id)) assert hash( QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, **dummy_qualifiers, ) ) == hash( QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, **dummy_qualifiers, ) ) # Different order of the dictionary, so the underlying order of the tuple in # ImmutableDict is different. 
assert hash( QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, origin="https://example.com", lines=(42, None), ) ) == hash( QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, lines=(42, None), origin="https://example.com", ) ) def test_QualifiedSWHID_eq(): object_id = _x("94a9ed024d3859793618152ea559a168bbcbb5e2") assert QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id ) == QualifiedSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id) assert QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, **dummy_qualifiers, ) == QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, **dummy_qualifiers, ) assert QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, **dummy_qualifiers, ) == QualifiedSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, **dummy_qualifiers, ) QUALIFIED_SWHIDS = [ # origin: ( f"swh:1:cnt:{HASH};origin=https://github.com/python/cpython", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), origin="https://github.com/python/cpython", ), ), ( f"swh:1:cnt:{HASH};origin=https://example.org/foo%3Bbar%25baz", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), origin="https://example.org/foo%3Bbar%25baz", ), ), ( f"swh:1:cnt:{HASH};origin=https://example.org?project=test", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), origin="https://example.org?project=test", ), ), # visit: ( f"swh:1:cnt:{HASH};visit=swh:1:snp:{HASH}", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), visit=CoreSWHID(object_type=ObjectType.SNAPSHOT, object_id=_x(HASH)), ), ), (f"swh:1:cnt:{HASH};visit=swh:1:rel:{HASH}", None,), # anchor: ( f"swh:1:cnt:{HASH};anchor=swh:1:dir:{HASH}", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), anchor=CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=_x(HASH)), ), ), ( f"swh:1:cnt:{HASH};anchor=swh:1:rev:{HASH}", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), anchor=CoreSWHID(object_type=ObjectType.REVISION, object_id=_x(HASH)), ), ), ( f"swh:1:cnt:{HASH};anchor=swh:1:cnt:{HASH}", None, # 'cnt' is not valid in anchor ), ( f"swh:1:cnt:{HASH};anchor=swh:1:ori:{HASH}", None, # 'ori' is not valid in a CoreSWHID ), # path: ( f"swh:1:cnt:{HASH};path=/foo", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), path=b"/foo" ), ), ( f"swh:1:cnt:{HASH};path=/foo%3Bbar", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), path=b"/foo;bar" ), ), ( f"swh:1:cnt:{HASH};path=/foo%25bar", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), path=b"/foo%bar" ), ), ( f"swh:1:cnt:{HASH};path=/foo/bar%3Dbaz", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), path=b"/foo/bar=baz" ), ), # lines ( f"swh:1:cnt:{HASH};lines=1-18", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), lines=(1, 18), ), ), ( f"swh:1:cnt:{HASH};lines=18", QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), lines=(18, None), ), ), (f"swh:1:cnt:{HASH};lines=", None,), (f"swh:1:cnt:{HASH};lines=aa", None,), (f"swh:1:cnt:{HASH};lines=18-aa", None,), ] @pytest.mark.parametrize("string,parsed", QUALIFIED_SWHIDS) def test_QualifiedSWHID_parse_serialize_qualifiers(string, parsed): """Tests parsing and serializing valid SWHIDs with the various SWHID classes.""" if parsed is None: with pytest.raises(ValidationError): print(repr(QualifiedSWHID.from_string(string))) else: assert 
QualifiedSWHID.from_string(string) == parsed assert str(parsed) == string def test_QualifiedSWHID_serialize_origin(): """Checks that semicolon in origins are escaped.""" string = f"swh:1:cnt:{HASH};origin=https://example.org/foo%3Bbar%25baz" swhid = QualifiedSWHID( object_type=ObjectType.CONTENT, object_id=_x(HASH), origin="https://example.org/foo;bar%25baz", ) assert str(swhid) == string def test_QualifiedSWHID_attributes(): """Checks the set of QualifiedSWHID attributes match the SWHID_QUALIFIERS constant.""" assert set(attr.fields_dict(QualifiedSWHID)) == { "namespace", "scheme_version", "object_type", "object_id", *SWHID_QUALIFIERS, } @pytest.mark.parametrize( "ns,version,type,id", [ ("foo", 1, ObjectType.CONTENT, "abc8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh", 2, ObjectType.CONTENT, "def8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh", 1, ObjectType.DIRECTORY, "aaaa"), ], ) def test_CoreSWHID_validation_error(ns, version, type, id): with pytest.raises(ValidationError): CoreSWHID( namespace=ns, scheme_version=version, object_type=type, object_id=_x(id), ) def test_CoreSWHID_hash(): object_id = _x("94a9ed024d3859793618152ea559a168bbcbb5e2") assert hash( CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id) ) == hash(CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id)) assert hash( CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id,) ) == hash(CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id,)) # Different order of the dictionary, so the underlying order of the tuple in # ImmutableDict is different. assert hash( CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id,) ) == hash(CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id,)) def test_CoreSWHID_eq(): object_id = _x("94a9ed024d3859793618152ea559a168bbcbb5e2") assert CoreSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id ) == CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id) assert CoreSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, ) == CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id,) assert CoreSWHID( object_type=ObjectType.DIRECTORY, object_id=object_id, ) == CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=object_id,) @pytest.mark.parametrize( "ns,version,type,id", [ ( "foo", 1, ExtendedObjectType.CONTENT, "abc8bc9d7a6bcf6db04f476d29314f157507d505", ), ( "swh", 2, ExtendedObjectType.CONTENT, "def8bc9d7a6bcf6db04f476d29314f157507d505", ), ("swh", 1, ExtendedObjectType.DIRECTORY, "aaaa"), ], ) def test_ExtendedSWHID_validation_error(ns, version, type, id): with pytest.raises(ValidationError): ExtendedSWHID( namespace=ns, scheme_version=version, object_type=type, object_id=_x(id), ) def test_ExtendedSWHID_hash(): object_id = _x("94a9ed024d3859793618152ea559a168bbcbb5e2") assert hash( ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id) ) == hash( ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id) ) assert hash( ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id,) ) == hash( ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id,) ) # Different order of the dictionary, so the underlying order of the tuple in # ImmutableDict is different. 
assert hash( ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id,) ) == hash( ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id,) ) def test_ExtendedSWHID_eq(): object_id = _x("94a9ed024d3859793618152ea559a168bbcbb5e2") assert ExtendedSWHID( object_type=ExtendedObjectType.DIRECTORY, object_id=object_id ) == ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id) assert ExtendedSWHID( object_type=ExtendedObjectType.DIRECTORY, object_id=object_id, ) == ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id,) assert ExtendedSWHID( object_type=ExtendedObjectType.DIRECTORY, object_id=object_id, ) == ExtendedSWHID(object_type=ExtendedObjectType.DIRECTORY, object_id=object_id,) def test_extid_identifier_bwcompat(): extid_dict = { "extid_type": "test-type", "extid": b"extid", "target": "swh:1:dir:" + "00" * 20, } - assert ( - identifiers.extid_identifier(extid_dict) - == "b9295e1931c31e40a7e3e1e967decd1c89426455" + assert ExtID.from_dict(extid_dict).id == _x( + "b9295e1931c31e40a7e3e1e967decd1c89426455" ) - assert identifiers.extid_identifier( - {**extid_dict, "extid_version": 0} - ) == identifiers.extid_identifier(extid_dict) + assert ( + ExtID.from_dict({**extid_dict, "extid_version": 0}).id + == ExtID.from_dict(extid_dict).id + ) - assert identifiers.extid_identifier( - {**extid_dict, "extid_version": 1} - ) != identifiers.extid_identifier(extid_dict) + assert ( + ExtID.from_dict({**extid_dict, "extid_version": 1}).id + != ExtID.from_dict(extid_dict).id + ) def test_object_types(): """Checks ExtendedObjectType is a superset of ObjectType""" for member in ObjectType: assert getattr(ExtendedObjectType, member.name).value == member.value diff --git a/swh/model/tests/test_model.py b/swh/model/tests/test_model.py index caad5e2..781cfa4 100644 --- a/swh/model/tests/test_model.py +++ b/swh/model/tests/test_model.py @@ -1,1191 +1,1090 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import attr from attrs_strict import AttributeTypeError from hypothesis import given from hypothesis.strategies import binary import pytest -from swh.model.hashutil import MultiHash, hash_to_bytes, hash_to_hex +from swh.model.hashutil import MultiHash, hash_to_bytes import swh.model.hypothesis_strategies as strategies -from swh.model.identifiers import ( - CoreSWHID, - ExtendedSWHID, - ObjectType, - content_identifier, - directory_identifier, - origin_identifier, - raw_extrinsic_metadata_identifier, - release_identifier, - revision_identifier, - snapshot_identifier, -) from swh.model.model import ( BaseModel, Content, Directory, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MissingData, Origin, OriginVisit, OriginVisitStatus, Person, RawExtrinsicMetadata, Release, Revision, SkippedContent, Snapshot, Timestamp, TimestampWithTimezone, ) +from swh.model.swhids import CoreSWHID, ExtendedSWHID, ObjectType from swh.model.tests.swh_model_data import TEST_OBJECTS from swh.model.tests.test_identifiers import ( TS_DATETIMES, TS_TIMEZONES, - content_example, directory_example, metadata_example, - origin_example, release_example, revision_example, snapshot_example, ) EXAMPLE_HASH = hash_to_bytes("94a9ed024d3859793618152ea559a168bbcbb5e2") @given(strategies.objects()) def 
test_todict_inverse_fromdict(objtype_and_obj): (obj_type, obj) = objtype_and_obj if obj_type in ("origin", "origin_visit"): return obj_as_dict = obj.to_dict() obj_as_dict_copy = copy.deepcopy(obj_as_dict) # Check the composition of to_dict and from_dict is the identity assert obj == type(obj).from_dict(obj_as_dict) # Check from_dict() does not change the input dict assert obj_as_dict == obj_as_dict_copy # Check the composition of from_dict and to_dict is the identity assert obj_as_dict == type(obj).from_dict(obj_as_dict).to_dict() @pytest.mark.parametrize("object_type, objects", TEST_OBJECTS.items()) def test_swh_model_todict_fromdict(object_type, objects): """checks model objects in swh_model_data are in correct shape""" assert objects for obj in objects: # Check the composition of from_dict and to_dict is the identity obj_as_dict = obj.to_dict() assert obj == type(obj).from_dict(obj_as_dict) assert obj_as_dict == type(obj).from_dict(obj_as_dict).to_dict() def test_unique_key(): url = "http://example.org/" date = datetime.datetime.now(tz=datetime.timezone.utc) id_ = b"42" * 10 assert Origin(url=url).unique_key() == {"url": url} assert OriginVisit(origin=url, date=date, type="git").unique_key() == { "origin": url, "date": str(date), } assert OriginVisitStatus( origin=url, visit=42, date=date, status="created", snapshot=None ).unique_key() == {"origin": url, "visit": "42", "date": str(date),} assert Snapshot.from_dict({**snapshot_example, "id": id_}).unique_key() == id_ assert Release.from_dict({**release_example, "id": id_}).unique_key() == id_ assert Revision.from_dict({**revision_example, "id": id_}).unique_key() == id_ assert Directory.from_dict({**directory_example, "id": id_}).unique_key() == id_ assert ( RawExtrinsicMetadata.from_dict({**metadata_example, "id": id_}).unique_key() == id_ ) cont = Content.from_data(b"foo") assert cont.unique_key().hex() == "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" kwargs = { **cont.to_dict(), "reason": "foo", "status": "absent", } del kwargs["data"] assert SkippedContent(**kwargs).unique_key() == cont.hashes() # Anonymization @given(strategies.objects()) def test_anonymization(objtype_and_obj): (obj_type, obj) = objtype_and_obj def check_person(p): if p is not None: assert p.name is None assert p.email is None assert len(p.fullname) == 32 anon_obj = obj.anonymize() if obj_type == "person": assert anon_obj is not None check_person(anon_obj) elif obj_type == "release": assert anon_obj is not None check_person(anon_obj.author) elif obj_type == "revision": assert anon_obj is not None check_person(anon_obj.author) check_person(anon_obj.committer) else: assert anon_obj is None # Origin, OriginVisit, OriginVisitStatus @given(strategies.origins()) def test_todict_origins(origin): obj = origin.to_dict() assert "type" not in obj assert type(origin)(url=origin.url) == type(origin).from_dict(obj) @given(strategies.origin_visits()) def test_todict_origin_visits(origin_visit): obj = origin_visit.to_dict() assert origin_visit == type(origin_visit).from_dict(obj) def test_origin_visit_naive_datetime(): with pytest.raises(ValueError, match="must be a timezone-aware datetime"): OriginVisit( origin="http://foo/", date=datetime.datetime.now(), type="git", ) @given(strategies.origin_visit_statuses()) def test_todict_origin_visit_statuses(origin_visit_status): obj = origin_visit_status.to_dict() assert origin_visit_status == type(origin_visit_status).from_dict(obj) def test_origin_visit_status_naive_datetime(): with pytest.raises(ValueError, match="must be a timezone-aware 
datetime"): OriginVisitStatus( origin="http://foo/", visit=42, date=datetime.datetime.now(), status="ongoing", snapshot=None, ) # Timestamp @given(strategies.timestamps()) def test_timestamps_strategy(timestamp): attr.validate(timestamp) def test_timestamp_seconds(): attr.validate(Timestamp(seconds=0, microseconds=0)) with pytest.raises(AttributeTypeError): Timestamp(seconds="0", microseconds=0) attr.validate(Timestamp(seconds=2 ** 63 - 1, microseconds=0)) with pytest.raises(ValueError): Timestamp(seconds=2 ** 63, microseconds=0) attr.validate(Timestamp(seconds=-(2 ** 63), microseconds=0)) with pytest.raises(ValueError): Timestamp(seconds=-(2 ** 63) - 1, microseconds=0) def test_timestamp_microseconds(): attr.validate(Timestamp(seconds=0, microseconds=0)) with pytest.raises(AttributeTypeError): Timestamp(seconds=0, microseconds="0") attr.validate(Timestamp(seconds=0, microseconds=10 ** 6 - 1)) with pytest.raises(ValueError): Timestamp(seconds=0, microseconds=10 ** 6) with pytest.raises(ValueError): Timestamp(seconds=0, microseconds=-1) def test_timestamp_from_dict(): assert Timestamp.from_dict({"seconds": 10, "microseconds": 5}) with pytest.raises(AttributeTypeError): Timestamp.from_dict({"seconds": "10", "microseconds": 5}) with pytest.raises(AttributeTypeError): Timestamp.from_dict({"seconds": 10, "microseconds": "5"}) with pytest.raises(ValueError): Timestamp.from_dict({"seconds": 0, "microseconds": -1}) Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6 - 1}) with pytest.raises(ValueError): Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6}) # TimestampWithTimezone def test_timestampwithtimezone(): ts = Timestamp(seconds=0, microseconds=0) tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=False) attr.validate(tstz) assert tstz.negative_utc is False attr.validate(TimestampWithTimezone(timestamp=ts, offset=10, negative_utc=False)) attr.validate(TimestampWithTimezone(timestamp=ts, offset=-10, negative_utc=False)) tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=True) attr.validate(tstz) assert tstz.negative_utc is True with pytest.raises(AttributeTypeError): TimestampWithTimezone( timestamp=datetime.datetime.now(), offset=0, negative_utc=False ) with pytest.raises(AttributeTypeError): TimestampWithTimezone(timestamp=ts, offset="0", negative_utc=False) with pytest.raises(AttributeTypeError): TimestampWithTimezone(timestamp=ts, offset=1.0, negative_utc=False) with pytest.raises(AttributeTypeError): TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=0) with pytest.raises(ValueError): TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=True) with pytest.raises(ValueError): TimestampWithTimezone(timestamp=ts, offset=-1, negative_utc=True) def test_timestampwithtimezone_from_datetime(): tz = datetime.timezone(datetime.timedelta(minutes=+60)) date = datetime.datetime(2020, 2, 27, 14, 39, 19, tzinfo=tz) tstz = TimestampWithTimezone.from_datetime(date) assert tstz == TimestampWithTimezone( timestamp=Timestamp(seconds=1582810759, microseconds=0,), offset=60, negative_utc=False, ) def test_timestampwithtimezone_from_naive_datetime(): date = datetime.datetime(2020, 2, 27, 14, 39, 19) with pytest.raises(ValueError, match="datetime without timezone"): TimestampWithTimezone.from_datetime(date) def test_timestampwithtimezone_from_iso8601(): date = "2020-02-27 14:39:19.123456+0100" tstz = TimestampWithTimezone.from_iso8601(date) assert tstz == TimestampWithTimezone( timestamp=Timestamp(seconds=1582810759, microseconds=123456,), offset=60, 
        negative_utc=False,
    )


def test_timestampwithtimezone_from_iso8601_negative_utc():
    date = "2020-02-27 13:39:19-0000"
    tstz = TimestampWithTimezone.from_iso8601(date)
    assert tstz == TimestampWithTimezone(
        timestamp=Timestamp(seconds=1582810759, microseconds=0,),
        offset=0,
        negative_utc=True,
    )


@pytest.mark.parametrize("date", TS_DATETIMES)
@pytest.mark.parametrize("tz", TS_TIMEZONES)
@pytest.mark.parametrize("microsecond", [0, 1, 10, 100, 1000, 999999])
def test_timestampwithtimezone_to_datetime(date, tz, microsecond):
    date = date.replace(tzinfo=tz, microsecond=microsecond)
    tstz = TimestampWithTimezone.from_datetime(date)
    assert tstz.to_datetime() == date
    assert tstz.to_datetime().utcoffset() == date.utcoffset()


def test_person_from_fullname():
    """The author should have name, email and fullname filled.

    """
    actual_person = Person.from_fullname(b"tony <ynot@dagobah>")
    assert actual_person == Person(
        fullname=b"tony <ynot@dagobah>", name=b"tony", email=b"ynot@dagobah",
    )


def test_person_from_fullname_no_email():
    """The name and fullname should be the same as the input when there
    is no email part.

    """
    actual_person = Person.from_fullname(b"tony")
    assert actual_person == Person(fullname=b"tony", name=b"tony", email=None,)


def test_person_from_fullname_empty_person():
    """Empty person has only its fullname filled with the empty
    byte-string.

    """
    actual_person = Person.from_fullname(b"")
    assert actual_person == Person(fullname=b"", name=None, email=None,)


def test_git_author_line_to_author():
    # edge case out of the way
    with pytest.raises(TypeError):
        Person.from_fullname(None)

    tests = {
        b"a <b@c.com>": Person(name=b"a", email=b"b@c.com", fullname=b"a <b@c.com>",),
        b"<foo@bar.com>": Person(
            name=None, email=b"foo@bar.com", fullname=b"<foo@bar.com>",
        ),
        b'malformed <"<br"@ckets>': Person(
            name=b"malformed",
            email=b'"<br"@ckets',
            fullname=b'malformed <"<br"@ckets>',
        ),
        b"trailing <sp@c.e> ": Person(
            name=b"trailing", email=b"sp@c.e", fullname=b"trailing <sp@c.e> ",
        ),
        b"no<sp@c.e>": Person(name=b"no", email=b"sp@c.e", fullname=b"no<sp@c.e>",),
        b" more <sp@c.es>": Person(
            name=b"more", email=b"sp@c.es", fullname=b" more <sp@c.es>",
        ),
        b" <>": Person(name=None, email=None, fullname=b" <>",),
    }

    for person in sorted(tests):
        expected_person = tests[person]
        assert expected_person == Person.from_fullname(person)


# Content


def test_content_get_hash():
    hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
    c = Content(length=42, status="visible", **hashes)
    for (hash_name, hash_) in hashes.items():
        assert c.get_hash(hash_name) == hash_


def test_content_hashes():
    hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
    c = Content(length=42, status="visible", **hashes)
    assert c.hashes() == hashes


def test_content_data():
    c = Content(
        length=42,
        status="visible",
        data=b"foo",
        sha1=b"foo",
        sha1_git=b"bar",
        sha256=b"baz",
        blake2s256=b"qux",
    )
    assert c.with_data() == c


def test_content_data_missing():
    c = Content(
        length=42,
        status="visible",
        sha1=b"foo",
        sha1_git=b"bar",
        sha256=b"baz",
        blake2s256=b"qux",
    )
    with pytest.raises(MissingData):
        c.with_data()


@given(strategies.present_contents_d())
def test_content_from_dict(content_d):
    c = Content.from_data(**content_d)
    assert c
    assert c.ctime == content_d["ctime"]

    content_d2 = c.to_dict()
    c2 = Content.from_dict(content_d2)
    assert c2.ctime == c.ctime


def test_content_from_dict_str_ctime():
    # test with ctime as a string
    n = datetime.datetime(2020, 5, 6, 12, 34, tzinfo=datetime.timezone.utc)
    content_d = {
        "ctime": n.isoformat(),
        "data": b"",
        "length": 0,
        "sha1": b"\x00",
        "sha256": b"\x00",
        "sha1_git": b"\x00",
        "blake2s256": b"\x00",
    }
    c = Content.from_dict(content_d)
    assert c.ctime == n


def test_content_from_dict_str_naive_ctime():
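    # a naive datetime given as an ISO8601 string must still be rejected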
# test with ctime as a string n = datetime.datetime(2020, 5, 6, 12, 34) content_d = { "ctime": n.isoformat(), "data": b"", "length": 0, "sha1": b"\x00", "sha256": b"\x00", "sha1_git": b"\x00", "blake2s256": b"\x00", } with pytest.raises(ValueError, match="must be a timezone-aware datetime."): Content.from_dict(content_d) @given(binary(max_size=4096)) def test_content_from_data(data): c = Content.from_data(data) assert c.data == data assert c.length == len(data) assert c.status == "visible" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value @given(binary(max_size=4096)) def test_hidden_content_from_data(data): c = Content.from_data(data, status="hidden") assert c.data == data assert c.length == len(data) assert c.status == "hidden" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value def test_content_naive_datetime(): c = Content.from_data(b"foo") with pytest.raises(ValueError, match="must be a timezone-aware datetime"): Content( **c.to_dict(), ctime=datetime.datetime.now(), ) # SkippedContent @given(binary(max_size=4096)) def test_skipped_content_from_data(data): c = SkippedContent.from_data(data, reason="reason") assert c.reason == "reason" assert c.length == len(data) assert c.status == "absent" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value @given(strategies.skipped_contents_d()) def test_skipped_content_origin_is_str(skipped_content_d): assert SkippedContent.from_dict(skipped_content_d) skipped_content_d["origin"] = "http://path/to/origin" assert SkippedContent.from_dict(skipped_content_d) skipped_content_d["origin"] = Origin(url="http://path/to/origin") with pytest.raises(ValueError, match="origin"): SkippedContent.from_dict(skipped_content_d) def test_skipped_content_naive_datetime(): c = SkippedContent.from_data(b"foo", reason="reason") with pytest.raises(ValueError, match="must be a timezone-aware datetime"): SkippedContent( **c.to_dict(), ctime=datetime.datetime.now(), ) # Revision def test_revision_extra_headers_no_headers(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_model = Revision(**rev_dict) assert rev_model.metadata is None assert rev_model.extra_headers == () rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } rev_model = Revision(**rev_dict) assert rev_model.metadata == rev_dict["metadata"] assert rev_model.extra_headers == () def test_revision_extra_headers_with_headers(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\x00"), (b"header1", b"again"), ) rev_dict["extra_headers"] = extra_headers rev_model = Revision(**rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_in_metadata(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\x00"), (b"header1", b"again"), ) # check the bw-compat init hook does the job # ie. 
extra_headers are given in the metadata field rev_dict["metadata"]["extra_headers"] = extra_headers rev_model = Revision(**rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_as_lists(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_dict["metadata"] = {} extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\x00"), (b"header1", b"again"), ) # check Revision.extra_headers tuplify does the job rev_dict["extra_headers"] = [list(x) for x in extra_headers] rev_model = Revision(**rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_type_error(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) orig_rev_dict = attr.asdict(rev, recurse=False) orig_rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( ("header1", b"value1"), (b"header2", 42), ("header1", "again"), ) # check headers one at a time # if given as extra_header for extra_header in extra_headers: rev_dict = copy.deepcopy(orig_rev_dict) rev_dict["extra_headers"] = (extra_header,) with pytest.raises(AttributeTypeError): Revision(**rev_dict) # if given as metadata for extra_header in extra_headers: rev_dict = copy.deepcopy(orig_rev_dict) rev_dict["metadata"]["extra_headers"] = (extra_header,) with pytest.raises(AttributeTypeError): Revision(**rev_dict) def test_revision_extra_headers_from_dict(): rev_dict = revision_example.copy() rev_dict.pop("id") rev_model = Revision.from_dict(rev_dict) assert rev_model.metadata is None assert rev_model.extra_headers == () rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } rev_model = Revision.from_dict(rev_dict) assert rev_model.metadata == rev_dict["metadata"] assert rev_model.extra_headers == () extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\nmaybe\x00\xff"), (b"header1", b"again"), ) rev_dict["extra_headers"] = extra_headers rev_model = Revision.from_dict(rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_in_metadata_from_dict(): rev_dict = revision_example.copy() rev_dict.pop("id") rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\nmaybe\x00\xff"), (b"header1", b"again"), ) # check the bw-compat init hook does the job rev_dict["metadata"]["extra_headers"] = extra_headers rev_model = Revision.from_dict(rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_as_lists_from_dict(): rev_dict = revision_example.copy() rev_dict.pop("id") rev_model = Revision.from_dict(rev_dict) rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\nmaybe\x00\xff"), (b"header1", b"again"), ) # check Revision.extra_headers converter does the job rev_dict["extra_headers"] = [list(x) for x in extra_headers] rev_model = Revision.from_dict(rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers -# ID computation - - 
-def test_content_model_id_computation(): - cnt_dict = content_example.copy() - - cnt_id_str = hash_to_hex(content_identifier(cnt_dict)["sha1_git"]) - cnt_model = Content.from_data(cnt_dict["data"]) - assert str(cnt_model.swhid()) == "swh:1:cnt:" + cnt_id_str - - -def test_directory_model_id_computation(): - dir_dict = directory_example.copy() - del dir_dict["id"] - - dir_id_str = directory_identifier(dir_dict) - dir_id = hash_to_bytes(dir_id_str) - dir_model = Directory.from_dict(dir_dict) - assert dir_model.id == dir_id - assert str(dir_model.swhid()) == "swh:1:dir:" + dir_id_str - - -def test_revision_model_id_computation(): - rev_dict = revision_example.copy() - del rev_dict["id"] - - rev_id_str = revision_identifier(rev_dict) - rev_id = hash_to_bytes(rev_id_str) - rev_model = Revision.from_dict(rev_dict) - assert rev_model.id == rev_id - assert str(rev_model.swhid()) == "swh:1:rev:" + rev_id_str - - -def test_revision_model_id_computation_with_no_date(): - """We can have revision with date to None - - """ - rev_dict = revision_example.copy() - rev_dict["date"] = None - rev_dict["committer_date"] = None - del rev_dict["id"] - - rev_id = hash_to_bytes(revision_identifier(rev_dict)) - rev_model = Revision.from_dict(rev_dict) - assert rev_model.date is None - assert rev_model.committer_date is None - assert rev_model.id == rev_id - - -def test_release_model_id_computation(): - rel_dict = release_example.copy() - del rel_dict["id"] - - rel_id_str = release_identifier(rel_dict) - rel_id = hash_to_bytes(rel_id_str) - rel_model = Release.from_dict(rel_dict) - assert isinstance(rel_model.date, TimestampWithTimezone) - assert rel_model.id == hash_to_bytes(rel_id) - assert str(rel_model.swhid()) == "swh:1:rel:" + rel_id_str - - -def test_snapshot_model_id_computation(): - snp_dict = snapshot_example.copy() - del snp_dict["id"] - - snp_id_str = snapshot_identifier(snp_dict) - snp_id = hash_to_bytes(snp_id_str) - snp_model = Snapshot.from_dict(snp_dict) - assert snp_model.id == snp_id - assert str(snp_model.swhid()) == "swh:1:snp:" + snp_id_str - - -def test_origin_model_id_computation(): - ori_dict = origin_example.copy() - - ori_id_str = origin_identifier(ori_dict) - ori_model = Origin.from_dict(ori_dict) - assert str(ori_model.swhid()) == "swh:1:ori:" + ori_id_str - - -def test_raw_extrinsic_metadata_model_id_computation(): - emd_dict = metadata_example.copy() - - emd_id_str = raw_extrinsic_metadata_identifier(emd_dict) - emd_model = RawExtrinsicMetadata.from_dict(emd_dict) - assert str(emd_model.swhid()) == "swh:1:emd:" + emd_id_str - - @given(strategies.objects(split_content=True)) def test_object_type(objtype_and_obj): obj_type, obj = objtype_and_obj assert obj_type == obj.object_type def test_object_type_is_final(): object_types = set() def check_final(cls): if hasattr(cls, "object_type"): assert cls.object_type not in object_types object_types.add(cls.object_type) if cls.__subclasses__(): assert not hasattr(cls, "object_type") for subcls in cls.__subclasses__(): check_final(subcls) check_final(BaseModel) _metadata_authority = MetadataAuthority( type=MetadataAuthorityType.FORGE, url="https://forge.softwareheritage.org", ) _metadata_fetcher = MetadataFetcher(name="test-fetcher", version="0.0.1",) _content_swhid = ExtendedSWHID.from_string( "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2" ) _origin_url = "https://forge.softwareheritage.org/source/swh-model.git" _origin_swhid = ExtendedSWHID.from_string( "swh:1:ori:94a9ed024d3859793618152ea559a168bbcbb5e2" ) _dummy_qualifiers = {"origin": 
"https://example.com", "lines": "42"} _common_metadata_fields = dict( discovery_date=datetime.datetime( 2021, 1, 29, 13, 57, 9, tzinfo=datetime.timezone.utc ), authority=_metadata_authority, fetcher=_metadata_fetcher, format="json", metadata=b'{"origin": "https://example.com", "lines": "42"}', ) def test_metadata_valid(): """Checks valid RawExtrinsicMetadata objects don't raise an error.""" # Simplest case RawExtrinsicMetadata(target=_origin_swhid, **_common_metadata_fields) # Object with an SWHID RawExtrinsicMetadata( target=_content_swhid, **_common_metadata_fields, ) def test_metadata_to_dict(): """Checks valid RawExtrinsicMetadata objects don't raise an error.""" common_fields = { "authority": {"type": "forge", "url": "https://forge.softwareheritage.org"}, "fetcher": {"name": "test-fetcher", "version": "0.0.1",}, "discovery_date": _common_metadata_fields["discovery_date"], "format": "json", "metadata": b'{"origin": "https://example.com", "lines": "42"}', } m = RawExtrinsicMetadata(target=_origin_swhid, **_common_metadata_fields,) assert m.to_dict() == { "target": str(_origin_swhid), "id": b"@j\xc9\x01\xbc\x1e#p*\xf3q9\xa7u\x97\x00\x14\x02xa", **common_fields, } assert RawExtrinsicMetadata.from_dict(m.to_dict()) == m m = RawExtrinsicMetadata(target=_content_swhid, **_common_metadata_fields,) assert m.to_dict() == { "target": "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", "id": b"\xbc\xa3U\xddf\x19U\xc5\xd2\xd7\xdfK\xd7c\x1f\xa8\xfeh\x992", **common_fields, } assert RawExtrinsicMetadata.from_dict(m.to_dict()) == m hash_hex = "6162" * 10 hash_bin = b"ab" * 10 m = RawExtrinsicMetadata( target=_content_swhid, **_common_metadata_fields, origin="https://example.org/", snapshot=CoreSWHID(object_type=ObjectType.SNAPSHOT, object_id=hash_bin), release=CoreSWHID(object_type=ObjectType.RELEASE, object_id=hash_bin), revision=CoreSWHID(object_type=ObjectType.REVISION, object_id=hash_bin), path=b"/foo/bar", directory=CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=hash_bin), ) assert m.to_dict() == { "target": "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", "id": b"\x14l\xb0\x1f\xb9\xc0{)\xc7\x0f\xbd\xc0*,YZ\xf5C\xab\xfc", **common_fields, "origin": "https://example.org/", "snapshot": f"swh:1:snp:{hash_hex}", "release": f"swh:1:rel:{hash_hex}", "revision": f"swh:1:rev:{hash_hex}", "path": b"/foo/bar", "directory": f"swh:1:dir:{hash_hex}", } assert RawExtrinsicMetadata.from_dict(m.to_dict()) == m def test_metadata_invalid_target(): """Checks various invalid values for the 'target' field.""" # SWHID passed as string instead of SWHID with pytest.raises(ValueError, match="target must be.*ExtendedSWHID"): RawExtrinsicMetadata( target="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", **_common_metadata_fields, ) def test_metadata_naive_datetime(): with pytest.raises(ValueError, match="must be a timezone-aware datetime"): RawExtrinsicMetadata( target=_origin_swhid, **{**_common_metadata_fields, "discovery_date": datetime.datetime.now()}, ) def test_metadata_validate_context_origin(): """Checks validation of RawExtrinsicMetadata.origin.""" # Origins can't have an 'origin' context with pytest.raises( ValueError, match="Unexpected 'origin' context for origin object" ): RawExtrinsicMetadata( target=_origin_swhid, origin=_origin_url, **_common_metadata_fields, ) # but all other types can RawExtrinsicMetadata( target=_content_swhid, origin=_origin_url, **_common_metadata_fields, ) # SWHIDs aren't valid origin URLs with pytest.raises(ValueError, match="SWHID used as context origin URL"): 
RawExtrinsicMetadata( target=_content_swhid, origin="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", **_common_metadata_fields, ) def test_metadata_validate_context_visit(): """Checks validation of RawExtrinsicMetadata.visit.""" # Origins can't have a 'visit' context with pytest.raises( ValueError, match="Unexpected 'visit' context for origin object" ): RawExtrinsicMetadata( target=_origin_swhid, visit=42, **_common_metadata_fields, ) # but all other types can RawExtrinsicMetadata( target=_content_swhid, origin=_origin_url, visit=42, **_common_metadata_fields, ) # Missing 'origin' with pytest.raises(ValueError, match="'origin' context must be set if 'visit' is"): RawExtrinsicMetadata( target=_content_swhid, visit=42, **_common_metadata_fields, ) # visit id must be positive with pytest.raises(ValueError, match="Nonpositive visit id"): RawExtrinsicMetadata( target=_content_swhid, origin=_origin_url, visit=-42, **_common_metadata_fields, ) def test_metadata_validate_context_snapshot(): """Checks validation of RawExtrinsicMetadata.snapshot.""" # Origins can't have a 'snapshot' context with pytest.raises( ValueError, match="Unexpected 'snapshot' context for origin object" ): RawExtrinsicMetadata( target=_origin_swhid, snapshot=CoreSWHID( object_type=ObjectType.SNAPSHOT, object_id=EXAMPLE_HASH, ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( target=_content_swhid, snapshot=CoreSWHID(object_type=ObjectType.SNAPSHOT, object_id=EXAMPLE_HASH), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'snapshot', got 'content'" ): RawExtrinsicMetadata( target=_content_swhid, snapshot=CoreSWHID(object_type=ObjectType.CONTENT, object_id=EXAMPLE_HASH,), **_common_metadata_fields, ) def test_metadata_validate_context_release(): """Checks validation of RawExtrinsicMetadata.release.""" # Origins can't have a 'release' context with pytest.raises( ValueError, match="Unexpected 'release' context for origin object" ): RawExtrinsicMetadata( target=_origin_swhid, release=CoreSWHID(object_type=ObjectType.RELEASE, object_id=EXAMPLE_HASH,), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( target=_content_swhid, release=CoreSWHID(object_type=ObjectType.RELEASE, object_id=EXAMPLE_HASH), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'release', got 'content'" ): RawExtrinsicMetadata( target=_content_swhid, release=CoreSWHID(object_type=ObjectType.CONTENT, object_id=EXAMPLE_HASH,), **_common_metadata_fields, ) def test_metadata_validate_context_revision(): """Checks validation of RawExtrinsicMetadata.revision.""" # Origins can't have a 'revision' context with pytest.raises( ValueError, match="Unexpected 'revision' context for origin object" ): RawExtrinsicMetadata( target=_origin_swhid, revision=CoreSWHID( object_type=ObjectType.REVISION, object_id=EXAMPLE_HASH, ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( target=_content_swhid, revision=CoreSWHID(object_type=ObjectType.REVISION, object_id=EXAMPLE_HASH), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'revision', got 'content'" ): RawExtrinsicMetadata( target=_content_swhid, revision=CoreSWHID(object_type=ObjectType.CONTENT, object_id=EXAMPLE_HASH,), **_common_metadata_fields, ) def 
test_metadata_validate_context_path(): """Checks validation of RawExtrinsicMetadata.path.""" # Origins can't have a 'path' context with pytest.raises(ValueError, match="Unexpected 'path' context for origin object"): RawExtrinsicMetadata( target=_origin_swhid, path=b"/foo/bar", **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( target=_content_swhid, path=b"/foo/bar", **_common_metadata_fields, ) def test_metadata_validate_context_directory(): """Checks validation of RawExtrinsicMetadata.directory.""" # Origins can't have a 'directory' context with pytest.raises( ValueError, match="Unexpected 'directory' context for origin object" ): RawExtrinsicMetadata( target=_origin_swhid, directory=CoreSWHID( object_type=ObjectType.DIRECTORY, object_id=EXAMPLE_HASH, ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( target=_content_swhid, directory=CoreSWHID(object_type=ObjectType.DIRECTORY, object_id=EXAMPLE_HASH,), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'directory', got 'content'" ): RawExtrinsicMetadata( target=_content_swhid, directory=CoreSWHID( object_type=ObjectType.CONTENT, object_id=EXAMPLE_HASH, ), **_common_metadata_fields, ) def test_metadata_normalize_discovery_date(): fields_copy = {**_common_metadata_fields} truncated_date = fields_copy.pop("discovery_date") assert truncated_date.microsecond == 0 # Check for TypeError on disabled object type: we removed attrs_strict's # type_validator with pytest.raises(TypeError): RawExtrinsicMetadata( target=_content_swhid, discovery_date="not a datetime", **fields_copy ) # Check for truncation to integral second date_with_us = truncated_date.replace(microsecond=42) md = RawExtrinsicMetadata( target=_content_swhid, discovery_date=date_with_us, **fields_copy, ) assert md.discovery_date == truncated_date assert md.discovery_date.tzinfo == datetime.timezone.utc # Check that the timezone gets normalized. Timezones can be offset by a # non-integral number of seconds, so we need to handle that. timezone = datetime.timezone(offset=datetime.timedelta(hours=2)) date_with_tz = truncated_date.astimezone(timezone) assert date_with_tz.tzinfo != datetime.timezone.utc md = RawExtrinsicMetadata( target=_content_swhid, discovery_date=date_with_tz, **fields_copy, ) assert md.discovery_date == truncated_date assert md.discovery_date.tzinfo == datetime.timezone.utc
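
As a closing usage note: the object-oriented API that this patch migrates the tests to can be summarized in a short sketch. The snippet below is illustrative only, not part of the patch; it assumes a swh.model version where ``ExtID`` and ``TimestampWithTimezone`` live in ``swh.model.model`` and ``QualifiedSWHID`` in ``swh.model.swhids``, as the imports in the hunks above suggest. The expected values are taken verbatim from the tests themselves:

    from swh.model.model import ExtID, TimestampWithTimezone
    from swh.model.swhids import QualifiedSWHID

    # normalize_timestamp(d) is superseded by TimestampWithTimezone.from_dict(d):
    tstz = TimestampWithTimezone.from_dict(
        {"timestamp": 12345, "offset": 0, "negative_utc": None}
    )
    assert tstz.to_dict() == {
        "timestamp": {"seconds": 12345, "microseconds": 0},
        "offset": 0,
        "negative_utc": False,
    }

    # SWHID strings are parsed by the class matching their context, and
    # parsing/serializing round-trips:
    swhid_str = "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2;lines=1-18"
    swhid = QualifiedSWHID.from_string(swhid_str)
    assert swhid.lines == (1, 18)
    assert str(swhid) == swhid_str

    # extid_identifier(d) is superseded by ExtID.from_dict(d).id:
    extid = ExtID.from_dict(
        {"extid_type": "test-type", "extid": b"extid", "target": "swh:1:dir:" + "00" * 20}
    )
    assert extid.id.hex() == "b9295e1931c31e40a7e3e1e967decd1c89426455"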