diff --git a/bin/swh-hashtree b/bin/swh-hashtree index 5b85b7b..a4f8d7b 100755 --- a/bin/swh-hashtree +++ b/bin/swh-hashtree @@ -1,56 +1,57 @@ #!/usr/bin/env python3 # Use sample: # swh-hashtree --path . --ignore '.svn' --ignore '.git-svn' \ # --ignore-empty-folders # 38f8d2c3a951f6b94007896d0981077e48bbd702 -import click import os +import click + from swh.model import from_disk, hashutil def combine_filters(*filters): """Combine several ignore filters""" if len(filters) == 0: return from_disk.accept_all_directories elif len(filters) == 1: return filters[0] def combined_filter(*args, **kwargs): return all(filter(*args, **kwargs) for filter in filters) return combined_filter @click.command() @click.option("--path", default=".", help="Optional path to hash.") @click.option( "--ignore-empty-folder", is_flag=True, default=False, help="Ignore empty folder." ) @click.option("--ignore", multiple=True, help="Ignore pattern.") def main(path, ignore_empty_folder=False, ignore=None): filters = [] if ignore_empty_folder: filters.append(from_disk.ignore_empty_directories) if ignore: filters.append( from_disk.ignore_named_directories([os.fsencode(name) for name in ignore]) ) try: d = from_disk.Directory.from_disk( path=os.fsencode(path), dir_filter=combine_filters(*filters) ) hash = d.hash except Exception as e: print(e) return else: print(hashutil.hash_to_hex(hash)) if __name__ == "__main__": main() diff --git a/bin/swh-revhash b/bin/swh-revhash index d3a8caf..56b587d 100755 --- a/bin/swh-revhash +++ b/bin/swh-revhash @@ -1,31 +1,31 @@ #!/usr/bin/env python3 # Use: # swh-revhash 'tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\nparent 22c0fa5195a53f2e733ec75a9b6e9d1624a8b771\nauthor seanius 1138341044 +0000\ncommitter seanius 1138341044 +0000\n\nmaking dir structure...\n' # noqa # output: 17a631d474f49bbebfdf3d885dcde470d7faafd7 # To compare with git: # git-revhash 'tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\nparent 22c0fa5195a53f2e733ec75a9b6e9d1624a8b771\nauthor seanius 1138341044 +0000\ncommitter seanius 1138341044 +0000\n\nmaking dir structure...\n' # noqa # output: 17a631d474f49bbebfdf3d885dcde470d7faafd7 import sys -from swh.model import identifiers, hashutil +from swh.model import hashutil, identifiers def revhash(revision_raw): """Compute the revision hash. 
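
    Args:
        revision_raw (bytes): the raw commit manifest, in the same form git
            hashes it (see the usage sample in the header above)

    Returns:
        str: the hex-encoded intrinsic identifier of the revision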
""" # HACK: string have somehow their \n expanded to \\n if b"\\n" in revision_raw: revision_raw = revision_raw.replace(b"\\n", b"\n") h = hashutil.hash_git_data(revision_raw, "commit") return identifiers.identifier_to_str(h) if __name__ == "__main__": revision_raw = sys.argv[1].encode("utf-8") print(revhash(revision_raw)) diff --git a/setup.py b/setup.py index ecaac10..f42058c 100755 --- a/setup.py +++ b/setup.py @@ -1,79 +1,79 @@ #!/usr/bin/env python3 # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from setuptools import setup, find_packages - -from os import path from io import open +from os import path + +from setuptools import find_packages, setup here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, "README.md"), encoding="utf-8") as f: long_description = f.read() def parse_requirements(name=None): if name: reqf = "requirements-%s.txt" % name else: reqf = "requirements.txt" requirements = [] if not path.exists(reqf): return requirements with open(reqf) as f: for line in f.readlines(): line = line.strip() if not line or line.startswith("#"): continue requirements.append(line) return requirements blake2_requirements = ['pyblake2;python_version<"3.6"'] setup( name="swh.model", description="Software Heritage data model", long_description=long_description, long_description_content_type="text/markdown", python_requires=">=3.7", author="Software Heritage developers", author_email="swh-devel@inria.fr", url="https://forge.softwareheritage.org/diffusion/DMOD/", packages=find_packages(), setup_requires=["setuptools-scm"], use_scm_version=True, install_requires=( parse_requirements() + parse_requirements("swh") + blake2_requirements ), extras_require={ "cli": parse_requirements("cli"), "testing": parse_requirements("test"), }, include_package_data=True, entry_points=""" [console_scripts] swh-identify=swh.model.cli:identify [swh.cli.subcommands] identify=swh.model.cli:identify """, classifiers=[ "Programming Language :: Python :: 3", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Operating System :: OS Independent", "Development Status :: 5 - Production/Stable", ], project_urls={ "Bug Reports": "https://forge.softwareheritage.org/maniphest", "Funding": "https://www.softwareheritage.org/donate", "Source": "https://forge.softwareheritage.org/source/swh-model", "Documentation": "https://docs.softwareheritage.org/devel/swh-model/", }, ) diff --git a/swh/model/cli.py b/swh/model/cli.py index 5392070..68a3680 100644 --- a/swh/model/cli.py +++ b/swh/model/cli.py @@ -1,226 +1,225 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -# WARNING: do not import unnecessary things here to keep cli startup time under -# control -import click import os import sys +# WARNING: do not import unnecessary things here to keep cli startup time under +# control +import click CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # Mapping between dulwich types and Software Heritage ones. Used by snapshot ID # computation. 
_DULWICH_TYPES = { b"blob": "content", b"tree": "directory", b"commit": "revision", b"tag": "release", } class SWHIDParamType(click.ParamType): name = "persistent identifier" def convert(self, value, param, ctx): from swh.model.exceptions import ValidationError from swh.model.identifiers import parse_swhid try: parse_swhid(value) return value # return as string, as we need just that except ValidationError as e: self.fail("%s is not a valid SWHID. %s." % (value, e), param, ctx) def swhid_of_file(path): from swh.model.from_disk import Content - from swh.model.identifiers import swhid, CONTENT + from swh.model.identifiers import CONTENT, swhid object = Content.from_file(path=path).get_data() return swhid(CONTENT, object) def swhid_of_file_content(data): from swh.model.from_disk import Content - from swh.model.identifiers import swhid, CONTENT + from swh.model.identifiers import CONTENT, swhid object = Content.from_bytes(mode=644, data=data).get_data() return swhid(CONTENT, object) def swhid_of_dir(path): from swh.model.from_disk import Directory - from swh.model.identifiers import swhid, DIRECTORY + from swh.model.identifiers import DIRECTORY, swhid object = Directory.from_disk(path=path).get_data() return swhid(DIRECTORY, object) def swhid_of_origin(url): - from swh.model.identifiers import origin_identifier - from swh.model.identifiers import SWHID + from swh.model.identifiers import SWHID, origin_identifier return str(SWHID(object_type="origin", object_id=origin_identifier({"url": url}))) def swhid_of_git_repo(path): import dulwich.repo + from swh.model import hashutil - from swh.model.identifiers import snapshot_identifier - from swh.model.identifiers import SWHID + from swh.model.identifiers import SWHID, snapshot_identifier repo = dulwich.repo.Repo(path) branches = {} for ref, target in repo.refs.as_dict().items(): obj = repo[target] if obj: branches[ref] = { "target": hashutil.bytehex_to_hash(target), "target_type": _DULWICH_TYPES[obj.type_name], } else: branches[ref] = None for ref, target in repo.refs.get_symrefs().items(): branches[ref] = { "target": target, "target_type": "alias", } snapshot = {"branches": branches} return str(SWHID(object_type="snapshot", object_id=snapshot_identifier(snapshot))) def identify_object(obj_type, follow_symlinks, obj): from urllib.parse import urlparse if obj_type == "auto": if obj == "-" or os.path.isfile(obj): obj_type = "content" elif os.path.isdir(obj): obj_type = "directory" else: try: # URL parsing if urlparse(obj).scheme: obj_type = "origin" else: raise ValueError except ValueError: raise click.BadParameter("cannot detect object type for %s" % obj) swhid = None if obj == "-": content = sys.stdin.buffer.read() swhid = swhid_of_file_content(content) elif obj_type in ["content", "directory"]: path = obj.encode(sys.getfilesystemencoding()) if follow_symlinks and os.path.islink(obj): path = os.path.realpath(obj) if obj_type == "content": swhid = swhid_of_file(path) elif obj_type == "directory": swhid = swhid_of_dir(path) elif obj_type == "origin": swhid = swhid_of_origin(obj) elif obj_type == "snapshot": swhid = swhid_of_git_repo(obj) else: # shouldn't happen, due to option validation raise click.BadParameter("invalid object type: " + obj_type) # note: we return original obj instead of path here, to preserve user-given # file name in output return (obj, swhid) @click.command(context_settings=CONTEXT_SETTINGS) @click.option( "--dereference/--no-dereference", "follow_symlinks", default=True, help="follow (or not) symlinks for OBJECTS passed as 
arguments " + "(default: follow)", ) @click.option( "--filename/--no-filename", "show_filename", default=True, help="show/hide file name (default: show)", ) @click.option( "--type", "-t", "obj_type", default="auto", type=click.Choice(["auto", "content", "directory", "origin", "snapshot"]), help="type of object to identify (default: auto)", ) @click.option( "--verify", "-v", metavar="SWHID", type=SWHIDParamType(), help="reference identifier to be compared with computed one", ) @click.argument("objects", nargs=-1, required=True) def identify(obj_type, verify, show_filename, follow_symlinks, objects): """Compute the Software Heritage persistent identifier (SWHID) for the given source code object(s). For more details about SWHIDs see: \b https://docs.softwareheritage.org/devel/swh-model/persistent-identifiers.html Tip: you can pass "-" to identify the content of standard input. \b Examples: \b $ swh identify fork.c kmod.c sched/deadline.c swh:1:cnt:2e391c754ae730bd2d8520c2ab497c403220c6e3 fork.c swh:1:cnt:0277d1216f80ae1adeed84a686ed34c9b2931fc2 kmod.c swh:1:cnt:57b939c81bce5d06fa587df8915f05affbe22b82 sched/deadline.c \b $ swh identify --no-filename /usr/src/linux/kernel/ swh:1:dir:f9f858a48d663b3809c9e2f336412717496202ab \b $ git clone --mirror https://forge.softwareheritage.org/source/helloworld.git $ swh identify --type snapshot helloworld.git/ swh:1:snp:510aa88bdc517345d258c1fc2babcd0e1f905e93 helloworld.git """ # NoQA # overlong lines in shell examples are fine from functools import partial if verify and len(objects) != 1: raise click.BadParameter("verification requires a single object") results = map(partial(identify_object, obj_type, follow_symlinks), objects) if verify: swhid = next(results)[1] if verify == swhid: click.echo("SWHID match: %s" % swhid) sys.exit(0) else: click.echo("SWHID mismatch: %s != %s" % (verify, swhid)) sys.exit(1) else: for (obj, swhid) in results: msg = swhid if show_filename: msg = "%s\t%s" % (swhid, os.fsdecode(obj)) click.echo(msg) if __name__ == "__main__": identify() diff --git a/swh/model/fields/__init__.py b/swh/model/fields/__init__.py index a5b1ed3..7e3c2fe 100644 --- a/swh/model/fields/__init__.py +++ b/swh/model/fields/__init__.py @@ -1,18 +1,18 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # We do our imports here but we don't use them, so flake8 complains # flake8: noqa +from .compound import validate_against_schema, validate_all_keys, validate_any_key +from .hashes import validate_sha1, validate_sha1_git, validate_sha256 from .simple import ( - validate_type, - validate_int, - validate_str, validate_bytes, validate_datetime, validate_enum, + validate_int, + validate_str, + validate_type, ) -from .hashes import validate_sha1, validate_sha1_git, validate_sha256 -from .compound import validate_against_schema, validate_all_keys, validate_any_key diff --git a/swh/model/fields/compound.py b/swh/model/fields/compound.py index 3133f59..90b4685 100644 --- a/swh/model/fields/compound.py +++ b/swh/model/fields/compound.py @@ -1,125 +1,125 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import itertools -from ..exceptions import 
ValidationError, NON_FIELD_ERRORS +from ..exceptions import NON_FIELD_ERRORS, ValidationError def validate_against_schema(model, schema, value): """Validate a value for the given model against the given schema. Args: model: the name of the model schema: the schema to validate against value: the value to validate Returns: True if the value is correct against the schema Raises: ValidationError if the value does not validate against the schema """ if not isinstance(value, dict): raise ValidationError( "Unexpected type %(type)s for %(model)s, expected dict", params={"model": model, "type": value.__class__.__name__,}, code="model-unexpected-type", ) errors = defaultdict(list) for key, (mandatory, validators) in itertools.chain( ((k, v) for k, v in schema.items() if k != NON_FIELD_ERRORS), [(NON_FIELD_ERRORS, (False, schema.get(NON_FIELD_ERRORS, [])))], ): if not validators: continue if not isinstance(validators, list): validators = [validators] validated_value = value if key != NON_FIELD_ERRORS: try: validated_value = value[key] except KeyError: if mandatory: errors[key].append( ValidationError( "Field %(field)s is mandatory", params={"field": key}, code="model-field-mandatory", ) ) continue else: if errors: # Don't validate the whole object if some fields are broken continue for validator in validators: try: valid = validator(validated_value) except ValidationError as e: errors[key].append(e) else: if not valid: errdata = { "validator": validator.__name__, } if key == NON_FIELD_ERRORS: errmsg = ( "Validation of model %(model)s failed in " "%(validator)s" ) errdata["model"] = model errcode = "model-validation-failed" else: errmsg = ( "Validation of field %(field)s failed in " "%(validator)s" ) errdata["field"] = key errcode = "field-validation-failed" errors[key].append( ValidationError(errmsg, params=errdata, code=errcode) ) if errors: raise ValidationError(dict(errors)) return True def validate_all_keys(value, keys): """Validate that all the given keys are present in value""" missing_keys = set(keys) - set(value) if missing_keys: missing_fields = ", ".join(sorted(missing_keys)) raise ValidationError( "Missing mandatory fields %(missing_fields)s", params={"missing_fields": missing_fields}, code="missing-mandatory-field", ) return True def validate_any_key(value, keys): """Validate that any of the given keys is present in value""" present_keys = set(keys) & set(value) if not present_keys: missing_fields = ", ".join(sorted(keys)) raise ValidationError( "Must contain one of the alternative fields %(missing_fields)s", params={"missing_fields": missing_fields}, code="missing-alternative-field", ) return True diff --git a/swh/model/fields/hashes.py b/swh/model/fields/hashes.py index 47e872c..9b5ee4a 100644 --- a/swh/model/fields/hashes.py +++ b/swh/model/fields/hashes.py @@ -1,115 +1,116 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import string + from ..exceptions import ValidationError def validate_hash(value, hash_type): """Validate that the given value represents a hash of the given hash_type. 
Args: value: the value to check hash_type: the type of hash the value is representing Returns: True if the hash validates Raises: ValueError if the hash does not validate """ hash_lengths = { "sha1": 20, "sha1_git": 20, "sha256": 32, } hex_digits = set(string.hexdigits) if hash_type not in hash_lengths: raise ValidationError( "Unexpected hash type %(hash_type)s, expected one of" " %(hash_types)s", params={ "hash_type": hash_type, "hash_types": ", ".join(sorted(hash_lengths)), }, code="unexpected-hash-type", ) if isinstance(value, str): errors = [] extra_chars = set(value) - hex_digits if extra_chars: errors.append( ValidationError( "Unexpected characters `%(unexpected_chars)s' for hash " "type %(hash_type)s", params={ "unexpected_chars": ", ".join(sorted(extra_chars)), "hash_type": hash_type, }, code="unexpected-hash-contents", ) ) length = len(value) expected_length = 2 * hash_lengths[hash_type] if length != expected_length: errors.append( ValidationError( "Unexpected length %(length)d for hash type " "%(hash_type)s, expected %(expected_length)d", params={ "length": length, "expected_length": expected_length, "hash_type": hash_type, }, code="unexpected-hash-length", ) ) if errors: raise ValidationError(errors) return True if isinstance(value, bytes): length = len(value) expected_length = hash_lengths[hash_type] if length != expected_length: raise ValidationError( "Unexpected length %(length)d for hash type " "%(hash_type)s, expected %(expected_length)d", params={ "length": length, "expected_length": expected_length, "hash_type": hash_type, }, code="unexpected-hash-length", ) return True raise ValidationError( "Unexpected type %(type)s for hash, expected str or bytes", params={"type": value.__class__.__name__,}, code="unexpected-hash-value-type", ) def validate_sha1(sha1): """Validate that sha1 is a valid sha1 hash""" return validate_hash(sha1, "sha1") def validate_sha1_git(sha1_git): """Validate that sha1_git is a valid sha1_git hash""" return validate_hash(sha1_git, "sha1_git") def validate_sha256(sha256): """Validate that sha256 is a valid sha256 hash""" return validate_hash(sha256, "sha256") diff --git a/swh/model/from_disk.py b/swh/model/from_disk.py index 5ac97e2..719599d 100644 --- a/swh/model/from_disk.py +++ b/swh/model/from_disk.py @@ -1,482 +1,479 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import enum import os import stat +from typing import Any, Iterable, List, Optional, Tuple import attr from attrs_strict import type_validator -from typing import Any, Iterable, List, Optional, Tuple from typing_extensions import Final +from . import model from .hashutil import MultiHash +from .identifiers import directory_entry_sort_key, directory_identifier +from .identifiers import identifier_to_bytes as id_to_bytes +from .identifiers import identifier_to_str as id_to_str from .merkle import MerkleLeaf, MerkleNode -from .identifiers import ( - directory_entry_sort_key, - directory_identifier, - identifier_to_bytes as id_to_bytes, - identifier_to_str as id_to_str, -) -from . 
import model @attr.s class DiskBackedContent(model.BaseContent): """Content-like class, which allows lazy-loading data from the disk.""" object_type: Final = "content_file" sha1 = attr.ib(type=bytes, validator=type_validator()) sha1_git = attr.ib(type=model.Sha1Git, validator=type_validator()) sha256 = attr.ib(type=bytes, validator=type_validator()) blake2s256 = attr.ib(type=bytes, validator=type_validator()) length = attr.ib(type=int, validator=type_validator()) status = attr.ib( type=str, validator=attr.validators.in_(["visible", "hidden"]), default="visible", ) ctime = attr.ib( type=Optional[datetime.datetime], validator=type_validator(), default=None, eq=False, ) path = attr.ib(type=Optional[bytes], default=None) @classmethod def from_dict(cls, d): return cls(**d) def __attrs_post_init__(self): if self.path is None: raise TypeError("path must not be None.") def with_data(self) -> model.Content: args = self.to_dict() del args["path"] assert self.path is not None with open(self.path, "rb") as fd: return model.Content.from_dict({**args, "data": fd.read()}) class DentryPerms(enum.IntEnum): """Admissible permissions for directory entries.""" content = 0o100644 """Content""" executable_content = 0o100755 """Executable content (e.g. executable script)""" symlink = 0o120000 """Symbolic link""" directory = 0o040000 """Directory""" revision = 0o160000 """Revision (e.g. submodule)""" def mode_to_perms(mode): """Convert a file mode to a permission compatible with Software Heritage directory entries Args: mode (int): a file mode as returned by :func:`os.stat` in :attr:`os.stat_result.st_mode` Returns: DentryPerms: one of the following values: :const:`DentryPerms.content`: plain file :const:`DentryPerms.executable_content`: executable file :const:`DentryPerms.symlink`: symbolic link :const:`DentryPerms.directory`: directory """ if stat.S_ISLNK(mode): return DentryPerms.symlink if stat.S_ISDIR(mode): return DentryPerms.directory else: # file is executable in any way if mode & (0o111): return DentryPerms.executable_content else: return DentryPerms.content class Content(MerkleLeaf): """Representation of a Software Heritage content as a node in a Merkle tree. The current Merkle hash for the Content nodes is the `sha1_git`, which makes it consistent with what :class:`Directory` uses for its own hash computation. """ __slots__ = [] # type: List[str] object_type: Final = "content" @classmethod def from_bytes(cls, *, mode, data): """Convert data (raw :class:`bytes`) to a Software Heritage content entry Args: mode (int): a file mode (passed to :func:`mode_to_perms`) data (bytes): raw contents of the file """ ret = MultiHash.from_data(data).digest() ret["length"] = len(data) ret["perms"] = mode_to_perms(mode) ret["data"] = data ret["status"] = "visible" return cls(ret) @classmethod def from_symlink(cls, *, path, mode): """Convert a symbolic link to a Software Heritage content entry""" return cls.from_bytes(mode=mode, data=os.readlink(path)) @classmethod def from_file(cls, *, path, max_content_length=None): """Compute the Software Heritage content entry corresponding to an on-disk file. The returned dictionary contains keys useful for both: - loading the content in the archive (hashes, `length`) - using the content as a directory entry in a directory Args: save_path (bool): add the file path to the entry max_content_length (Optional[int]): if given, all contents larger than this will be skipped. 
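
        Example (an illustrative sketch; ``b"COPYING"`` is a hypothetical
        path, assumed to point at a regular file on disk):

        .. code-block:: python

            content = Content.from_file(path=b"COPYING")
            data = content.get_data()  # includes sha1, sha1_git, sha256, ...
            assert data["status"] == "visible"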
""" file_stat = os.lstat(path) mode = file_stat.st_mode length = file_stat.st_size too_large = max_content_length is not None and length > max_content_length if stat.S_ISLNK(mode): # Symbolic link: return a file whose contents are the link target if too_large: # Unlike large contents, we can't stream symlinks to # MultiHash, and we don't want to fit them in memory if # they exceed max_content_length either. # Thankfully, this should not happen for reasonable values of # max_content_length because of OS/filesystem limitations, # so let's just raise an error. raise Exception(f"Symlink too large ({length} bytes)") return cls.from_symlink(path=path, mode=mode) elif not stat.S_ISREG(mode): # not a regular file: return the empty file instead return cls.from_bytes(mode=mode, data=b"") if too_large: skip_reason = "Content too large" else: skip_reason = None hashes = MultiHash.from_path(path).digest() if skip_reason: ret = { **hashes, "status": "absent", "reason": skip_reason, } else: ret = { **hashes, "status": "visible", } ret["path"] = path ret["perms"] = mode_to_perms(mode) ret["length"] = length obj = cls(ret) return obj def __repr__(self): return "Content(id=%s)" % id_to_str(self.hash) def compute_hash(self): return self.data["sha1_git"] def to_model(self) -> model.BaseContent: """Builds a `model.BaseContent` object based on this leaf.""" data = self.get_data().copy() data.pop("perms", None) if data["status"] == "absent": data.pop("path", None) return model.SkippedContent.from_dict(data) elif "data" in data: return model.Content.from_dict(data) else: return DiskBackedContent.from_dict(data) def accept_all_directories(dirpath: str, dirname: str, entries: Iterable[Any]) -> bool: """Default filter for :func:`Directory.from_disk` accepting all directories Args: dirname (bytes): directory name entries (list): directory entries """ return True def ignore_empty_directories( dirpath: str, dirname: str, entries: Iterable[Any] ) -> bool: """Filter for :func:`directory_to_objects` ignoring empty directories Args: dirname (bytes): directory name entries (list): directory entries Returns: True if the directory is not empty, false if the directory is empty """ return bool(entries) def ignore_named_directories(names, *, case_sensitive=True): """Filter for :func:`directory_to_objects` to ignore directories named one of names. Args: names (list of bytes): names to ignore case_sensitive (bool): whether to do the filtering in a case sensitive way Returns: a directory filter for :func:`directory_to_objects` """ if not case_sensitive: names = [name.lower() for name in names] def named_filter( dirpath: str, dirname: str, entries: Iterable[Any], names: Iterable[Any] = names, case_sensitive: bool = case_sensitive, ): if case_sensitive: return dirname not in names else: return dirname.lower() not in names return named_filter def iter_directory( directory, ) -> Tuple[List[model.Content], List[model.SkippedContent], List[model.Directory]]: """Return the directory listing from a disk-memory directory instance. Raises: TypeError in case an unexpected object type is listed. Returns: Tuple of respectively iterable of content, skipped content and directories. """ contents: List[model.Content] = [] skipped_contents: List[model.SkippedContent] = [] directories: List[model.Directory] = [] for obj in directory.iter_tree(): obj = obj.to_model() obj_type = obj.object_type if obj_type in (model.Content.object_type, DiskBackedContent.object_type): # FIXME: read the data from disk later (when the # storage buffer is flushed). 
obj = obj.with_data() contents.append(obj) elif obj_type == model.SkippedContent.object_type: skipped_contents.append(obj) elif obj_type == model.Directory.object_type: directories.append(obj) else: raise TypeError(f"Unexpected object type from disk: {obj}") return contents, skipped_contents, directories class Directory(MerkleNode): """Representation of a Software Heritage directory as a node in a Merkle Tree. This class can be used to generate, from an on-disk directory, all the objects that need to be sent to the Software Heritage archive. The :func:`from_disk` constructor allows you to generate the data structure from a directory on disk. The resulting :class:`Directory` can then be manipulated as a dictionary, using the path as key. The :func:`collect` method is used to retrieve all the objects that need to be added to the Software Heritage archive since the last collection, by class (contents and directories). When using the dict-like methods to update the contents of the directory, the affected levels of hierarchy are reset and can be collected again using the same method. This enables the efficient collection of updated nodes, for instance when the client is applying diffs. """ __slots__ = ["__entries"] object_type: Final = "directory" @classmethod def from_disk( cls, *, path, dir_filter=accept_all_directories, max_content_length=None ): """Compute the Software Heritage objects for a given directory tree Args: path (bytes): the directory to traverse data (bool): whether to add the data to the content objects save_path (bool): whether to add the path to the content objects dir_filter (function): a filter to ignore some directories by name or contents. Takes two arguments: dirname and entries, and returns True if the directory should be added, False if the directory should be ignored. max_content_length (Optional[int]): if given, all contents larger than this will be skipped. """ top_path = path dirs = {} for root, dentries, fentries in os.walk(top_path, topdown=False): entries = {} # Join fentries and dentries in the same processing, as symbolic # links to directories appear in dentries... 
for name in fentries + dentries: path = os.path.join(root, name) if not os.path.isdir(path) or os.path.islink(path): content = Content.from_file( path=path, max_content_length=max_content_length ) entries[name] = content else: if dir_filter(path, name, dirs[path].entries): entries[name] = dirs[path] dirs[root] = cls({"name": os.path.basename(root)}) dirs[root].update(entries) return dirs[top_path] def __init__(self, data=None): super().__init__(data=data) self.__entries = None def invalidate_hash(self): self.__entries = None super().invalidate_hash() @staticmethod def child_to_directory_entry(name, child): if child.object_type == "directory": return { "type": "dir", "perms": DentryPerms.directory, "target": child.hash, "name": name, } elif child.object_type == "content": return { "type": "file", "perms": child.data["perms"], "target": child.hash, "name": name, } else: raise ValueError(f"unknown child {child}") def get_data(self, **kwargs): return { "id": self.hash, "entries": self.entries, } @property def entries(self): """Child nodes, sorted by name in the same way `directory_identifier` does.""" if self.__entries is None: self.__entries = sorted( ( self.child_to_directory_entry(name, child) for name, child in self.items() ), key=directory_entry_sort_key, ) return self.__entries def compute_hash(self): return id_to_bytes(directory_identifier({"entries": self.entries})) def to_model(self) -> model.Directory: """Builds a `model.Directory` object based on this node; ignoring its children.""" return model.Directory.from_dict(self.get_data()) def __getitem__(self, key): if not isinstance(key, bytes): raise ValueError("Can only get a bytes from Directory") # Convenience shortcut if key == b"": return self if b"/" not in key: return super().__getitem__(key) else: key1, key2 = key.split(b"/", 1) return self.__getitem__(key1)[key2] def __setitem__(self, key, value): if not isinstance(key, bytes): raise ValueError("Can only set a bytes Directory entry") if not isinstance(value, (Content, Directory)): raise ValueError( "Can only set a Directory entry to a Content or " "Directory" ) if key == b"": raise ValueError("Directory entry must have a name") if b"\x00" in key: raise ValueError("Directory entry name must not contain nul bytes") if b"/" not in key: return super().__setitem__(key, value) else: key1, key2 = key.rsplit(b"/", 1) self[key1].__setitem__(key2, value) def __delitem__(self, key): if not isinstance(key, bytes): raise ValueError("Can only delete a bytes Directory entry") if b"/" not in key: super().__delitem__(key) else: key1, key2 = key.rsplit(b"/", 1) del self[key1][key2] def __repr__(self): return "Directory(id=%s, entries=[%s])" % ( id_to_str(self.hash), ", ".join(str(entry) for entry in self), ) diff --git a/swh/model/hashutil.py b/swh/model/hashutil.py index 954ae95..cec8778 100644 --- a/swh/model/hashutil.py +++ b/swh/model/hashutil.py @@ -1,363 +1,362 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Module in charge of hashing function definitions. This is the base module use to compute swh's hashes. Only a subset of hashing algorithms is supported as defined in the ALGORITHMS set. Any provided algorithms not in that list will result in a ValueError explaining the error. This module defines a MultiHash class to ease the softwareheritage hashing algorithms computation. 
This allows to compute hashes from file object, path, data using a similar interface as what the standard hashlib module provides. Basic usage examples: - file object: MultiHash.from_file( file_object, hash_names=DEFAULT_ALGORITHMS).digest() - path (filepath): MultiHash.from_path(b'foo').hexdigest() - data (bytes): MultiHash.from_data(b'foo').bytehexdigest() "Complex" usage, defining a swh hashlib instance first: - To compute length, integrate the length to the set of algorithms to compute, for example: .. code-block:: python h = MultiHash(hash_names=set({'length'}).union(DEFAULT_ALGORITHMS)) with open(filepath, 'rb') as f: h.update(f.read(HASH_BLOCK_SIZE)) hashes = h.digest() # returns a dict of {hash_algo_name: hash_in_bytes} - Write alongside computing hashing algorithms (from a stream), example: .. code-block:: python h = MultiHash(length=length) with open(filepath, 'wb') as f: for chunk in r.iter_content(): # r a stream of sort h.update(chunk) f.write(chunk) hashes = h.hexdigest() # returns a dict of {hash_algo_name: hash_in_hex} """ import binascii import functools import hashlib -import os - from io import BytesIO +import os from typing import Callable, Dict ALGORITHMS = set(["sha1", "sha256", "sha1_git", "blake2s256", "blake2b512"]) """Hashing algorithms supported by this module""" DEFAULT_ALGORITHMS = set(["sha1", "sha256", "sha1_git", "blake2s256"]) """Algorithms computed by default when calling the functions from this module. Subset of :const:`ALGORITHMS`. """ HASH_BLOCK_SIZE = 32768 """Block size for streaming hash computations made in this module""" _blake2_hash_cache = {} # type: Dict[str, Callable] class MultiHash: """Hashutil class to support multiple hashes computation. Args: hash_names (set): Set of hash algorithms (+ optionally length) to compute hashes (cf. DEFAULT_ALGORITHMS) length (int): Length of the total sum of chunks to read If the length is provided as algorithm, the length is also computed and returned. 
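
    Example (a minimal sketch of the streaming interface, tracking the sha1,
    sha1_git and total length of a 3-byte payload):

    .. code-block:: python

        h = MultiHash(hash_names={"sha1", "sha1_git", "length"}, length=3)
        h.update(b"foo")
        digests = h.digest()
        # digests["length"] == 3; digests["sha1"] and digests["sha1_git"]
        # are raw 20-byte digests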
""" def __init__(self, hash_names=DEFAULT_ALGORITHMS, length=None): self.state = {} self.track_length = False for name in hash_names: if name == "length": self.state["length"] = 0 self.track_length = True else: self.state[name] = _new_hash(name, length) @classmethod def from_state(cls, state, track_length): ret = cls([]) ret.state = state ret.track_length = track_length @classmethod def from_file(cls, fobj, hash_names=DEFAULT_ALGORITHMS, length=None): ret = cls(length=length, hash_names=hash_names) while True: chunk = fobj.read(HASH_BLOCK_SIZE) if not chunk: break ret.update(chunk) return ret @classmethod def from_path(cls, path, hash_names=DEFAULT_ALGORITHMS): length = os.path.getsize(path) with open(path, "rb") as f: ret = cls.from_file(f, hash_names=hash_names, length=length) return ret @classmethod def from_data(cls, data, hash_names=DEFAULT_ALGORITHMS): length = len(data) fobj = BytesIO(data) return cls.from_file(fobj, hash_names=hash_names, length=length) def update(self, chunk): for name, h in self.state.items(): if name == "length": continue h.update(chunk) if self.track_length: self.state["length"] += len(chunk) def digest(self): return { name: h.digest() if name != "length" else h for name, h in self.state.items() } def hexdigest(self): return { name: h.hexdigest() if name != "length" else h for name, h in self.state.items() } def bytehexdigest(self): return { name: hash_to_bytehex(h.digest()) if name != "length" else h for name, h in self.state.items() } def copy(self): copied_state = { name: h.copy() if name != "length" else h for name, h in self.state.items() } return self.from_state(copied_state, self.track_length) def _new_blake2_hash(algo): """Return a function that initializes a blake2 hash. """ if algo in _blake2_hash_cache: return _blake2_hash_cache[algo]() lalgo = algo.lower() if not lalgo.startswith("blake2"): raise ValueError("Algorithm %s is not a blake2 hash" % algo) blake_family = lalgo[:7] digest_size = None if lalgo[7:]: try: digest_size, remainder = divmod(int(lalgo[7:]), 8) except ValueError: raise ValueError("Unknown digest size for algo %s" % algo) from None if remainder: raise ValueError( "Digest size for algorithm %s must be a multiple of 8" % algo ) if lalgo in hashlib.algorithms_available: # Handle the case where OpenSSL ships the given algorithm # (e.g. Python 3.5 on Debian 9 stretch) _blake2_hash_cache[algo] = lambda: hashlib.new(lalgo) else: # Try using the built-in implementation for Python 3.6+ if blake_family in hashlib.algorithms_available: blake2 = getattr(hashlib, blake_family) else: import pyblake2 blake2 = getattr(pyblake2, blake_family) _blake2_hash_cache[algo] = lambda: blake2(digest_size=digest_size) return _blake2_hash_cache[algo]() def _new_hashlib_hash(algo): """Initialize a digest object from hashlib. Handle the swh-specific names for the blake2-related algorithms """ if algo.startswith("blake2"): return _new_blake2_hash(algo) else: return hashlib.new(algo) def _new_git_hash(base_algo, git_type, length): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm, and feed it with the header for a git object of the given type and length. 
    The header for hashing a git object consists of:

     - The type of the object (encoded in ASCII)
     - One ASCII space (\x20)
     - The length of the object (decimal encoded in ASCII)
     - One NUL byte

    Args:
        base_algo (str from :const:`ALGORITHMS`): a hashlib-supported algorithm
        git_type: the type of the git object (supposedly one of 'blob',
                  'commit', 'tag', 'tree')
        length: the length of the git object you're encoding

    Returns:
        a hashutil.hash object
    """
    h = _new_hashlib_hash(base_algo)
    git_header = "%s %d\0" % (git_type, length)
    h.update(git_header.encode("ascii"))

    return h


def _new_hash(algo, length=None):
    """Initialize a digest object (as returned by python's hashlib) for the
    requested algorithm. See the constant ALGORITHMS for the list of supported
    algorithms.

    If a git-specific hashing algorithm is requested (e.g., "sha1_git"), the
    hashing object will be pre-fed with the needed header; for this to work,
    length must be given.

    Args:
        algo (str): a hashing algorithm (one of ALGORITHMS)
        length (int): the length of the hashed payload (needed for
          git-specific algorithms)

    Returns:
        a hashutil.hash object

    Raises:
        ValueError if algo is unknown, or length is missing for a git-specific
        hash.

    """
    if algo not in ALGORITHMS:
        raise ValueError(
            "Unexpected hashing algorithm %s, expected one of %s"
            % (algo, ", ".join(sorted(ALGORITHMS)))
        )

    if algo.endswith("_git"):
        if length is None:
            raise ValueError("Missing length for git hashing algorithm")
        base_algo = algo[:-4]
        return _new_git_hash(base_algo, "blob", length)

    return _new_hashlib_hash(algo)


def hash_git_data(data, git_type, base_algo="sha1"):
    """Hash the given data as a git object of type git_type.

    Args:
        data: a bytes object
        git_type: the git object type
        base_algo: the base hashing algorithm used (default: sha1)

    Returns:
        the raw ``bytes`` digest of the data, hashed with the git object
        header for the given type

    Raises:
        ValueError if the git_type is unexpected.
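
    Example (a sketch; the expected value is git's well-known empty-blob
    sha1):

    .. code-block:: python

        digest = hash_git_data(b"", "blob")
        assert digest.hex() == "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"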
""" git_object_types = {"blob", "tree", "commit", "tag", "snapshot"} if git_type not in git_object_types: raise ValueError( "Unexpected git object type %s, expected one of %s" % (git_type, ", ".join(sorted(git_object_types))) ) h = _new_git_hash(base_algo, git_type, len(data)) h.update(data) return h.digest() @functools.lru_cache() def hash_to_hex(hash): """Converts a hash (in hex or bytes form) to its hexadecimal ascii form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: str: the hexadecimal form of the hash """ if isinstance(hash, str): return hash return binascii.hexlify(hash).decode("ascii") @functools.lru_cache() def hash_to_bytehex(hash): """Converts a hash to its hexadecimal bytes representation Args: hash (bytes): a :class:`bytes` hash Returns: bytes: the hexadecimal form of the hash, as :class:`bytes` """ return binascii.hexlify(hash) @functools.lru_cache() def hash_to_bytes(hash): """Converts a hash (in hex or bytes form) to its raw bytes form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: bytes: the :class:`bytes` form of the hash """ if isinstance(hash, bytes): return hash return bytes.fromhex(hash) @functools.lru_cache() def bytehex_to_hash(hex): """Converts a hexadecimal bytes representation of a hash to that hash Args: hash (bytes): a :class:`bytes` containing the hexadecimal form of the hash encoded in ascii Returns: bytes: the :class:`bytes` form of the hash """ return hash_to_bytes(hex.decode()) diff --git a/swh/model/hypothesis_strategies.py b/swh/model/hypothesis_strategies.py index 21e922e..0c54a99 100644 --- a/swh/model/hypothesis_strategies.py +++ b/swh/model/hypothesis_strategies.py @@ -1,469 +1,468 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from hypothesis import assume from hypothesis.extra.dateutil import timezones from hypothesis.strategies import ( binary, booleans, builds, characters, composite, datetimes, dictionaries, from_regex, integers, just, lists, none, one_of, sampled_from, sets, text, tuples, ) from .from_disk import DentryPerms +from .identifiers import identifier_to_bytes, snapshot_identifier from .model import ( - Person, - Timestamp, - TimestampWithTimezone, + BaseContent, + Content, + Directory, + DirectoryEntry, + ObjectType, Origin, OriginVisit, OriginVisitStatus, - Snapshot, - SnapshotBranch, - ObjectType, - TargetType, + Person, Release, Revision, RevisionType, - BaseContent, - Directory, - DirectoryEntry, - Content, SkippedContent, + Snapshot, + SnapshotBranch, + TargetType, + Timestamp, + TimestampWithTimezone, ) -from .identifiers import snapshot_identifier, identifier_to_bytes - pgsql_alphabet = characters( blacklist_categories=("Cs",), blacklist_characters=["\u0000"] ) # postgresql does not like these def optional(strategy): return one_of(none(), strategy) def pgsql_text(): return text(alphabet=pgsql_alphabet) def sha1_git(): return binary(min_size=20, max_size=20) def sha1(): return binary(min_size=20, max_size=20) def aware_datetimes(): # datetimes in Software Heritage are not used for software artifacts # (which may be much older than 2000), but only for objects like scheduler # task runs, and origin visits, which were created by Software Heritage, # so at least in 2015. 
# We're forbidding old datetimes, because until 1956, many timezones had seconds # in their "UTC offsets" (see # ), which is not # encodable in ISO8601; and we need our datetimes to be ISO8601-encodable in the # RPC protocol min_value = datetime.datetime(2000, 1, 1, 0, 0, 0) return datetimes(min_value=min_value, timezones=timezones()) @composite def urls(draw): protocol = draw(sampled_from(["git", "http", "https", "deb"])) domain = draw(from_regex(r"\A([a-z]([a-z0-9-]*)\.){1,3}[a-z0-9]+\Z")) return "%s://%s" % (protocol, domain) @composite def persons_d(draw): fullname = draw(binary()) email = draw(optional(binary())) name = draw(optional(binary())) assume(not (len(fullname) == 32 and email is None and name is None)) return dict(fullname=fullname, name=name, email=email) def persons(): return persons_d().map(Person.from_dict) def timestamps_d(): max_seconds = datetime.datetime.max.replace( tzinfo=datetime.timezone.utc ).timestamp() min_seconds = datetime.datetime.min.replace( tzinfo=datetime.timezone.utc ).timestamp() return builds( dict, seconds=integers(min_seconds, max_seconds), microseconds=integers(0, 1000000), ) def timestamps(): return timestamps_d().map(Timestamp.from_dict) @composite def timestamps_with_timezone_d( draw, timestamp=timestamps_d(), offset=integers(min_value=-14 * 60, max_value=14 * 60), negative_utc=booleans(), ): timestamp = draw(timestamp) offset = draw(offset) negative_utc = draw(negative_utc) assume(not (negative_utc and offset)) return dict(timestamp=timestamp, offset=offset, negative_utc=negative_utc) timestamps_with_timezone = timestamps_with_timezone_d().map( TimestampWithTimezone.from_dict ) def origins_d(): return builds(dict, url=urls()) def origins(): return origins_d().map(Origin.from_dict) def origin_visits_d(): return builds( dict, visit=integers(1, 1000), origin=urls(), date=aware_datetimes(), type=pgsql_text(), ) def origin_visits(): return origin_visits_d().map(OriginVisit.from_dict) def metadata_dicts(): return dictionaries(pgsql_text(), pgsql_text()) def origin_visit_statuses_d(): return builds( dict, visit=integers(1, 1000), origin=urls(), status=sampled_from(["created", "ongoing", "full", "partial"]), date=aware_datetimes(), snapshot=optional(sha1_git()), metadata=optional(metadata_dicts()), ) def origin_visit_statuses(): return origin_visit_statuses_d().map(OriginVisitStatus.from_dict) @composite def releases_d(draw): target_type = sampled_from([x.value for x in ObjectType]) name = binary() message = optional(binary()) synthetic = booleans() target = sha1_git() metadata = optional(revision_metadata()) return draw( one_of( builds( dict, name=name, message=message, synthetic=synthetic, author=none(), date=none(), target=target, target_type=target_type, metadata=metadata, ), builds( dict, name=name, message=message, synthetic=synthetic, date=timestamps_with_timezone_d(), author=persons_d(), target=target, target_type=target_type, metadata=metadata, ), ) ) def releases(): return releases_d().map(Release.from_dict) revision_metadata = metadata_dicts def extra_headers(): return lists( tuples(binary(min_size=0, max_size=50), binary(min_size=0, max_size=500)) ).map(tuple) def revisions_d(): return builds( dict, message=optional(binary()), synthetic=booleans(), author=persons_d(), committer=persons_d(), date=timestamps_with_timezone_d(), committer_date=timestamps_with_timezone_d(), parents=tuples(sha1_git()), directory=sha1_git(), type=sampled_from([x.value for x in RevisionType]), metadata=optional(revision_metadata()), extra_headers=extra_headers(), 
) # TODO: metadata['extra_headers'] can have binary keys and values def revisions(): return revisions_d().map(Revision.from_dict) def directory_entries_d(): return builds( dict, name=binary(), target=sha1_git(), type=sampled_from(["file", "dir", "rev"]), perms=sampled_from([perm.value for perm in DentryPerms]), ) def directory_entries(): return directory_entries_d().map(DirectoryEntry) def directories_d(): return builds(dict, entries=tuples(directory_entries_d())) def directories(): return directories_d().map(Directory.from_dict) def contents_d(): return one_of(present_contents_d(), skipped_contents_d()) def contents(): return one_of(present_contents(), skipped_contents()) def present_contents_d(): return builds( dict, data=binary(max_size=4096), ctime=optional(aware_datetimes()), status=one_of(just("visible"), just("hidden")), ) def present_contents(): return present_contents_d().map(lambda d: Content.from_data(**d)) @composite def skipped_contents_d(draw): result = BaseContent._hash_data(draw(binary(max_size=4096))) result.pop("data") nullify_attrs = draw( sets(sampled_from(["sha1", "sha1_git", "sha256", "blake2s256"])) ) for k in nullify_attrs: result[k] = None result["reason"] = draw(pgsql_text()) result["status"] = "absent" result["ctime"] = draw(optional(aware_datetimes())) return result def skipped_contents(): return skipped_contents_d().map(SkippedContent.from_dict) def branch_names(): return binary(min_size=1) def branch_targets_object_d(): return builds( dict, target=sha1_git(), target_type=sampled_from( [x.value for x in TargetType if x.value not in ("alias",)] ), ) def branch_targets_alias_d(): return builds( dict, target=sha1_git(), target_type=just("alias") ) # TargetType.ALIAS.value)) def branch_targets_d(*, only_objects=False): if only_objects: return branch_targets_object_d() else: return one_of(branch_targets_alias_d(), branch_targets_object_d()) def branch_targets(*, only_objects=False): return builds(SnapshotBranch.from_dict, branch_targets_d(only_objects=only_objects)) @composite def snapshots_d(draw, *, min_size=0, max_size=100, only_objects=False): branches = draw( dictionaries( keys=branch_names(), values=optional(branch_targets_d(only_objects=only_objects)), min_size=min_size, max_size=max_size, ) ) if not only_objects: # Make sure aliases point to actual branches unresolved_aliases = { branch: target["target"] for branch, target in branches.items() if ( target and target["target_type"] == "alias" and target["target"] not in branches ) } for alias_name, alias_target in unresolved_aliases.items(): # Override alias branch with one pointing to a real object # if max_size constraint is reached alias = alias_target if len(branches) < max_size else alias_name branches[alias] = draw(branch_targets_d(only_objects=True)) # Ensure no cycles between aliases while True: try: id_ = snapshot_identifier( { "branches": { name: branch or None for (name, branch) in branches.items() } } ) except ValueError as e: for (source, target) in e.args[1]: branches[source] = draw(branch_targets_d(only_objects=True)) else: break return dict(id=identifier_to_bytes(id_), branches=branches) def snapshots(*, min_size=0, max_size=100, only_objects=False): return snapshots_d( min_size=min_size, max_size=max_size, only_objects=only_objects ).map(Snapshot.from_dict) def objects(blacklist_types=("origin_visit_status",), split_content=False): """generates a random couple (type, obj) which obj is an instance of the Model class corresponding to obj_type. 
`blacklist_types` is a list of obj_type to exclude from the strategy. If `split_content` is True, generates Content and SkippedContent under different obj_type, resp. "content" and "skipped_content". """ strategies = [ ("origin", origins), ("origin_visit", origin_visits), ("origin_visit_status", origin_visit_statuses), ("snapshot", snapshots), ("release", releases), ("revision", revisions), ("directory", directories), ] if split_content: strategies.append(("content", present_contents)) strategies.append(("skipped_content", skipped_contents)) else: strategies.append(("content", contents)) args = [ obj_gen().map(lambda x, obj_type=obj_type: (obj_type, x)) for (obj_type, obj_gen) in strategies if obj_type not in blacklist_types ] return one_of(*args) def object_dicts(blacklist_types=("origin_visit_status",), split_content=False): """generates a random couple (type, dict) which dict is suitable for .from_dict() factory methods. `blacklist_types` is a list of obj_type to exclude from the strategy. If `split_content` is True, generates Content and SkippedContent under different obj_type, resp. "content" and "skipped_content". """ strategies = [ ("origin", origins_d), ("origin_visit", origin_visits_d), ("origin_visit_status", origin_visit_statuses_d), ("snapshot", snapshots_d), ("release", releases_d), ("revision", revisions_d), ("directory", directories_d), ] if split_content: strategies.append(("content", present_contents_d)) strategies.append(("skipped_content", skipped_contents_d)) else: strategies.append(("content", contents_d)) args = [ obj_gen().map(lambda x, obj_type=obj_type: (obj_type, x)) for (obj_type, obj_gen) in strategies if obj_type not in blacklist_types ] return one_of(*args) diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py index e1cf0df..63d028d 100644 --- a/swh/model/identifiers.py +++ b/swh/model/identifiers.py @@ -1,862 +1,860 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime -import hashlib - from functools import lru_cache +import hashlib from typing import Any, Dict, Union import attr from deprecated import deprecated from .collections import ImmutableDict from .exceptions import ValidationError from .fields.hashes import validate_sha1 -from .hashutil import hash_git_data, hash_to_hex, MultiHash - +from .hashutil import MultiHash, hash_git_data, hash_to_hex ORIGIN = "origin" SNAPSHOT = "snapshot" REVISION = "revision" RELEASE = "release" DIRECTORY = "directory" CONTENT = "content" SWHID_NAMESPACE = "swh" SWHID_VERSION = 1 SWHID_TYPES = ["ori", "snp", "rel", "rev", "dir", "cnt"] SWHID_SEP = ":" SWHID_CTXT_SEP = ";" # deprecated variables PID_NAMESPACE = SWHID_NAMESPACE PID_VERSION = SWHID_VERSION PID_TYPES = SWHID_TYPES PID_SEP = SWHID_SEP PID_CTXT_SEP = SWHID_CTXT_SEP @lru_cache() def identifier_to_bytes(identifier): """Convert a text identifier to bytes. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 20 bytestring corresponding to the given identifier Raises: ValueError: if the identifier is of an unexpected type or length. 
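
    Example (a sketch, reusing the revision hash from the ``swh-revhash``
    sample above):

    .. code-block:: python

        ident = identifier_to_bytes("17a631d474f49bbebfdf3d885dcde470d7faafd7")
        assert len(ident) == 20
        assert identifier_to_bytes(ident) == ident  # bytes pass through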
""" if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( "Wrong length for bytes identifier %s, expected 20" % len(identifier) ) return identifier if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( "Wrong length for str identifier %s, expected 40" % len(identifier) ) return bytes.fromhex(identifier) raise ValueError( "Wrong type for identifier %s, expected bytes or str" % identifier.__class__.__name__ ) @lru_cache() def identifier_to_str(identifier): """Convert an identifier to an hexadecimal string. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 40 string corresponding to the given identifier, hex encoded Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( "Wrong length for str identifier %s, expected 40" % len(identifier) ) return identifier if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( "Wrong length for bytes identifier %s, expected 20" % len(identifier) ) return binascii.hexlify(identifier).decode() raise ValueError( "Wrong type for identifier %s, expected bytes or str" % identifier.__class__.__name__ ) def content_identifier(content): """Return the intrinsic identifier for a content. A content's identifier is the sha1, sha1_git and sha256 checksums of its data. Args: content: a content conforming to the Software Heritage schema Returns: A dictionary with all the hashes for the data Raises: KeyError: if the content doesn't have a data member. """ return MultiHash.from_data(content["data"]).digest() def directory_entry_sort_key(entry): """The sorting key for tree entries""" if entry["type"] == "dir": return entry["name"] + b"/" else: return entry["name"] @lru_cache() def _perms_to_bytes(perms): """Convert the perms value to its bytes representation""" oc = oct(perms)[2:] return oc.encode("ascii") def escape_newlines(snippet): """Escape the newlines present in snippet according to git rules. New lines in git manifests are escaped by indenting the next line by one space. """ if b"\n" in snippet: return b"\n ".join(snippet.split(b"\n")) else: return snippet def directory_identifier(directory): """Return the intrinsic identifier for a directory. A directory's identifier is the tree sha1 à la git of a directory listing, using the following algorithm, which is equivalent to the git algorithm for trees: 1. Entries of the directory are sorted using the name (or the name with '/' appended for directory entries) as key, in bytes order. 2. 
For each entry of the directory, the following bytes are output: - the octal representation of the permissions for the entry (stored in the 'perms' member), which is a representation of the entry type: - b'100644' (int 33188) for files - b'100755' (int 33261) for executable files - b'120000' (int 40960) for symbolic links - b'40000' (int 16384) for directories - b'160000' (int 57344) for references to revisions - an ascii space (b'\x20') - the entry's name (as raw bytes), stored in the 'name' member - a null byte (b'\x00') - the 20 byte long identifier of the object pointed at by the entry, stored in the 'target' member: - for files or executable files: their blob sha1_git - for symbolic links: the blob sha1_git of a file containing the link destination - for directories: their intrinsic identifier - for revisions: their intrinsic identifier (Note that there is no separator between entries) """ components = [] for entry in sorted(directory["entries"], key=directory_entry_sort_key): components.extend( [ _perms_to_bytes(entry["perms"]), b"\x20", entry["name"], b"\x00", identifier_to_bytes(entry["target"]), ] ) return identifier_to_str(hash_git_data(b"".join(components), "tree")) def format_date(date): """Convert a date object into an UTC timestamp encoded as ascii bytes. Git stores timestamps as an integer number of seconds since the UNIX epoch. However, Software Heritage stores timestamps as an integer number of microseconds (postgres type "datetime with timezone"). Therefore, we print timestamps with no microseconds as integers, and timestamps with microseconds as floating point values. We elide the trailing zeroes from microsecond values, to "future-proof" our representation if we ever need more precision in timestamps. """ if not isinstance(date, dict): raise ValueError("format_date only supports dicts, %r received" % date) seconds = date.get("seconds", 0) microseconds = date.get("microseconds", 0) if not microseconds: return str(seconds).encode() else: float_value = "%d.%06d" % (seconds, microseconds) return float_value.rstrip("0").encode() @lru_cache() def format_offset(offset, negative_utc=None): """Convert an integer number of minutes into an offset representation. The offset representation is [+-]hhmm where: - hh is the number of hours; - mm is the number of minutes. A null offset is represented as +0000. """ if offset < 0 or offset == 0 and negative_utc: sign = "-" else: sign = "+" hours = abs(offset) // 60 minutes = abs(offset) % 60 t = "%s%02d%02d" % (sign, hours, minutes) return t.encode() def normalize_timestamp(time_representation): """Normalize a time representation for processing by Software Heritage This function supports a numeric timestamp (representing a number of seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a :obj:`datetime.datetime` object (with timezone information), or a normalized Software Heritage time representation (idempotency). Args: time_representation: the representation of a timestamp Returns: dict: a normalized dictionary with three keys: - timestamp: a dict with two optional keys: - seconds: the integral number of seconds since the UNIX epoch - microseconds: the integral number of microseconds - offset: the timezone offset as a number of minutes relative to UTC - negative_utc: a boolean representing whether the offset is -0000 when offset = 0. 
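
    Example (a sketch of the three accepted input forms, all normalizing to
    the same dict shape):

    .. code-block:: python

        import datetime

        normalize_timestamp(1234567890)  # plain UNIX timestamp
        normalize_timestamp(  # timezone-aware datetime
            datetime.datetime(2015, 1, 1, tzinfo=datetime.timezone.utc)
        )
        normalize_timestamp(  # already-normalized dict (idempotent)
            {
                "timestamp": {"seconds": 1234567890, "microseconds": 0},
                "offset": 0,
                "negative_utc": False,
            }
        )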
""" if time_representation is None: return None negative_utc = False if isinstance(time_representation, dict): ts = time_representation["timestamp"] if isinstance(ts, dict): seconds = ts.get("seconds", 0) microseconds = ts.get("microseconds", 0) elif isinstance(ts, int): seconds = ts microseconds = 0 else: raise ValueError( "normalize_timestamp received non-integer timestamp member:" " %r" % ts ) offset = time_representation["offset"] if "negative_utc" in time_representation: negative_utc = time_representation["negative_utc"] if negative_utc is None: negative_utc = False elif isinstance(time_representation, datetime.datetime): seconds = int(time_representation.timestamp()) microseconds = time_representation.microsecond utcoffset = time_representation.utcoffset() if utcoffset is None: raise ValueError( "normalize_timestamp received datetime without timezone: %s" % time_representation ) # utcoffset is an integer number of minutes seconds_offset = utcoffset.total_seconds() offset = int(seconds_offset) // 60 elif isinstance(time_representation, int): seconds = time_representation microseconds = 0 offset = 0 else: raise ValueError( "normalize_timestamp received non-integer timestamp:" " %r" % time_representation ) return { "timestamp": {"seconds": seconds, "microseconds": microseconds,}, "offset": offset, "negative_utc": negative_utc, } def format_author(author): """Format the specification of an author. An author is either a byte string (passed unchanged), or a dict with three keys, fullname, name and email. If the fullname exists, return it; if it doesn't, we construct a fullname using the following heuristics: if the name value is None, we return the email in angle brackets, else, we return the name, a space, and the email in angle brackets. """ if isinstance(author, bytes) or author is None: return author if "fullname" in author: return author["fullname"] ret = [] if author["name"] is not None: ret.append(author["name"]) if author["email"] is not None: ret.append(b"".join([b"<", author["email"], b">"])) return b" ".join(ret) def format_author_line(header, author, date_offset): """Format a an author line according to git standards. An author line has three components: - a header, describing the type of author (author, committer, tagger) - a name and email, which is an arbitrary bytestring - optionally, a timestamp with UTC offset specification The author line is formatted thus:: `header` `name and email`[ `timestamp` `utc_offset`] The timestamp is encoded as a (decimal) number of seconds since the UNIX epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we support fractional timestamps, using a dot as the separator for the decimal part. The utc offset is a number of minutes encoded as '[+-]HHMM'. Note some tools can pass a negative offset corresponding to the UTC timezone ('-0000'), which is valid and is encoded as such. For convenience, this function returns the whole line with its trailing newline. Args: header: the header of the author line (one of 'author', 'committer', 'tagger') author: an author specification (dict with two bytes values: name and email, or byte value) date_offset: a normalized date/time representation as returned by :func:`normalize_timestamp`. 
Returns: the newline-terminated byte string containing the author line """ ret = [header.encode(), b" ", escape_newlines(format_author(author))] date_offset = normalize_timestamp(date_offset) if date_offset is not None: date_f = format_date(date_offset["timestamp"]) offset_f = format_offset(date_offset["offset"], date_offset["negative_utc"]) ret.extend([b" ", date_f, b" ", offset_f]) ret.append(b"\n") return b"".join(ret) def revision_identifier(revision): """Return the intrinsic identifier for a revision. The fields used for the revision identifier computation are: - directory - parents - author - author_date - committer - committer_date - extra_headers or metadata -> extra_headers - message A revision's identifier is the 'git'-checksum of a commit manifest constructed as follows (newlines are a single ASCII newline character):: tree [for each parent in parents] parent [end for each parents] author committer [for each key, value in extra_headers] [end for each extra_headers] The directory identifier is the ascii representation of its hexadecimal encoding. Author and committer are formatted with the :func:`format_author` function. Dates are formatted with the :func:`format_offset` function. Extra headers are an ordered list of [key, value] pairs. Keys are strings and get encoded to utf-8 for identifier computation. Values are either byte strings, unicode strings (that get encoded to utf-8), or integers (that get encoded to their utf-8 decimal representation). Multiline extra header values are escaped by indenting the continuation lines with one ascii space. If the message is None, the manifest ends with the last header. Else, the message is appended to the headers after an empty line. The checksum of the full manifest is computed using the 'commit' git object type. 
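A minimal sketch with invented values; ``4b825dc642cb6eb9a060e54bf8d69288fbee4904`` is the well-known id of the empty git tree, i.e. ``directory_identifier({"entries": []})``::

    revision = {
        "directory": "4b825dc642cb6eb9a060e54bf8d69288fbee4904",  # empty tree
        "parents": [],
        "author": b"Jane Doe <jane@example.com>",  # invented author
        "date": 1577836800,
        "committer": b"Jane Doe <jane@example.com>",
        "committer_date": 1577836800,
        "message": b"Initial commit\n",
    }
    # The returned hex string is the same value `git hash-object -t commit`
    # computes for the equivalent commit object.
    rev_id = revision_identifier(revision)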
""" components = [ b"tree ", identifier_to_str(revision["directory"]).encode(), b"\n", ] for parent in revision["parents"]: if parent: components.extend( [b"parent ", identifier_to_str(parent).encode(), b"\n",] ) components.extend( [ format_author_line("author", revision["author"], revision["date"]), format_author_line( "committer", revision["committer"], revision["committer_date"] ), ] ) # Handle extra headers metadata = revision.get("metadata") or {} extra_headers = revision.get("extra_headers", ()) if not extra_headers and "extra_headers" in metadata: extra_headers = metadata["extra_headers"] for key, value in extra_headers: components.extend([key, b" ", escape_newlines(value), b"\n"]) if revision["message"] is not None: components.extend([b"\n", revision["message"]]) commit_raw = b"".join(components) return identifier_to_str(hash_git_data(commit_raw, "commit")) def target_type_to_git(target_type): """Convert a software heritage target type to a git object type""" return { "content": b"blob", "directory": b"tree", "revision": b"commit", "release": b"tag", "snapshot": b"refs", }[target_type] def release_identifier(release): """Return the intrinsic identifier for a release.""" components = [ b"object ", identifier_to_str(release["target"]).encode(), b"\n", b"type ", target_type_to_git(release["target_type"]), b"\n", b"tag ", release["name"], b"\n", ] if "author" in release and release["author"]: components.append( format_author_line("tagger", release["author"], release["date"]) ) if release["message"] is not None: components.extend([b"\n", release["message"]]) return identifier_to_str(hash_git_data(b"".join(components), "tag")) def snapshot_identifier(snapshot, *, ignore_unresolved=False): """Return the intrinsic identifier for a snapshot. Snapshots are a set of named branches, which are pointers to objects at any level of the Software Heritage DAG. As well as pointing to other objects in the Software Heritage DAG, branches can also be *alias*es, in which case their target is the name of another branch in the same snapshot, or *dangling*, in which case the target is unknown (and represented by the ``None`` value). A snapshot identifier is a salted sha1 (using the git hashing algorithm with the ``snapshot`` object type) of a manifest following the algorithm: 1. Branches are sorted using the name as key, in bytes order. 2. For each branch, the following bytes are output: - the type of the branch target: - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` for the corresponding entries in the DAG; - ``alias`` for branches referencing another branch; - ``dangling`` for dangling branches - an ascii space (``\\x20``) - the branch name (as raw bytes) - a null byte (``\\x00``) - the length of the target identifier, as an ascii-encoded decimal number (``20`` for current intrinsic identifiers, ``0`` for dangling branches, the length of the target branch name for branch aliases) - a colon (``:``) - the identifier of the target object pointed at by the branch, stored in the 'target' member: - for contents: their *sha1_git* - for directories, revisions, releases or snapshots: their intrinsic identifier - for branch aliases, the name of the target branch (as raw bytes) - for dangling branches, the empty string Note that, akin to directory manifests, there is no separator between entries. Because of symbolic branches, identifiers are of arbitrary length but are length-encoded to avoid ambiguity. Args: snapshot (dict): the snapshot of which to compute the identifier. 
A single entry is needed, ``'branches'``, which is itself a :class:`dict` mapping each branch to its target ignore_unresolved (bool): if `True`, ignore unresolved branch aliases. Returns: str: the intrinsic identifier for `snapshot` """ unresolved = [] lines = [] for name, target in sorted(snapshot["branches"].items()): if not target: target_type = b"dangling" target_id = b"" elif target["target_type"] == "alias": target_type = b"alias" target_id = target["target"] if target_id not in snapshot["branches"] or target_id == name: unresolved.append((name, target_id)) else: target_type = target["target_type"].encode() target_id = identifier_to_bytes(target["target"]) lines.extend( [ target_type, b"\x20", name, b"\x00", ("%d:" % len(target_id)).encode(), target_id, ] ) if unresolved and not ignore_unresolved: raise ValueError( "Branch aliases unresolved: %s" % ", ".join("%s -> %s" % x for x in unresolved), unresolved, ) return identifier_to_str(hash_git_data(b"".join(lines), "snapshot")) def origin_identifier(origin): """Return the intrinsic identifier for an origin. An origin's identifier is the sha1 checksum of the entire origin URL """ return hashlib.sha1(origin["url"].encode("utf-8")).hexdigest() _object_type_map = { ORIGIN: {"short_name": "ori", "key_id": "id"}, SNAPSHOT: {"short_name": "snp", "key_id": "id"}, RELEASE: {"short_name": "rel", "key_id": "id"}, REVISION: {"short_name": "rev", "key_id": "id"}, DIRECTORY: {"short_name": "dir", "key_id": "id"}, CONTENT: {"short_name": "cnt", "key_id": "sha1_git"}, } @attr.s(frozen=True) class SWHID: """ Named tuple holding the relevant info associated to a SoftWare Heritage persistent IDentifier (SWHID) Args: namespace (str): the namespace of the identifier, defaults to ``swh`` scheme_version (int): the scheme version of the identifier, defaults to 1 object_type (str): the type of object the identifier points to, either ``content``, ``directory``, ``release``, ``revision`` or ``snapshot`` object_id (str): object's identifier metadata (dict): optional dict filled with metadata related to pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Once created, it contains the following attributes: Attributes: namespace (str): the namespace of the identifier scheme_version (int): the scheme version of the identifier object_type (str): the type of object the identifier points to object_id (str): hexadecimal representation of the object hash metadata (dict): metadata related to the pointed object To get the raw SWHID string from an instance of this named tuple, use the :func:`str` function:: swhid = SWHID( object_type='content', object_id='8ff44f081d43176474b267de5451f2c2e88089d0' ) swhid_str = str(swhid) # 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0' """ namespace = attr.ib(type=str, default="swh") scheme_version = attr.ib(type=int, default=1) object_type = attr.ib(type=str, default="") object_id = attr.ib(type=str, converter=hash_to_hex, default="") # type: ignore metadata = attr.ib( type=ImmutableDict[str, Any], converter=ImmutableDict, default=ImmutableDict() ) @namespace.validator def check_namespace(self, attribute, value): if value != SWHID_NAMESPACE: raise ValidationError( "Wrong format: only supported namespace is '%s'" % SWHID_NAMESPACE ) @scheme_version.validator def check_scheme_version(self, attribute, value): if value != SWHID_VERSION: raise ValidationError( "Wrong format: only supported version is %d" % SWHID_VERSION ) @object_type.validator def check_object_type(self, attribute, value): if 
value not in _object_type_map: raise ValidationError( "Wrong input: Supported types are %s" % (list(_object_type_map.keys())) ) @object_id.validator def check_object_id(self, attribute, value): validate_sha1(value) # can raise if invalid hash def to_dict(self) -> Dict[str, Any]: return attr.asdict(self) def __str__(self) -> str: o = _object_type_map.get(self.object_type) assert o swhid = SWHID_SEP.join( [self.namespace, str(self.scheme_version), o["short_name"], self.object_id] ) if self.metadata: for k, v in self.metadata.items(): swhid += "%s%s=%s" % (SWHID_CTXT_SEP, k, v) return swhid @deprecated("Use swh.model.identifiers.SWHID instead") class PersistentId(SWHID): """ Named tuple holding the relevant info associated to a SoftWare Heritage persistent IDentifier. .. deprecated:: 0.3.8 Use :class:`swh.model.identifiers.SWHID` instead """ pass def swhid( object_type: str, object_id: Union[str, Dict[str, Any]], scheme_version: int = 1, metadata: Union[ImmutableDict[str, Any], Dict[str, Any]] = ImmutableDict(), ) -> str: """Compute :ref:`persistent-identifiers` Args: object_type: object's type, either ``content``, ``directory``, ``release``, ``revision`` or ``snapshot`` object_id: object's identifier scheme_version: SWHID scheme version, defaults to 1 metadata: metadata related to the pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Returns: the SWHID of the object """ if isinstance(object_id, dict): o = _object_type_map[object_type] object_id = object_id[o["key_id"]] swhid = SWHID( scheme_version=scheme_version, object_type=object_type, object_id=object_id, metadata=metadata, # type: ignore # mypy can't properly unify types ) return str(swhid) @deprecated("Use swh.model.identifiers.swhid instead") def persistent_identifier(*args, **kwargs) -> str: """Compute :ref:`persistent-identifiers` .. deprecated:: 0.3.8 Use :func:`swh.model.identifiers.swhid` instead """ return swhid(*args, **kwargs) def parse_swhid(swhid: str) -> SWHID: """Parse :ref:`persistent-identifiers`. Args: swhid (str): A persistent identifier Raises: swh.model.exceptions.ValidationError: in case of: * missing mandatory values (4) * invalid namespace supplied * invalid version supplied * invalid type supplied * missing hash * invalid hash identifier supplied Returns: a named tuple holding the parsing result """ # ; swhid_parts = swhid.split(SWHID_CTXT_SEP) swhid_data = swhid_parts.pop(0).split(":") if len(swhid_data) != 4: raise ValidationError("Wrong format: There should be 4 mandatory values") # Checking for parsing errors _ns, _version, _type, _id = swhid_data for otype, data in _object_type_map.items(): if _type == data["short_name"]: _type = otype break if not _id: raise ValidationError("Wrong format: Identifier should be present") _metadata = {} for part in swhid_parts: try: key, val = part.split("=") _metadata[key] = val except Exception: msg = "Contextual data is badly formatted, form key=val expected" raise ValidationError(msg) return SWHID( _ns, int(_version), _type, _id, _metadata, # type: ignore # mypy can't properly unify types ) @deprecated("Use swh.model.identifiers.parse_swhid instead") def parse_persistent_identifier(persistent_id: str) -> PersistentId: """Parse :ref:`persistent-identifiers`. .. 
deprecated:: 0.3.8 Use :func:`swh.model.identifiers.parse_swhid` instead """ return PersistentId(**parse_swhid(persistent_id).to_dict()) diff --git a/swh/model/merkle.py b/swh/model/merkle.py index 0311d9d..e84ef9d 100644 --- a/swh/model/merkle.py +++ b/swh/model/merkle.py @@ -1,312 +1,311 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Merkle tree data structure""" import abc from collections.abc import Mapping - from typing import Iterator, List, Set def deep_update(left, right): """Recursively update the left mapping with deeply nested values from the right mapping. This function is useful to merge the results of several calls to :func:`MerkleNode.collect`. Arguments: left: a mapping (modified by the update operation) right: a mapping Returns: the left mapping, updated with nested values from the right mapping Example: >>> a = { ... 'key1': { ... 'key2': { ... 'key3': 'value1/2/3', ... }, ... }, ... } >>> deep_update(a, { ... 'key1': { ... 'key2': { ... 'key4': 'value1/2/4', ... }, ... }, ... }) == { ... 'key1': { ... 'key2': { ... 'key3': 'value1/2/3', ... 'key4': 'value1/2/4', ... }, ... }, ... } True >>> deep_update(a, { ... 'key1': { ... 'key2': { ... 'key3': 'newvalue1/2/3', ... }, ... }, ... }) == { ... 'key1': { ... 'key2': { ... 'key3': 'newvalue1/2/3', ... 'key4': 'value1/2/4', ... }, ... }, ... } True """ for key, rvalue in right.items(): if isinstance(rvalue, Mapping): new_lvalue = deep_update(left.get(key, {}), rvalue) left[key] = new_lvalue else: left[key] = rvalue return left class MerkleNode(dict, metaclass=abc.ABCMeta): """Representation of a node in a Merkle Tree. A (generalized) `Merkle Tree`_ is a tree in which every node is labeled with a hash of its own data and the hash of its children. .. _Merkle Tree: https://en.wikipedia.org/wiki/Merkle_tree In pseudocode:: node.hash = hash(node.data + sum(child.hash for child in node.children)) This class efficiently implements the Merkle Tree data structure on top of a Python :class:`dict`, minimizing hash computations and new data collections when updating nodes. Node data is stored in the :attr:`data` attribute, while (named) children are stored as items of the underlying dictionary. Addition, update and removal of objects are instrumented to automatically invalidate the hashes of the current node as well as its registered parents; It also resets the collection status of the objects so the updated objects can be collected. The collection of updated data from the tree is implemented through the :func:`collect` function and associated helpers. 
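A toy sketch of a concrete subclass (the hashing scheme below is invented for illustration; real subclasses live in :mod:`swh.model.from_disk`). Subclasses must set the ``object_type`` classifier used by :func:`collect` and implement :func:`compute_hash`::

    import hashlib

    class ToyNode(MerkleNode):
        object_type = "toy"

        def compute_hash(self):
            # hash the node data, then the children hashes in key order
            h = hashlib.sha1(self.data or b"")
            for name in sorted(self):
                h.update(self[name].hash)
            return h.digest()

    root = ToyNode(b"root data")
    root[b"child"] = ToyNode(b"child data")  # invalidates root's cached hash
    root.hash       # computed lazily, cached until the tree changes
    root.collect()  # {'toy': {root.hash: b'root data', ...}}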
Attributes: data (dict): data associated to the current node parents (list): known parents of the current node collected (bool): whether the current node has been collected """ __slots__ = ["parents", "data", "__hash", "collected"] """Type of the current node (used as a classifier for :func:`collect`)""" def __init__(self, data=None): super().__init__() self.parents = [] self.data = data self.__hash = None self.collected = False def __eq__(self, other): return ( isinstance(other, MerkleNode) and super().__eq__(other) and self.data == other.data ) def __ne__(self, other): return not self.__eq__(other) def invalidate_hash(self): """Invalidate the cached hash of the current node.""" if not self.__hash: return self.__hash = None self.collected = False for parent in self.parents: parent.invalidate_hash() def update_hash(self, *, force=False): """Recursively compute the hash of the current node. Args: force (bool): invalidate the cache and force the computation for this node and all children. """ if self.__hash and not force: return self.__hash if force: self.invalidate_hash() for child in self.values(): child.update_hash(force=force) self.__hash = self.compute_hash() return self.__hash @property def hash(self): """The hash of the current node, as calculated by :func:`compute_hash`. """ return self.update_hash() @abc.abstractmethod def compute_hash(self): """Compute the hash of the current node. The hash should depend on the data of the node, as well as on hashes of the children nodes. """ raise NotImplementedError("Must implement compute_hash method") def __setitem__(self, name, new_child): """Add a child, invalidating the current hash""" self.invalidate_hash() super().__setitem__(name, new_child) new_child.parents.append(self) def __delitem__(self, name): """Remove a child, invalidating the current hash""" if name in self: self.invalidate_hash() self[name].parents.remove(self) super().__delitem__(name) else: raise KeyError(name) def update(self, new_children): """Add several named children from a dictionary""" if not new_children: return self.invalidate_hash() for name, new_child in new_children.items(): new_child.parents.append(self) if name in self: self[name].parents.remove(self) super().update(new_children) def get_data(self, **kwargs): """Retrieve and format the collected data for the current node, for use by :func:`collect`. Can be overridden, for instance when you want the collected data to contain information about the child nodes. Arguments: kwargs: allow subclasses to alter behaviour depending on how :func:`collect` is called. Returns: data formatted for :func:`collect` """ return self.data def collect_node(self, **kwargs): """Collect the data for the current node, for use by :func:`collect`. Arguments: kwargs: passed as-is to :func:`get_data`. Returns: A :class:`dict` compatible with :func:`collect`. """ if not self.collected: self.collected = True return {self.object_type: {self.hash: self.get_data(**kwargs)}} else: return {} def collect(self, **kwargs): """Collect the data for all nodes in the subtree rooted at `self`. The data is deduplicated by type and by hash. Arguments: kwargs: passed as-is to :func:`get_data`. Returns: A :class:`dict` with the following structure:: { 'typeA': { node1.hash: node1.get_data(), node2.hash: node2.get_data(), }, 'typeB': { node3.hash: node3.get_data(), ... }, ... 
} """ ret = self.collect_node(**kwargs) for child in self.values(): deep_update(ret, child.collect(**kwargs)) return ret def reset_collect(self): """Recursively unmark collected nodes in the subtree rooted at `self`. This lets the caller use :func:`collect` again. """ self.collected = False for child in self.values(): child.reset_collect() def iter_tree(self) -> Iterator["MerkleNode"]: """Yields all children nodes, recursively. Common nodes are deduplicated. """ yield from self._iter_tree(set()) def _iter_tree(self, seen: Set[bytes]) -> Iterator["MerkleNode"]: if self.hash not in seen: seen.add(self.hash) yield self for child in self.values(): yield from child._iter_tree(seen=seen) class MerkleLeaf(MerkleNode): """A leaf to a Merkle tree. A Merkle leaf is simply a Merkle node with children disabled. """ __slots__ = [] # type: List[str] def __setitem__(self, name, child): raise ValueError("%s is a leaf" % self.__class__.__name__) def __getitem__(self, name): raise ValueError("%s is a leaf" % self.__class__.__name__) def __delitem__(self, name): raise ValueError("%s is a leaf" % self.__class__.__name__) def update(self, new_children): """Children update operation. Disabled for leaves.""" raise ValueError("%s is a leaf" % self.__class__.__name__) diff --git a/swh/model/model.py b/swh/model/model.py index 219d7c5..d719966 100644 --- a/swh/model/model.py +++ b/swh/model/model.py @@ -1,1027 +1,1026 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -import datetime - from abc import ABCMeta, abstractmethod +import datetime from enum import Enum from hashlib import sha256 from typing import Any, Dict, Iterable, Optional, Tuple, TypeVar, Union -from typing_extensions import Final import attr from attrs_strict import type_validator import dateutil.parser import iso8601 +from typing_extensions import Final from .collections import ImmutableDict -from .hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, MultiHash +from .hashutil import DEFAULT_ALGORITHMS, MultiHash, hash_to_bytes from .identifiers import ( - normalize_timestamp, + SWHID, directory_identifier, - revision_identifier, + normalize_timestamp, + parse_swhid, release_identifier, + revision_identifier, snapshot_identifier, - SWHID, - parse_swhid, ) class MissingData(Exception): """Raised by `Content.with_data` when it has no way of fetching the data (but not when fetching the data fails).""" pass SHA1_SIZE = 20 # TODO: Limit this to 20 bytes Sha1Git = bytes Sha1 = bytes KT = TypeVar("KT") VT = TypeVar("VT") def freeze_optional_dict( d: Union[None, Dict[KT, VT], ImmutableDict[KT, VT]] # type: ignore ) -> Optional[ImmutableDict[KT, VT]]: if isinstance(d, dict): return ImmutableDict(d) else: return d def dictify(value): "Helper function used by BaseModel.to_dict()" if isinstance(value, BaseModel): return value.to_dict() elif isinstance(value, SWHID): return str(value) elif isinstance(value, Enum): return value.value elif isinstance(value, (dict, ImmutableDict)): return {k: dictify(v) for k, v in value.items()} elif isinstance(value, tuple): return tuple(dictify(v) for v in value) else: return value ModelType = TypeVar("ModelType", bound="BaseModel") class BaseModel: """Base class for SWH model classes. 
Provides serialization/deserialization to/from Python dictionaries that are suitable for JSON/msgpack-like formats.""" def to_dict(self): """Wrapper of `attr.asdict` that can be overridden by subclasses that have special handling of some of the fields.""" return dictify(attr.asdict(self, recurse=False)) @classmethod def from_dict(cls, d): """Takes a dictionary representing a tree of SWH objects, and recursively builds the corresponding objects.""" return cls(**d) def anonymize(self: ModelType) -> Optional[ModelType]: """Returns an anonymized version of the object, if needed. If the object model does not need/support anonymization, returns None. """ return None class HashableObject(metaclass=ABCMeta): """Mixin to automatically compute object identifier hash when the associated model is instantiated.""" @staticmethod @abstractmethod def compute_hash(object_dict): """Derived model classes must implement this to compute the object hash from its dict representation.""" pass def __attrs_post_init__(self): if not self.id: obj_id = hash_to_bytes(self.compute_hash(self.to_dict())) object.__setattr__(self, "id", obj_id) @attr.s(frozen=True) class Person(BaseModel): """Represents the author/committer of a revision or release.""" object_type: Final = "person" fullname = attr.ib(type=bytes, validator=type_validator()) name = attr.ib(type=Optional[bytes], validator=type_validator()) email = attr.ib(type=Optional[bytes], validator=type_validator()) @classmethod def from_fullname(cls, fullname: bytes): """Returns a Person object, by guessing the name and email from the fullname, in the `name <email>` format. The fullname is left unchanged.""" if fullname is None: raise TypeError("fullname is None.") name: Optional[bytes] email: Optional[bytes] try: open_bracket = fullname.index(b"<") except ValueError: name = fullname email = None else: raw_name = fullname[:open_bracket] raw_email = fullname[open_bracket + 1 :] if not raw_name: name = None else: name = raw_name.strip() try: close_bracket = raw_email.rindex(b">") except ValueError: email = raw_email else: email = raw_email[:close_bracket] return Person(name=name or None, email=email or None, fullname=fullname,) def anonymize(self) -> "Person": """Returns an anonymized version of the Person object. Anonymization replaces the fullname with its raw sha256 digest, and unsets the name and email.
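A short sketch with an invented fullname::

    >>> p = Person.from_fullname(b"Jane Doe <jane@example.com>")
    >>> (p.name, p.email)
    (b'Jane Doe', b'jane@example.com')
    >>> anon = p.anonymize()
    >>> (anon.name, anon.email)
    (None, None)
    >>> len(anon.fullname)  # raw sha256 digest
    32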
""" return Person(fullname=sha256(self.fullname).digest(), name=None, email=None,) @attr.s(frozen=True) class Timestamp(BaseModel): """Represents a naive timestamp from a VCS.""" object_type: Final = "timestamp" seconds = attr.ib(type=int, validator=type_validator()) microseconds = attr.ib(type=int, validator=type_validator()) @seconds.validator def check_seconds(self, attribute, value): """Check that seconds fit in a 64-bits signed integer.""" if not (-(2 ** 63) <= value < 2 ** 63): raise ValueError("Seconds must be a signed 64-bits integer.") @microseconds.validator def check_microseconds(self, attribute, value): """Checks that microseconds are positive and < 1000000.""" if not (0 <= value < 10 ** 6): raise ValueError("Microseconds must be in [0, 1000000[.") @attr.s(frozen=True) class TimestampWithTimezone(BaseModel): """Represents a TZ-aware timestamp from a VCS.""" object_type: Final = "timestamp_with_timezone" timestamp = attr.ib(type=Timestamp, validator=type_validator()) offset = attr.ib(type=int, validator=type_validator()) negative_utc = attr.ib(type=bool, validator=type_validator()) @offset.validator def check_offset(self, attribute, value): """Checks the offset is a 16-bits signed integer (in theory, it should always be between -14 and +14 hours).""" if not (-(2 ** 15) <= value < 2 ** 15): # max 14 hours offset in theory, but you never know what # you'll find in the wild... raise ValueError("offset too large: %d minutes" % value) @negative_utc.validator def check_negative_utc(self, attribute, value): if self.offset and value: raise ValueError("negative_utc can only be True is offset=0") @classmethod def from_dict(cls, obj: Union[Dict, datetime.datetime, int]): """Builds a TimestampWithTimezone from any of the formats accepted by :func:`swh.model.normalize_timestamp`.""" # TODO: this accept way more types than just dicts; find a better # name d = normalize_timestamp(obj) return cls( timestamp=Timestamp.from_dict(d["timestamp"]), offset=d["offset"], negative_utc=d["negative_utc"], ) @classmethod def from_datetime(cls, dt: datetime.datetime): return cls.from_dict(dt) @classmethod def from_iso8601(cls, s): """Builds a TimestampWithTimezone from an ISO8601-formatted string. """ dt = iso8601.parse_date(s) tstz = cls.from_datetime(dt) if dt.tzname() == "-00:00": tstz = attr.evolve(tstz, negative_utc=True) return tstz @attr.s(frozen=True) class Origin(BaseModel): """Represents a software source: a VCS and an URL.""" object_type: Final = "origin" url = attr.ib(type=str, validator=type_validator()) @attr.s(frozen=True) class OriginVisit(BaseModel): """Represents an origin visit with a given type at a given point in time, by a SWH loader.""" object_type: Final = "origin_visit" origin = attr.ib(type=str, validator=type_validator()) date = attr.ib(type=datetime.datetime, validator=type_validator()) type = attr.ib(type=str, validator=type_validator()) """Should not be set before calling 'origin_visit_add()'.""" visit = attr.ib(type=Optional[int], validator=type_validator(), default=None) @date.validator def check_date(self, attribute, value): """Checks the date has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("date must be a timezone-aware datetime.") def to_dict(self): """Serializes the date as a string and omits the visit id if it is `None`.""" ov = super().to_dict() if ov["visit"] is None: del ov["visit"] return ov @attr.s(frozen=True) class OriginVisitStatus(BaseModel): """Represents a visit update of an origin at a given point in time. 
""" object_type: Final = "origin_visit_status" origin = attr.ib(type=str, validator=type_validator()) visit = attr.ib(type=int, validator=type_validator()) date = attr.ib(type=datetime.datetime, validator=type_validator()) status = attr.ib( type=str, validator=attr.validators.in_(["created", "ongoing", "full", "partial"]), ) snapshot = attr.ib(type=Optional[Sha1Git], validator=type_validator()) metadata = attr.ib( type=Optional[ImmutableDict[str, object]], validator=type_validator(), converter=freeze_optional_dict, default=None, ) @date.validator def check_date(self, attribute, value): """Checks the date has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("date must be a timezone-aware datetime.") class TargetType(Enum): """The type of content pointed to by a snapshot branch. Usually a revision or an alias.""" CONTENT = "content" DIRECTORY = "directory" REVISION = "revision" RELEASE = "release" SNAPSHOT = "snapshot" ALIAS = "alias" class ObjectType(Enum): """The type of content pointed to by a release. Usually a revision""" CONTENT = "content" DIRECTORY = "directory" REVISION = "revision" RELEASE = "release" SNAPSHOT = "snapshot" @attr.s(frozen=True) class SnapshotBranch(BaseModel): """Represents one of the branches of a snapshot.""" object_type: Final = "snapshot_branch" target = attr.ib(type=bytes, validator=type_validator()) target_type = attr.ib(type=TargetType, validator=type_validator()) @target.validator def check_target(self, attribute, value): """Checks the target type is not an alias, checks the target is a valid sha1_git.""" if self.target_type != TargetType.ALIAS and self.target is not None: if len(value) != 20: raise ValueError("Wrong length for bytes identifier: %d" % len(value)) @classmethod def from_dict(cls, d): return cls(target=d["target"], target_type=TargetType(d["target_type"])) @attr.s(frozen=True) class Snapshot(BaseModel, HashableObject): """Represents the full state of an origin at a given point in time.""" object_type: Final = "snapshot" branches = attr.ib( type=ImmutableDict[bytes, Optional[SnapshotBranch]], validator=type_validator(), converter=freeze_optional_dict, ) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") @staticmethod def compute_hash(object_dict): return snapshot_identifier(object_dict) @classmethod def from_dict(cls, d): d = d.copy() return cls( branches=ImmutableDict( (name, SnapshotBranch.from_dict(branch) if branch else None) for (name, branch) in d.pop("branches").items() ), **d, ) @attr.s(frozen=True) class Release(BaseModel, HashableObject): object_type: Final = "release" name = attr.ib(type=bytes, validator=type_validator()) message = attr.ib(type=Optional[bytes], validator=type_validator()) target = attr.ib(type=Optional[Sha1Git], validator=type_validator()) target_type = attr.ib(type=ObjectType, validator=type_validator()) synthetic = attr.ib(type=bool, validator=type_validator()) author = attr.ib(type=Optional[Person], validator=type_validator(), default=None) date = attr.ib( type=Optional[TimestampWithTimezone], validator=type_validator(), default=None ) metadata = attr.ib( type=Optional[ImmutableDict[str, object]], validator=type_validator(), converter=freeze_optional_dict, default=None, ) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") @staticmethod def compute_hash(object_dict): return release_identifier(object_dict) @author.validator def check_author(self, attribute, value): """If the author is `None`, checks the date is `None` too.""" if self.author is None and 
self.date is not None: raise ValueError("release date must be None if author is None.") def to_dict(self): rel = super().to_dict() if rel["metadata"] is None: del rel["metadata"] return rel @classmethod def from_dict(cls, d): d = d.copy() if d.get("author"): d["author"] = Person.from_dict(d["author"]) if d.get("date"): d["date"] = TimestampWithTimezone.from_dict(d["date"]) return cls(target_type=ObjectType(d.pop("target_type")), **d) def anonymize(self) -> "Release": """Returns an anonymized version of the Release object. Anonymization consists in replacing the author with an anonymized Person object. """ author = self.author and self.author.anonymize() return attr.evolve(self, author=author) class RevisionType(Enum): GIT = "git" TAR = "tar" DSC = "dsc" SUBVERSION = "svn" MERCURIAL = "hg" def tuplify_extra_headers(value: Iterable): return tuple((k, v) for k, v in value) @attr.s(frozen=True) class Revision(BaseModel, HashableObject): object_type: Final = "revision" message = attr.ib(type=Optional[bytes], validator=type_validator()) author = attr.ib(type=Person, validator=type_validator()) committer = attr.ib(type=Person, validator=type_validator()) date = attr.ib(type=Optional[TimestampWithTimezone], validator=type_validator()) committer_date = attr.ib( type=Optional[TimestampWithTimezone], validator=type_validator() ) type = attr.ib(type=RevisionType, validator=type_validator()) directory = attr.ib(type=Sha1Git, validator=type_validator()) synthetic = attr.ib(type=bool, validator=type_validator()) metadata = attr.ib( type=Optional[ImmutableDict[str, object]], validator=type_validator(), converter=freeze_optional_dict, default=None, ) parents = attr.ib(type=Tuple[Sha1Git, ...], validator=type_validator(), default=()) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") extra_headers = attr.ib( type=Tuple[Tuple[bytes, bytes], ...], validator=type_validator(), converter=tuplify_extra_headers, default=(), ) def __attrs_post_init__(self): super().__attrs_post_init__() # ensure metadata is a deep copy of whatever was given, and if needed # extract extra_headers from there if self.metadata: metadata = self.metadata if not self.extra_headers and "extra_headers" in metadata: (extra_headers, metadata) = metadata.copy_pop("extra_headers") object.__setattr__( self, "extra_headers", tuplify_extra_headers(extra_headers), ) attr.validate(self) object.__setattr__(self, "metadata", metadata) @staticmethod def compute_hash(object_dict): return revision_identifier(object_dict) @classmethod def from_dict(cls, d): d = d.copy() date = d.pop("date") if date: date = TimestampWithTimezone.from_dict(date) committer_date = d.pop("committer_date") if committer_date: committer_date = TimestampWithTimezone.from_dict(committer_date) return cls( author=Person.from_dict(d.pop("author")), committer=Person.from_dict(d.pop("committer")), date=date, committer_date=committer_date, type=RevisionType(d.pop("type")), parents=tuple(d.pop("parents")), # for BW compat **d, ) def anonymize(self) -> "Revision": """Returns an anonymized version of the Revision object. Anonymization consists in replacing the author and committer with an anonymized Person object. 
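A sketch, assuming an existing ``revision`` instance::

    anon = revision.anonymize()
    # both Person objects are replaced by their anonymized counterparts
    assert anon.author == revision.author.anonymize()
    assert anon.committer == revision.committer.anonymize()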
""" return attr.evolve( self, author=self.author.anonymize(), committer=self.committer.anonymize() ) @attr.s(frozen=True) class DirectoryEntry(BaseModel): object_type: Final = "directory_entry" name = attr.ib(type=bytes, validator=type_validator()) type = attr.ib(type=str, validator=attr.validators.in_(["file", "dir", "rev"])) target = attr.ib(type=Sha1Git, validator=type_validator()) perms = attr.ib(type=int, validator=type_validator()) """Usually one of the values of `swh.model.from_disk.DentryPerms`.""" @attr.s(frozen=True) class Directory(BaseModel, HashableObject): object_type: Final = "directory" entries = attr.ib(type=Tuple[DirectoryEntry, ...], validator=type_validator()) id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") @staticmethod def compute_hash(object_dict): return directory_identifier(object_dict) @classmethod def from_dict(cls, d): d = d.copy() return cls( entries=tuple( DirectoryEntry.from_dict(entry) for entry in d.pop("entries") ), **d, ) @attr.s(frozen=True) class BaseContent(BaseModel): status = attr.ib( type=str, validator=attr.validators.in_(["visible", "hidden", "absent"]) ) @staticmethod def _hash_data(data: bytes): """Hash some data, returning most of the fields of a content object""" d = MultiHash.from_data(data).digest() d["data"] = data d["length"] = len(data) return d @classmethod def from_dict(cls, d, use_subclass=True): if use_subclass: # Chooses a subclass to instantiate instead. if d["status"] == "absent": return SkippedContent.from_dict(d) else: return Content.from_dict(d) else: return super().from_dict(d) def get_hash(self, hash_name): if hash_name not in DEFAULT_ALGORITHMS: raise ValueError("{} is not a valid hash name.".format(hash_name)) return getattr(self, hash_name) def hashes(self) -> Dict[str, bytes]: """Returns a dictionary {hash_name: hash_value}""" return {algo: getattr(self, algo) for algo in DEFAULT_ALGORITHMS} @attr.s(frozen=True) class Content(BaseContent): object_type: Final = "content" sha1 = attr.ib(type=bytes, validator=type_validator()) sha1_git = attr.ib(type=Sha1Git, validator=type_validator()) sha256 = attr.ib(type=bytes, validator=type_validator()) blake2s256 = attr.ib(type=bytes, validator=type_validator()) length = attr.ib(type=int, validator=type_validator()) status = attr.ib( type=str, validator=attr.validators.in_(["visible", "hidden"]), default="visible", ) data = attr.ib(type=Optional[bytes], validator=type_validator(), default=None) ctime = attr.ib( type=Optional[datetime.datetime], validator=type_validator(), default=None, eq=False, ) @length.validator def check_length(self, attribute, value): """Checks the length is positive.""" if value < 0: raise ValueError("Length must be positive.") @ctime.validator def check_ctime(self, attribute, value): """Checks the ctime has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("ctime must be a timezone-aware datetime.") def to_dict(self): content = super().to_dict() if content["data"] is None: del content["data"] if content["ctime"] is None: del content["ctime"] return content @classmethod def from_data(cls, data, status="visible", ctime=None) -> "Content": """Generate a Content from a given `data` byte string. This populates the Content with the hashes and length for the data passed as argument, as well as the data itself. 
""" d = cls._hash_data(data) d["status"] = status d["ctime"] = ctime return cls(**d) @classmethod def from_dict(cls, d): if isinstance(d.get("ctime"), str): d = d.copy() d["ctime"] = dateutil.parser.parse(d["ctime"]) return super().from_dict(d, use_subclass=False) def with_data(self) -> "Content": """Loads the `data` attribute; meaning that it is guaranteed not to be None after this call. This call is almost a no-op, but subclasses may overload this method to lazy-load data (eg. from disk or objstorage).""" if self.data is None: raise MissingData("Content data is None.") return self @attr.s(frozen=True) class SkippedContent(BaseContent): object_type: Final = "skipped_content" sha1 = attr.ib(type=Optional[bytes], validator=type_validator()) sha1_git = attr.ib(type=Optional[Sha1Git], validator=type_validator()) sha256 = attr.ib(type=Optional[bytes], validator=type_validator()) blake2s256 = attr.ib(type=Optional[bytes], validator=type_validator()) length = attr.ib(type=Optional[int], validator=type_validator()) status = attr.ib(type=str, validator=attr.validators.in_(["absent"])) reason = attr.ib(type=Optional[str], validator=type_validator(), default=None) origin = attr.ib(type=Optional[str], validator=type_validator(), default=None) ctime = attr.ib( type=Optional[datetime.datetime], validator=type_validator(), default=None, eq=False, ) @reason.validator def check_reason(self, attribute, value): """Checks the reason is full if status != absent.""" assert self.reason == value if value is None: raise ValueError("Must provide a reason if content is absent.") @length.validator def check_length(self, attribute, value): """Checks the length is positive or -1.""" if value < -1: raise ValueError("Length must be positive or -1.") @ctime.validator def check_ctime(self, attribute, value): """Checks the ctime has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("ctime must be a timezone-aware datetime.") def to_dict(self): content = super().to_dict() if content["origin"] is None: del content["origin"] if content["ctime"] is None: del content["ctime"] return content @classmethod def from_data( cls, data: bytes, reason: str, ctime: Optional[datetime.datetime] = None ) -> "SkippedContent": """Generate a SkippedContent from a given `data` byte string. This populates the SkippedContent with the hashes and length for the data passed as argument. You can use `attr.evolve` on such a generated content to nullify some of its attributes, e.g. for tests. 
""" d = cls._hash_data(data) del d["data"] d["status"] = "absent" d["reason"] = reason d["ctime"] = ctime return cls(**d) @classmethod def from_dict(cls, d): d2 = d.copy() if d2.pop("data", None) is not None: raise ValueError('SkippedContent has no "data" attribute %r' % d) return super().from_dict(d2, use_subclass=False) class MetadataAuthorityType(Enum): DEPOSIT_CLIENT = "deposit_client" FORGE = "forge" REGISTRY = "registry" @attr.s(frozen=True) class MetadataAuthority(BaseModel): """Represents an entity that provides metadata about an origin or software artifact.""" object_type: Final = "metadata_authority" type = attr.ib(type=MetadataAuthorityType, validator=type_validator()) url = attr.ib(type=str, validator=type_validator()) metadata = attr.ib( type=Optional[ImmutableDict[str, Any]], default=None, validator=type_validator(), converter=freeze_optional_dict, ) def to_dict(self): d = super().to_dict() if d["metadata"] is None: del d["metadata"] return d @classmethod def from_dict(cls, d): d["type"] = MetadataAuthorityType(d["type"]) return super().from_dict(d) @attr.s(frozen=True) class MetadataFetcher(BaseModel): """Represents a software component used to fetch metadata from a metadata authority, and ingest them into the Software Heritage archive.""" object_type: Final = "metadata_fetcher" name = attr.ib(type=str, validator=type_validator()) version = attr.ib(type=str, validator=type_validator()) metadata = attr.ib( type=Optional[ImmutableDict[str, Any]], default=None, validator=type_validator(), converter=freeze_optional_dict, ) def to_dict(self): d = super().to_dict() if d["metadata"] is None: del d["metadata"] return d class MetadataTargetType(Enum): """The type of object extrinsic metadata refer to.""" CONTENT = "content" DIRECTORY = "directory" REVISION = "revision" RELEASE = "release" SNAPSHOT = "snapshot" ORIGIN = "origin" @attr.s(frozen=True) class RawExtrinsicMetadata(BaseModel): object_type: Final = "raw_extrinsic_metadata" # target object type = attr.ib(type=MetadataTargetType, validator=type_validator()) id = attr.ib(type=Union[str, SWHID], validator=type_validator()) """URL if type=MetadataTargetType.ORIGIN, else core SWHID""" # source discovery_date = attr.ib(type=datetime.datetime, validator=type_validator()) authority = attr.ib(type=MetadataAuthority, validator=type_validator()) fetcher = attr.ib(type=MetadataFetcher, validator=type_validator()) # the metadata itself format = attr.ib(type=str, validator=type_validator()) metadata = attr.ib(type=bytes, validator=type_validator()) # context origin = attr.ib(type=Optional[str], default=None, validator=type_validator()) visit = attr.ib(type=Optional[int], default=None, validator=type_validator()) snapshot = attr.ib(type=Optional[SWHID], default=None, validator=type_validator()) release = attr.ib(type=Optional[SWHID], default=None, validator=type_validator()) revision = attr.ib(type=Optional[SWHID], default=None, validator=type_validator()) path = attr.ib(type=Optional[bytes], default=None, validator=type_validator()) directory = attr.ib(type=Optional[SWHID], default=None, validator=type_validator()) @id.validator def check_id(self, attribute, value): if self.type == MetadataTargetType.ORIGIN: if isinstance(value, SWHID) or value.startswith("swh:"): raise ValueError( "Got SWHID as id for origin metadata (expected an URL)." 
) else: self._check_pid(self.type.value, value) @discovery_date.validator def check_discovery_date(self, attribute, value): """Checks the discovery_date has a timezone.""" if value is not None and value.tzinfo is None: raise ValueError("discovery_date must be a timezone-aware datetime.") @origin.validator def check_origin(self, attribute, value): if value is None: return if self.type not in ( MetadataTargetType.SNAPSHOT, MetadataTargetType.RELEASE, MetadataTargetType.REVISION, MetadataTargetType.DIRECTORY, MetadataTargetType.CONTENT, ): raise ValueError( f"Unexpected 'origin' context for {self.type.value} object: {value}" ) if value.startswith("swh:"): # Technically this is valid; but: # 1. SWHIDs are URIs, not URLs # 2. if a SWHID gets here, it's very likely to be a mistake # (and we can remove this check if it turns out there is a # legitimate use for it). raise ValueError(f"SWHID used as context origin URL: {value}") @visit.validator def check_visit(self, attribute, value): if value is None: return if self.type not in ( MetadataTargetType.SNAPSHOT, MetadataTargetType.RELEASE, MetadataTargetType.REVISION, MetadataTargetType.DIRECTORY, MetadataTargetType.CONTENT, ): raise ValueError( f"Unexpected 'visit' context for {self.type.value} object: {value}" ) if self.origin is None: raise ValueError("'origin' context must be set if 'visit' is.") if value <= 0: raise ValueError("Nonpositive visit id") @snapshot.validator def check_snapshot(self, attribute, value): if value is None: return if self.type not in ( MetadataTargetType.RELEASE, MetadataTargetType.REVISION, MetadataTargetType.DIRECTORY, MetadataTargetType.CONTENT, ): raise ValueError( f"Unexpected 'snapshot' context for {self.type.value} object: {value}" ) self._check_pid("snapshot", value) @release.validator def check_release(self, attribute, value): if value is None: return if self.type not in ( MetadataTargetType.REVISION, MetadataTargetType.DIRECTORY, MetadataTargetType.CONTENT, ): raise ValueError( f"Unexpected 'release' context for {self.type.value} object: {value}" ) self._check_pid("release", value) @revision.validator def check_revision(self, attribute, value): if value is None: return if self.type not in (MetadataTargetType.DIRECTORY, MetadataTargetType.CONTENT,): raise ValueError( f"Unexpected 'revision' context for {self.type.value} object: {value}" ) self._check_pid("revision", value) @path.validator def check_path(self, attribute, value): if value is None: return if self.type not in (MetadataTargetType.DIRECTORY, MetadataTargetType.CONTENT,): raise ValueError( f"Unexpected 'path' context for {self.type.value} object: {value}" ) @directory.validator def check_directory(self, attribute, value): if value is None: return if self.type not in (MetadataTargetType.CONTENT,): raise ValueError( f"Unexpected 'directory' context for {self.type.value} object: {value}" ) self._check_pid("directory", value) def _check_pid(self, expected_object_type, pid): if isinstance(pid, str): raise ValueError(f"Expected SWHID, got a string: {pid}") if pid.object_type != expected_object_type: raise ValueError( f"Expected SWHID type '{expected_object_type}', " f"got '{pid.object_type}' in {pid}" ) if pid.metadata: raise ValueError(f"Expected core SWHID, but got: {pid}") def to_dict(self): d = super().to_dict() context_keys = ( "origin", "visit", "snapshot", "release", "revision", "directory", "path", ) for context_key in context_keys: if d[context_key] is None: del d[context_key] return d @classmethod def from_dict(cls, d): d = { **d, "type": 
MetadataTargetType(d["type"]), "authority": MetadataAuthority.from_dict(d["authority"]), "fetcher": MetadataFetcher.from_dict(d["fetcher"]), } if d["type"] != MetadataTargetType.ORIGIN: d["id"] = parse_swhid(d["id"]) swhid_keys = ("snapshot", "release", "revision", "directory") for swhid_key in swhid_keys: if d.get(swhid_key): d[swhid_key] = parse_swhid(d[swhid_key]) return super().from_dict(d) diff --git a/swh/model/tests/generate_testdata.py b/swh/model/tests/generate_testdata.py index 0280a6a..f4093a4 100644 --- a/swh/model/tests/generate_testdata.py +++ b/swh/model/tests/generate_testdata.py @@ -1,72 +1,72 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import datetime -from pytz import all_timezones, timezone from random import choice, randint, random, shuffle -from typing import List, Dict +from typing import Dict, List -from swh.model.hashutil import MultiHash +from pytz import all_timezones, timezone +from swh.model.hashutil import MultiHash PROTOCOLS = ["git", "http", "https", "deb", "svn", "mock"] DOMAINS = ["example.com", "some.long.host.name", "xn--n28h.tld"] PATHS = [ "", "/", "/stuff", "/stuff/", "/path/to/resource", "/path/with/anchor#id=42", "/path/with/qargs?q=1&b", ] CONTENT_STATUS = ["visible", "hidden", "absent"] MAX_DATE = 3e9 # around 2065 def gen_all_origins(): for protocol in PROTOCOLS: for domain in DOMAINS: for urlpath in PATHS: yield {"url": "%s://%s%s" % (protocol, domain, urlpath)} ORIGINS = list(gen_all_origins()) def gen_origins(n: int = 100) -> List: """Returns a list of n randomly generated origins suitable for using as Storage.add_origin() argument. """ origins = ORIGINS[:] shuffle(origins) return origins[:n] def gen_content(): size = randint(1, 10 * 1024) data = bytes(randint(0, 255) for i in range(size)) status = choice(CONTENT_STATUS) h = MultiHash.from_data(data) ctime = datetime.fromtimestamp(random() * MAX_DATE, timezone(choice(all_timezones))) content = { "data": data, "status": status, "length": size, "ctime": ctime, **h.digest(), } if status == "absent": content["reason"] = "why not" content["data"] = None return content def gen_contents(n=20) -> List[Dict]: """Returns a list of n randomly generated content objects (as dict) suitable for using as Storage.content_add() argument. 
""" return [gen_content() for i in range(n)] diff --git a/swh/model/tests/generate_testdata_from_disk.py b/swh/model/tests/generate_testdata_from_disk.py index 063e390..3ad4564 100644 --- a/swh/model/tests/generate_testdata_from_disk.py +++ b/swh/model/tests/generate_testdata_from_disk.py @@ -1,92 +1,92 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from operator import itemgetter import os import sys -from swh.model.from_disk import Directory, DentryPerms +from swh.model.from_disk import DentryPerms, Directory from swh.model.hashutil import ALGORITHMS, hash_to_hex def generate_from_directory(varname, directory, indent=0): """Generate test data from a given directory""" def get_data(member, path): yield (path, member.get_data()) if isinstance(member, Directory): for name, child in member.items(): yield from get_data(child, os.path.join(path, name)) data = dict(get_data(directory, b"")) out = [] def format_hash(h, indent=0): spindent = " " * indent if len(h) > 20: cutoff = len(h) // 2 parts = h[:cutoff], h[cutoff:] else: parts = [h] out.append("hash_to_bytes(\n") for part in parts: out.append(spindent + " %s\n" % repr(hash_to_hex(part))) out.append(spindent + ")") def format_dict_items(d, indent=0): spindent = " " * indent for key, value in sorted(d.items()): if isinstance(key, bytes): out.append(spindent + repr(key) + ": {\n") format_dict_items(value, indent=indent + 4) out.append(spindent + "}") else: out.append(spindent + repr(key) + ": ") if key == "entries": if not value: out.append("[]") else: out.append("[") last_index = len(value) - 1 for i, entry in enumerate( sorted(value, key=itemgetter("name")) ): if i: out.append(" ") out.append("{\n") format_dict_items(entry, indent=indent + 4) if i != last_index: out.append(spindent + "},") out.append(spindent + "}]") elif key in ALGORITHMS | {"id", "target"}: format_hash(value, indent=indent) elif isinstance(value, DentryPerms): out.append(str(value)) else: out.append(repr(value)) out.append(",\n") spindent = " " * indent out.append(spindent + "%s = {\n" % varname) format_dict_items(data, indent=4 + indent) out.append(spindent + "}") return "".join(out) if __name__ == "__main__": if not sys.argv[1:]: print("Usage: %s dir1 dir2" % sys.argv[0], file=sys.stderr) exit(2) for dirname in sys.argv[1:]: basename = os.path.basename(dirname) varname = "expected_%s" % basename testdata = generate_from_directory( varname, Directory.from_disk(path=os.fsencode(dirname)), indent=8 ) print(testdata) print() diff --git a/swh/model/tests/test_from_disk.py b/swh/model/tests/test_from_disk.py index 0256156..497bf6c 100644 --- a/swh/model/tests/test_from_disk.py +++ b/swh/model/tests/test_from_disk.py @@ -1,965 +1,964 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information +from collections import defaultdict import os -import pytest import tarfile import tempfile +from typing import ClassVar, Optional import unittest -from collections import defaultdict -from typing import ClassVar, Optional +import pytest -from swh.model import from_disk +from swh.model import from_disk, model from swh.model.from_disk import Content, DentryPerms, Directory, DiskBackedContent from 
swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex -from swh.model import model TEST_DATA = os.path.join(os.path.dirname(__file__), "data") class ModeToPerms(unittest.TestCase): def setUp(self): super().setUp() # Generate a full permissions map self.perms_map = {} # Symlinks for i in range(0o120000, 0o127777 + 1): self.perms_map[i] = DentryPerms.symlink # Directories for i in range(0o040000, 0o047777 + 1): self.perms_map[i] = DentryPerms.directory # Other file types: socket, regular file, block device, character # device, fifo all map to regular files for ft in [0o140000, 0o100000, 0o060000, 0o020000, 0o010000]: for i in range(ft, ft + 0o7777 + 1): if i & 0o111: # executable bits are set self.perms_map[i] = DentryPerms.executable_content else: self.perms_map[i] = DentryPerms.content def test_exhaustive_mode_to_perms(self): for fmode, perm in self.perms_map.items(): self.assertEqual(perm, from_disk.mode_to_perms(fmode)) class TestDiskBackedContent(unittest.TestCase): def test_with_data(self): expected_content = model.Content( length=42, status="visible", data=b"foo bar", sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) with tempfile.NamedTemporaryFile(mode="w+b") as fd: content = DiskBackedContent( length=42, status="visible", path=fd.name, sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) fd.write(b"foo bar") fd.seek(0) content_with_data = content.with_data() assert expected_content == content_with_data def test_lazy_data(self): with tempfile.NamedTemporaryFile(mode="w+b") as fd: fd.write(b"foo") fd.seek(0) content = DiskBackedContent( length=42, status="visible", path=fd.name, sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) fd.write(b"bar") fd.seek(0) content_with_data = content.with_data() fd.write(b"baz") fd.seek(0) assert content_with_data.data == b"bar" def test_with_data_cannot_read(self): with tempfile.NamedTemporaryFile(mode="w+b") as fd: content = DiskBackedContent( length=42, status="visible", path=fd.name, sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) with pytest.raises(OSError): content.with_data() def test_missing_path(self): with pytest.raises(TypeError): DiskBackedContent( length=42, status="visible", sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) with pytest.raises(TypeError): DiskBackedContent( length=42, status="visible", path=None, sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) class DataMixin: maxDiff = None # type: ClassVar[Optional[int]] def setUp(self): self.tmpdir = tempfile.TemporaryDirectory(prefix="swh.model.from_disk") self.tmpdir_name = os.fsencode(self.tmpdir.name) self.contents = { b"file": { "data": b"42\n", "sha1": hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4689"), "sha256": hash_to_bytes( "084c799cd551dd1d8d5c5f9a5d593b2e" "931f5e36122ee5c793c1d08a19839cc0" ), "sha1_git": hash_to_bytes("d81cc0710eb6cf9efd5b920a8453e1e07157b6cd"), "blake2s256": hash_to_bytes( "d5fe1939576527e42cfd76a9455a2432" "fe7f56669564577dd93c4280e76d661d" ), "length": 3, "mode": 0o100644, }, } self.symlinks = { b"symlink": { "data": b"target", "blake2s256": hash_to_bytes( "595d221b30fdd8e10e2fdf18376e688e" "9f18d56fd9b6d1eb6a822f8c146c6da6" ), "sha1": hash_to_bytes("0e8a3ad980ec179856012b7eecf4327e99cd44cd"), "sha1_git": hash_to_bytes("1de565933b05f74c75ff9a6520af5f9f8a5a2f1d"), "sha256": hash_to_bytes( "34a04005bcaf206eec990bd9637d9fdb" "6725e0a0c0d4aebf003f17f4c956eb5c" ), "length": 6, "perms": DentryPerms.symlink, } } self.specials = { b"fifo": 
os.mkfifo, } self.empty_content = { "data": b"", "length": 0, "blake2s256": hash_to_bytes( "69217a3079908094e11121d042354a7c" "1f55b6482ca1a51e1b250dfd1ed0eef9" ), "sha1": hash_to_bytes("da39a3ee5e6b4b0d3255bfef95601890afd80709"), "sha1_git": hash_to_bytes("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"), "sha256": hash_to_bytes( "e3b0c44298fc1c149afbf4c8996fb924" "27ae41e4649b934ca495991b7852b855" ), "perms": DentryPerms.content, } self.empty_directory = { "id": hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), "entries": [], } # Generated with generate_testdata_from_disk self.tarball_contents = { b"": { "entries": [ { "name": b"bar", "perms": DentryPerms.directory, "target": hash_to_bytes( "3c1f578394f4623f74a0ba7fe761729f59fc6ec4" ), "type": "dir", }, { "name": b"empty-folder", "perms": DentryPerms.directory, "target": hash_to_bytes( "4b825dc642cb6eb9a060e54bf8d69288fbee4904" ), "type": "dir", }, { "name": b"foo", "perms": DentryPerms.directory, "target": hash_to_bytes( "2b41c40f0d1fbffcba12497db71fba83fcca96e5" ), "type": "dir", }, { "name": b"link-to-another-quote", "perms": DentryPerms.symlink, "target": hash_to_bytes( "7d5c08111e21c8a9f71540939998551683375fad" ), "type": "file", }, { "name": b"link-to-binary", "perms": DentryPerms.symlink, "target": hash_to_bytes( "e86b45e538d9b6888c969c89fbd22a85aa0e0366" ), "type": "file", }, { "name": b"link-to-foo", "perms": DentryPerms.symlink, "target": hash_to_bytes( "19102815663d23f8b75a47e7a01965dcdc96468c" ), "type": "file", }, { "name": b"some-binary", "perms": DentryPerms.executable_content, "target": hash_to_bytes( "68769579c3eaadbe555379b9c3538e6628bae1eb" ), "type": "file", }, ], "id": hash_to_bytes("e8b0f1466af8608c8a3fb9879db172b887e80759"), }, b"bar": { "entries": [ { "name": b"barfoo", "perms": DentryPerms.directory, "target": hash_to_bytes( "c3020f6bf135a38c6df3afeb5fb38232c5e07087" ), "type": "dir", } ], "id": hash_to_bytes("3c1f578394f4623f74a0ba7fe761729f59fc6ec4"), }, b"bar/barfoo": { "entries": [ { "name": b"another-quote.org", "perms": DentryPerms.content, "target": hash_to_bytes( "133693b125bad2b4ac318535b84901ebb1f6b638" ), "type": "file", } ], "id": hash_to_bytes("c3020f6bf135a38c6df3afeb5fb38232c5e07087"), }, b"bar/barfoo/another-quote.org": { "blake2s256": hash_to_bytes( "d26c1cad82d43df0bffa5e7be11a60e3" "4adb85a218b433cbce5278b10b954fe8" ), "length": 72, "perms": DentryPerms.content, "sha1": hash_to_bytes("90a6138ba59915261e179948386aa1cc2aa9220a"), "sha1_git": hash_to_bytes("133693b125bad2b4ac318535b84901ebb1f6b638"), "sha256": hash_to_bytes( "3db5ae168055bcd93a4d08285dc99ffe" "e2883303b23fac5eab850273a8ea5546" ), }, b"empty-folder": { "entries": [], "id": hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), }, b"foo": { "entries": [ { "name": b"barfoo", "perms": DentryPerms.symlink, "target": hash_to_bytes( "8185dfb2c0c2c597d16f75a8a0c37668567c3d7e" ), "type": "file", }, { "name": b"quotes.md", "perms": DentryPerms.content, "target": hash_to_bytes( "7c4c57ba9ff496ad179b8f65b1d286edbda34c9a" ), "type": "file", }, { "name": b"rel-link-to-barfoo", "perms": DentryPerms.symlink, "target": hash_to_bytes( "acac326ddd63b0bc70840659d4ac43619484e69f" ), "type": "file", }, ], "id": hash_to_bytes("2b41c40f0d1fbffcba12497db71fba83fcca96e5"), }, b"foo/barfoo": { "blake2s256": hash_to_bytes( "e1252f2caa4a72653c4efd9af871b62b" "f2abb7bb2f1b0e95969204bd8a70d4cd" ), "data": b"bar/barfoo", "length": 10, "perms": DentryPerms.symlink, "sha1": hash_to_bytes("9057ee6d0162506e01c4d9d5459a7add1fedac37"), "sha1_git": 
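# Where the two "empty" identifiers above come from (sketch): git hashes an
# object as sha1(b"<type> <size>\x00" + payload), so the empty blob and the
# empty tree have fixed, well-known ids.
import hashlib

assert hashlib.sha1(b"blob 0\x00").hexdigest() == "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"
assert hashlib.sha1(b"tree 0\x00").hexdigest() == "4b825dc642cb6eb9a060e54bf8d69288fbee4904"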
hash_to_bytes("8185dfb2c0c2c597d16f75a8a0c37668567c3d7e"), "sha256": hash_to_bytes( "29ad3f5725321b940332c78e403601af" "ff61daea85e9c80b4a7063b6887ead68" ), }, b"foo/quotes.md": { "blake2s256": hash_to_bytes( "bf7ce4fe304378651ee6348d3e9336ed" "5ad603d33e83c83ba4e14b46f9b8a80b" ), "length": 66, "perms": DentryPerms.content, "sha1": hash_to_bytes("1bf0bb721ac92c18a19b13c0eb3d741cbfadebfc"), "sha1_git": hash_to_bytes("7c4c57ba9ff496ad179b8f65b1d286edbda34c9a"), "sha256": hash_to_bytes( "caca942aeda7b308859eb56f909ec96d" "07a499491690c453f73b9800a93b1659" ), }, b"foo/rel-link-to-barfoo": { "blake2s256": hash_to_bytes( "d9c327421588a1cf61f316615005a2e9" "c13ac3a4e96d43a24138d718fa0e30db" ), "data": b"../bar/barfoo", "length": 13, "perms": DentryPerms.symlink, "sha1": hash_to_bytes("dc51221d308f3aeb2754db48391b85687c2869f4"), "sha1_git": hash_to_bytes("acac326ddd63b0bc70840659d4ac43619484e69f"), "sha256": hash_to_bytes( "8007d20db2af40435f42ddef4b8ad76b" "80adbec26b249fdf0473353f8d99df08" ), }, b"link-to-another-quote": { "blake2s256": hash_to_bytes( "2d0e73cea01ba949c1022dc10c8a43e6" "6180639662e5dc2737b843382f7b1910" ), "data": b"bar/barfoo/another-quote.org", "length": 28, "perms": DentryPerms.symlink, "sha1": hash_to_bytes("cbeed15e79599c90de7383f420fed7acb48ea171"), "sha1_git": hash_to_bytes("7d5c08111e21c8a9f71540939998551683375fad"), "sha256": hash_to_bytes( "e6e17d0793aa750a0440eb9ad5b80b25" "8076637ef0fb68f3ac2e59e4b9ac3ba6" ), }, b"link-to-binary": { "blake2s256": hash_to_bytes( "9ce18b1adecb33f891ca36664da676e1" "2c772cc193778aac9a137b8dc5834b9b" ), "data": b"some-binary", "length": 11, "perms": DentryPerms.symlink, "sha1": hash_to_bytes("d0248714948b3a48a25438232a6f99f0318f59f1"), "sha1_git": hash_to_bytes("e86b45e538d9b6888c969c89fbd22a85aa0e0366"), "sha256": hash_to_bytes( "14126e97d83f7d261c5a6889cee73619" "770ff09e40c5498685aba745be882eff" ), }, b"link-to-foo": { "blake2s256": hash_to_bytes( "08d6cad88075de8f192db097573d0e82" "9411cd91eb6ec65e8fc16c017edfdb74" ), "data": b"foo", "length": 3, "perms": DentryPerms.symlink, "sha1": hash_to_bytes("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), "sha1_git": hash_to_bytes("19102815663d23f8b75a47e7a01965dcdc96468c"), "sha256": hash_to_bytes( "2c26b46b68ffc68ff99b453c1d304134" "13422d706483bfa0f98a5e886266e7ae" ), }, b"some-binary": { "blake2s256": hash_to_bytes( "922e0f7015035212495b090c27577357" "a740ddd77b0b9e0cd23b5480c07a18c6" ), "length": 5, "perms": DentryPerms.executable_content, "sha1": hash_to_bytes("0bbc12d7f4a2a15b143da84617d95cb223c9b23c"), "sha1_git": hash_to_bytes("68769579c3eaadbe555379b9c3538e6628bae1eb"), "sha256": hash_to_bytes( "bac650d34a7638bb0aeb5342646d24e3" "b9ad6b44c9b383621faa482b990a367d" ), }, } def tearDown(self): self.tmpdir.cleanup() def assertContentEqual(self, left, right, *, check_path=False): # noqa if not isinstance(left, Content): raise ValueError("%s is not a Content" % left) if isinstance(right, Content): right = right.get_data() # Compare dictionaries keys = DEFAULT_ALGORITHMS | { "length", "perms", } if check_path: keys |= {"path"} failed = [] for key in keys: try: lvalue = left.data[key] if key == "perms" and "perms" not in right: rvalue = from_disk.mode_to_perms(right["mode"]) else: rvalue = right[key] except KeyError: failed.append(key) continue if lvalue != rvalue: failed.append(key) if failed: raise self.failureException( "Content mismatched:\n" + "\n".join( "content[%s] = %r != %r" % (key, left.data.get(key), right.get(key)) for key in failed ) ) def assertDirectoryEqual(self, left, right): # 
NoQA if not isinstance(left, Directory): raise ValueError("%s is not a Directory" % left) if isinstance(right, Directory): right = right.get_data() assert left.entries == right["entries"] assert left.hash == right["id"] assert left.to_model() == model.Directory.from_dict(right) def make_contents(self, directory): for filename, content in self.contents.items(): path = os.path.join(directory, filename) with open(path, "wb") as f: f.write(content["data"]) os.chmod(path, content["mode"]) def make_symlinks(self, directory): for filename, symlink in self.symlinks.items(): path = os.path.join(directory, filename) os.symlink(symlink["data"], path) def make_specials(self, directory): for filename, fn in self.specials.items(): path = os.path.join(directory, filename) fn(path) def make_from_tarball(self, directory): tarball = os.path.join(TEST_DATA, "dir-folders", "sample-folder.tgz") with tarfile.open(tarball, "r:gz") as f: f.extractall(os.fsdecode(directory)) class TestContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() def test_data_to_content(self): for filename, content in self.contents.items(): conv_content = Content.from_bytes( mode=content["mode"], data=content["data"] ) self.assertContentEqual(conv_content, content) self.assertIn(hash_to_hex(conv_content.hash), repr(conv_content)) class SymlinkToContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_symlinks(self.tmpdir_name) def test_symlink_to_content(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) perms = 0o120000 conv_content = Content.from_symlink(path=path, mode=perms) self.assertContentEqual(conv_content, symlink) def test_symlink_to_base_model(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) perms = 0o120000 model_content = Content.from_symlink(path=path, mode=perms).to_model() right = symlink.copy() for key in ("perms", "path", "mode"): right.pop(key, None) right["status"] = "visible" assert model_content == model.Content.from_dict(right) class FileToContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_contents(self.tmpdir_name) self.make_symlinks(self.tmpdir_name) self.make_specials(self.tmpdir_name) def test_symlink_to_content(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, symlink) def test_file_to_content(self): for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, content) def test_special_to_content(self): for filename in self.specials: path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, self.empty_content) for path in ["/dev/null", "/dev/zero"]: conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, self.empty_content) def test_symlink_to_content_model(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) model_content = Content.from_file(path=path).to_model() right = symlink.copy() for key in ("perms", "path", "mode"): right.pop(key, None) right["status"] = "visible" assert model_content == model.Content.from_dict(right) def test_file_to_content_model(self): for filename, content in
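# The three from_disk.Content constructors exercised by these tests, side by
# side (sketch; `p` is a hypothetical bytes path):
#   Content.from_bytes(mode=0o100644, data=b"42\n")  # from in-memory data
#   Content.from_symlink(path=p, mode=0o120000)      # hashes the link target
#   Content.from_file(path=p)                        # stats and reads the file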
self.contents.items(): path = os.path.join(self.tmpdir_name, filename) model_content = Content.from_file(path=path).to_model() right = content.copy() for key in ("perms", "mode"): right.pop(key, None) assert model_content.with_data() == model.Content.from_dict(right) right["path"] = path del right["data"] assert model_content == DiskBackedContent.from_dict(right) def test_special_to_content_model(self): for filename in self.specials: path = os.path.join(self.tmpdir_name, filename) model_content = Content.from_file(path=path).to_model() right = self.empty_content.copy() for key in ("perms", "path", "mode"): right.pop(key, None) right["status"] = "visible" assert model_content == model.Content.from_dict(right) for path in ["/dev/null", "/dev/zero"]: model_content = Content.from_file(path=path).to_model() right = self.empty_content.copy() for key in ("perms", "path", "mode"): right.pop(key, None) right["status"] = "visible" assert model_content == model.Content.from_dict(right) def test_symlink_max_length(self): for max_content_length in [4, 10]: for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) content = Content.from_file(path=path) if content.data["length"] > max_content_length: with pytest.raises(Exception, match="too large"): Content.from_file( path=path, max_content_length=max_content_length ) else: limited_content = Content.from_file( path=path, max_content_length=max_content_length ) assert content == limited_content def test_file_max_length(self): for max_content_length in [2, 4]: for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) content = Content.from_file(path=path) limited_content = Content.from_file( path=path, max_content_length=max_content_length ) assert content.data["length"] == limited_content.data["length"] assert content.data["status"] == "visible" if content.data["length"] > max_content_length: assert limited_content.data["status"] == "absent" assert limited_content.data["reason"] == "Content too large" else: assert limited_content.data["status"] == "visible" def test_special_file_max_length(self): for max_content_length in [None, 0, 1]: for filename in self.specials: path = os.path.join(self.tmpdir_name, filename) content = Content.from_file(path=path) limited_content = Content.from_file( path=path, max_content_length=max_content_length ) assert limited_content == content def test_file_to_content_with_path(self): for filename, content in self.contents.items(): content_w_path = content.copy() path = os.path.join(self.tmpdir_name, filename) content_w_path["path"] = path conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, content_w_path, check_path=True) @pytest.mark.fs class DirectoryToObjects(DataMixin, unittest.TestCase): def setUp(self): super().setUp() contents = os.path.join(self.tmpdir_name, b"contents") os.mkdir(contents) self.make_contents(contents) symlinks = os.path.join(self.tmpdir_name, b"symlinks") os.mkdir(symlinks) self.make_symlinks(symlinks) specials = os.path.join(self.tmpdir_name, b"specials") os.mkdir(specials) self.make_specials(specials) empties = os.path.join(self.tmpdir_name, b"empty1", b"empty2") os.makedirs(empties) def test_directory_to_objects(self): directory = Directory.from_disk(path=self.tmpdir_name) for name, value in self.contents.items(): self.assertContentEqual(directory[b"contents/" + name], value) for name, value in self.symlinks.items(): self.assertContentEqual(directory[b"symlinks/" + name], value) for name in 
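# Contract pinned down by the max_content_length tests above (sketch): a
# regular file longer than the cap keeps its real length but loses its data:
#   c = Content.from_file(path=p, max_content_length=2)  # p: hypothetical path
#   assert c.data["status"] == "absent"
#   assert c.data["reason"] == "Content too large"
# while files within the cap come back identical to an uncapped read.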
self.specials: self.assertContentEqual( directory[b"specials/" + name], self.empty_content, ) self.assertEqual( directory[b"empty1/empty2"].get_data(), self.empty_directory, ) # Raise on non existent file with self.assertRaisesRegex(KeyError, "b'nonexistent'"): directory[b"empty1/nonexistent"] # Raise on non existent directory with self.assertRaisesRegex(KeyError, "b'nonexistentdir'"): directory[b"nonexistentdir/file"] objs = directory.collect() self.assertCountEqual(["content", "directory"], objs) self.assertEqual(len(objs["directory"]), 6) self.assertEqual( len(objs["content"]), len(self.contents) + len(self.symlinks) + 1 ) def test_directory_to_objects_ignore_empty(self): directory = Directory.from_disk( path=self.tmpdir_name, dir_filter=from_disk.ignore_empty_directories ) for name, value in self.contents.items(): self.assertContentEqual(directory[b"contents/" + name], value) for name, value in self.symlinks.items(): self.assertContentEqual(directory[b"symlinks/" + name], value) for name in self.specials: self.assertContentEqual( directory[b"specials/" + name], self.empty_content, ) # empty directories have been ignored recursively with self.assertRaisesRegex(KeyError, "b'empty1'"): directory[b"empty1"] with self.assertRaisesRegex(KeyError, "b'empty1'"): directory[b"empty1/empty2"] objs = directory.collect() self.assertCountEqual(["content", "directory"], objs) self.assertEqual(len(objs["directory"]), 4) self.assertEqual( len(objs["content"]), len(self.contents) + len(self.symlinks) + 1 ) def test_directory_to_objects_ignore_name(self): directory = Directory.from_disk( path=self.tmpdir_name, dir_filter=from_disk.ignore_named_directories([b"symlinks"]), ) for name, value in self.contents.items(): self.assertContentEqual(directory[b"contents/" + name], value) for name in self.specials: self.assertContentEqual( directory[b"specials/" + name], self.empty_content, ) self.assertEqual( directory[b"empty1/empty2"].get_data(), self.empty_directory, ) with self.assertRaisesRegex(KeyError, "b'symlinks'"): directory[b"symlinks"] objs = directory.collect() self.assertCountEqual(["content", "directory"], objs) self.assertEqual(len(objs["directory"]), 5) self.assertEqual(len(objs["content"]), len(self.contents) + 1) def test_directory_to_objects_ignore_name_case(self): directory = Directory.from_disk( path=self.tmpdir_name, dir_filter=from_disk.ignore_named_directories( [b"symLiNks"], case_sensitive=False ), ) for name, value in self.contents.items(): self.assertContentEqual(directory[b"contents/" + name], value) for name in self.specials: self.assertContentEqual( directory[b"specials/" + name], self.empty_content, ) self.assertEqual( directory[b"empty1/empty2"].get_data(), self.empty_directory, ) with self.assertRaisesRegex(KeyError, "b'symlinks'"): directory[b"symlinks"] objs = directory.collect() self.assertCountEqual(["content", "directory"], objs) self.assertEqual(len(objs["directory"]), 5) self.assertEqual(len(objs["content"]), len(self.contents) + 1) def test_directory_entry_order(self): with tempfile.TemporaryDirectory() as dirname: dirname = os.fsencode(dirname) open(os.path.join(dirname, b"foo."), "a") open(os.path.join(dirname, b"foo0"), "a") os.mkdir(os.path.join(dirname, b"foo")) directory = Directory.from_disk(path=dirname) assert [entry["name"] for entry in directory.entries] == [ b"foo.", b"foo", b"foo0", ] @pytest.mark.fs class TarballTest(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_from_tarball(self.tmpdir_name) def test_contents_match(self): directory 
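# Why test_directory_entry_order above expects [b"foo.", b"foo", b"foo0"]:
# git sorts tree entries as if directory names ended with b"/" (0x2f, which
# sits between b"." 0x2e and b"0" 0x30). A sketch of that sort key:
def _git_entry_sort_key(entry):
    name, kind = entry
    return name + (b"/" if kind == "dir" else b"")

_entries = [(b"foo.", "file"), (b"foo0", "file"), (b"foo", "dir")]
assert [n for n, _ in sorted(_entries, key=_git_entry_sort_key)] == [b"foo.", b"foo", b"foo0"]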
= Directory.from_disk( path=os.path.join(self.tmpdir_name, b"sample-folder") ) for name, expected in self.tarball_contents.items(): obj = directory[name] if isinstance(obj, Content): self.assertContentEqual(obj, expected) elif isinstance(obj, Directory): self.assertDirectoryEqual(obj, expected) else: raise self.failureException("Unknown type for %s" % obj) class TarballIterDirectory(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_from_tarball(self.tmpdir_name) def test_iter_directory(self): """Iterating over a from_disk Directory should yield the full directory tree. """ directory = Directory.from_disk( path=os.path.join(self.tmpdir_name, b"sample-folder") ) contents, skipped_contents, directories = from_disk.iter_directory(directory) expected_nb = defaultdict(int) for name in self.tarball_contents.keys(): obj = directory[name] expected_nb[obj.object_type] += 1 assert len(contents) == expected_nb["content"] and len(contents) > 0 assert len(skipped_contents) == 0 assert len(directories) == expected_nb["directory"] and len(directories) > 0 class DirectoryManipulation(DataMixin, unittest.TestCase): def test_directory_access_nested(self): d = Directory() d[b"a"] = Directory() d[b"a/b"] = Directory() self.assertEqual(d[b"a/b"].get_data(), self.empty_directory) def test_directory_del_nested(self): d = Directory() d[b"a"] = Directory() d[b"a/b"] = Directory() with self.assertRaisesRegex(KeyError, "b'c'"): del d[b"a/b/c"] with self.assertRaisesRegex(KeyError, "b'level2'"): del d[b"a/level2/c"] del d[b"a/b"] self.assertEqual(d[b"a"].get_data(), self.empty_directory) def test_directory_access_self(self): d = Directory() self.assertIs(d, d[b""]) self.assertIs(d, d[b"/"]) self.assertIs(d, d[b"//"]) def test_directory_access_wrong_type(self): d = Directory() with self.assertRaisesRegex(ValueError, "bytes from Directory"): d["foo"] with self.assertRaisesRegex(ValueError, "bytes from Directory"): d[42] def test_directory_repr(self): entries = [b"a", b"b", b"c"] d = Directory() for entry in entries: d[entry] = Directory() r = repr(d) self.assertIn(hash_to_hex(d.hash), r) for entry in entries: self.assertIn(str(entry), r) def test_directory_set_wrong_type_name(self): d = Directory() with self.assertRaisesRegex(ValueError, "bytes Directory entry"): d["foo"] = Directory() with self.assertRaisesRegex(ValueError, "bytes Directory entry"): d[42] = Directory() def test_directory_set_nul_in_name(self): d = Directory() with self.assertRaisesRegex(ValueError, "nul bytes"): d[b"\x00\x01"] = Directory() def test_directory_set_empty_name(self): d = Directory() with self.assertRaisesRegex(ValueError, "must have a name"): d[b""] = Directory() with self.assertRaisesRegex(ValueError, "must have a name"): d[b"/"] = Directory() def test_directory_set_wrong_type(self): d = Directory() with self.assertRaisesRegex(ValueError, "Content or Directory"): d[b"entry"] = object() def test_directory_del_wrong_type(self): d = Directory() with self.assertRaisesRegex(ValueError, "bytes Directory entry"): del d["foo"] with self.assertRaisesRegex(ValueError, "bytes Directory entry"): del d[42] diff --git a/swh/model/tests/test_generate_testdata.py b/swh/model/tests/test_generate_testdata.py index aa9c8af..6ed2e63 100644 --- a/swh/model/tests/test_generate_testdata.py +++ b/swh/model/tests/test_generate_testdata.py @@ -1,54 +1,54 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version
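# Shape of the APIs exercised above (sketch): a from_disk Directory behaves
# like a nested, bytes-keyed mapping, and iter_directory partitions every
# object reachable from it:
#   d = Directory.from_disk(path=some_path)  # some_path: hypothetical bytes path
#   d[b"subdir/file"]                        # nested lookup, KeyError if absent
#   contents, skipped_contents, directories = from_disk.iter_directory(d)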
# See top-level LICENSE file for more information -from .generate_testdata import gen_contents, gen_origins, ORIGINS +from swh.model.model import BaseContent, Origin -from swh.model.model import Origin, BaseContent +from .generate_testdata import ORIGINS, gen_contents, gen_origins def test_gen_origins_empty(): origins = gen_origins(0) assert not origins def test_gen_origins_one(): origins = gen_origins(1) assert len(origins) == 1 assert [Origin.from_dict(d) for d in origins] def test_gen_origins_default(): origins = gen_origins() assert len(origins) == 100 models = [Origin.from_dict(d).url for d in origins] assert len(origins) == len(set(models)) def test_gen_origins_max(): nmax = len(ORIGINS) origins = gen_origins(nmax + 1) assert len(origins) == nmax models = {Origin.from_dict(d).url for d in origins} # ensure we did not generate the same origin twice assert len(origins) == len(models) def test_gen_contents_empty(): contents = gen_contents(0) assert not contents def test_gen_contents_one(): contents = gen_contents(1) assert len(contents) == 1 assert [BaseContent.from_dict(d) for d in contents] def test_gen_contents_default(): contents = gen_contents() assert len(contents) == 20 models = {BaseContent.from_dict(d) for d in contents} # ensure we did not generate the same content twice assert len(contents) == len(models) diff --git a/swh/model/tests/test_hypothesis_strategies.py b/swh/model/tests/test_hypothesis_strategies.py index e1ab9b7..c93b24b 100644 --- a/swh/model/tests/test_hypothesis_strategies.py +++ b/swh/model/tests/test_hypothesis_strategies.py @@ -1,210 +1,209 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import attr -import iso8601 from hypothesis import given, settings +import iso8601 from swh.model.hashutil import DEFAULT_ALGORITHMS from swh.model.hypothesis_strategies import ( aware_datetimes, - objects, - object_dicts, contents, - skipped_contents, - snapshots, + object_dicts, + objects, origin_visits, persons, + skipped_contents, + snapshots, ) from swh.model.model import TargetType - target_types = ("content", "directory", "revision", "release", "snapshot", "alias") all_but_skipped_content = ( "origin", "origin_visit", "origin_visit_status", "snapshot", "release", "revision", "directory", "content", ) @given(objects(blacklist_types=())) def test_generation(obj_type_and_obj): (obj_type, object_) = obj_type_and_obj attr.validate(object_) @given(objects(split_content=False)) def test_generation_merged_content(obj_type_and_obj): # we should never generate a "skipped_content" here assert obj_type_and_obj[0] != "skipped_content" @given(objects(split_content=True, blacklist_types=all_but_skipped_content)) def test_generation_split_content(obj_type_and_obj): # we should only generate "skipped_content" assert obj_type_and_obj[0] == "skipped_content" @given(objects(blacklist_types=("origin_visit", "directory"))) def test_generation_blacklist(obj_type_and_obj): assert obj_type_and_obj[0] not in ("origin_visit", "directory") def assert_nested_dict(obj): """Checks that the object is a nested dict containing no instances of swh.model.model classes.""" if isinstance(obj, dict): for (key, value) in obj.items(): assert isinstance(key, (str, bytes)), key assert_nested_dict(value) elif isinstance(obj, tuple): for value in obj: assert_nested_dict(value) elif isinstance(obj, (int,
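# Usage shape of the generators tested above (sketch): both return plain
# dicts that round-trip through the model classes:
#   origins = gen_origins(n)    # at most len(ORIGINS) unique origin dicts
#   contents = gen_contents(n)  # n content dicts, each a valid BaseContent input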
float, str, bytes, bool, type(None), datetime.datetime)): pass else: assert False, obj @given(object_dicts(blacklist_types=())) def test_dicts_generation(obj_type_and_obj): (obj_type, object_) = obj_type_and_obj assert_nested_dict(object_) if obj_type == "content": COMMON_KEYS = set(DEFAULT_ALGORITHMS) | {"length", "status", "ctime"} if object_["status"] == "visible": assert set(object_) <= COMMON_KEYS | {"data"} elif object_["status"] == "absent": assert set(object_) == COMMON_KEYS | {"reason"} elif object_["status"] == "hidden": assert set(object_) <= COMMON_KEYS | {"data"} else: assert False, object_ elif obj_type == "release": assert object_["target_type"] in target_types elif obj_type == "snapshot": for branch in object_["branches"].values(): assert branch is None or branch["target_type"] in target_types @given(aware_datetimes()) def test_datetimes(dt): # Checks this doesn't raise an error, eg. about seconds in the TZ offset iso8601.parse_date(dt.isoformat()) assert dt.tzinfo is not None @given(object_dicts(split_content=False)) def test_dicts_generation_merged_content(obj_type_and_obj): # we should never generate a "skipped_content" here assert obj_type_and_obj[0] != "skipped_content" @given(object_dicts(split_content=True, blacklist_types=all_but_skipped_content)) def test_dicts_generation_split_content(obj_type_and_obj): # we should only generate "skipped_content" assert obj_type_and_obj[0] == "skipped_content" @given(object_dicts(blacklist_types=("release", "content"))) def test_dicts_generation_blacklist(obj_type_and_obj): assert obj_type_and_obj[0] not in ("release", "content") @given(objects()) def test_model_to_dicts(obj_type_and_obj): _, object_ = obj_type_and_obj object_type = object_.object_type obj_dict = object_.to_dict() assert_nested_dict(obj_dict) if object_type in ("content", "skipped_content"): COMMON_KEYS = set(DEFAULT_ALGORITHMS) | {"length", "status"} if object_.ctime is not None: COMMON_KEYS |= {"ctime"} if obj_dict["status"] == "visible": assert set(obj_dict) == COMMON_KEYS | {"data"} elif obj_dict["status"] == "absent": assert set(obj_dict) == COMMON_KEYS | {"reason"} elif obj_dict["status"] == "hidden": assert set(obj_dict) == COMMON_KEYS | {"data"} else: assert False, obj_dict elif object_type == "release": assert obj_dict["target_type"] in target_types elif object_type == "snapshot": for branch in obj_dict["branches"].values(): assert branch is None or branch["target_type"] in target_types @given(contents()) def test_content_aware_datetime(cont): assert cont.ctime is None or cont.ctime.tzinfo is not None @given(skipped_contents()) def test_skipped_content_aware_datetime(cont): assert cont.ctime is None or cont.ctime.tzinfo is not None _min_snp_size = 10 _max_snp_size = 100 @given(snapshots(min_size=_min_snp_size, max_size=_max_snp_size)) @settings(max_examples=1) def test_snapshots_strategy(snapshot): branches = snapshot.branches assert len(branches) >= _min_snp_size assert len(branches) <= _max_snp_size aliases = [] # check snapshot integrity for name, branch in branches.items(): assert branch is None or branch.target_type.value in target_types if branch is not None and branch.target_type == TargetType.ALIAS: aliases.append(name) assert branch.target in branches # check no cycles between aliases for alias in aliases: processed_alias = set() current_alias = alias while ( branches[current_alias] is not None and branches[current_alias].target_type == TargetType.ALIAS ): assert branches[current_alias].target not in processed_alias 
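# Minimal pattern for reusing these strategies in another test module
# (sketch; the test name is hypothetical):
#   from hypothesis import given
#   from swh.model.hypothesis_strategies import objects
#
#   @given(objects(blacklist_types=("origin",)))
#   def test_mine(objtype_and_obj):
#       obj_type, obj = objtype_and_obj  # e.g. ("revision", Revision(...))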
processed_alias.add(current_alias) current_alias = branches[current_alias].target @given(snapshots(min_size=_min_snp_size, max_size=_min_snp_size)) @settings(max_examples=1) def test_snapshots_strategy_fixed_size(snapshot): assert len(snapshot.branches) == _min_snp_size @given(origin_visits()) def test_origin_visit_aware_datetime(visit): assert visit.date.tzinfo is not None @given(persons()) def test_person_do_not_look_like_anonymized(person): assert not ( len(person.fullname) == 32 and person.name is None and person.email is None ) diff --git a/swh/model/tests/test_identifiers.py b/swh/model/tests/test_identifiers.py index 5acbd2d..3741b70 100644 --- a/swh/model/tests/test_identifiers.py +++ b/swh/model/tests/test_identifiers.py @@ -1,1076 +1,1077 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime -import pytest import unittest +import pytest + from swh.model import hashutil, identifiers from swh.model.exceptions import ValidationError from swh.model.hashutil import hash_to_bytes as _x from swh.model.identifiers import ( CONTENT, DIRECTORY, RELEASE, REVISION, SNAPSHOT, SWHID, normalize_timestamp, ) class UtilityFunctionsIdentifier(unittest.TestCase): def setUp(self): self.str_id = "c2e41aae41ac17bd4a650770d6ee77f62e52235b" self.bytes_id = binascii.unhexlify(self.str_id) self.bad_type_id = object() def test_identifier_to_bytes(self): for id in [self.str_id, self.bytes_id]: self.assertEqual(identifiers.identifier_to_bytes(id), self.bytes_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(id[:-2]) self.assertIn("length", str(cm.exception)) with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(self.bad_type_id) self.assertIn("type", str(cm.exception)) def test_identifier_to_str(self): for id in [self.str_id, self.bytes_id]: self.assertEqual(identifiers.identifier_to_str(id), self.str_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_str(id[:-2]) self.assertIn("length", str(cm.exception)) with self.assertRaises(ValueError) as cm: identifiers.identifier_to_str(self.bad_type_id) self.assertIn("type", str(cm.exception)) class UtilityFunctionsDateOffset(unittest.TestCase): def setUp(self): self.dates = { b"1448210036": {"seconds": 1448210036, "microseconds": 0,}, b"1448210036.002342": {"seconds": 1448210036, "microseconds": 2342,}, b"1448210036.12": {"seconds": 1448210036, "microseconds": 120000,}, } self.broken_dates = [ 1448210036.12, ] self.offsets = { 0: b"+0000", -630: b"-1030", 800: b"+1320", } def test_format_date(self): for date_repr, date in self.dates.items(): self.assertEqual(identifiers.format_date(date), date_repr) def test_format_date_fail(self): for date in self.broken_dates: with self.assertRaises(ValueError): identifiers.format_date(date) def test_format_offset(self): for offset, res in self.offsets.items(): self.assertEqual(identifiers.format_offset(offset), res) class ContentIdentifier(unittest.TestCase): def setUp(self): self.content = { "status": "visible", "length": 5, "data": b"1984\n", "ctime": datetime.datetime( 2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc ), } self.content_id = hashutil.MultiHash.from_data(self.content["data"]).digest() def test_content_identifier(self): self.assertEqual(identifiers.content_identifier(self.content),
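# The normalization these utility tests pin down, stated directly (a
# runnable sketch):
import binascii

from swh.model import identifiers

hex_id = "c2e41aae41ac17bd4a650770d6ee77f62e52235b"
assert identifiers.identifier_to_bytes(hex_id) == binascii.unhexlify(hex_id)
assert identifiers.identifier_to_str(binascii.unhexlify(hex_id)) == hex_id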
self.content_id) directory_example = { "id": "d7ed3d2c31d608823be58b1cbe57605310615231", "entries": [ { "type": "file", "perms": 33188, "name": b"README", "target": _x("37ec8ea2110c0b7a32fbb0e872f6e7debbf95e21"), }, { "type": "file", "perms": 33188, "name": b"Rakefile", "target": _x("3bb0e8592a41ae3185ee32266c860714980dbed7"), }, { "type": "dir", "perms": 16384, "name": b"app", "target": _x("61e6e867f5d7ba3b40540869bc050b0c4fed9e95"), }, { "type": "file", "perms": 33188, "name": b"1.megabyte", "target": _x("7c2b2fbdd57d6765cdc9d84c2d7d333f11be7fb3"), }, { "type": "dir", "perms": 16384, "name": b"config", "target": _x("591dfe784a2e9ccc63aaba1cb68a765734310d98"), }, { "type": "dir", "perms": 16384, "name": b"public", "target": _x("9588bf4522c2b4648bfd1c61d175d1f88c1ad4a5"), }, { "type": "file", "perms": 33188, "name": b"development.sqlite3", "target": _x("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"), }, { "type": "dir", "perms": 16384, "name": b"doc", "target": _x("154705c6aa1c8ead8c99c7915373e3c44012057f"), }, { "type": "dir", "perms": 16384, "name": b"db", "target": _x("85f157bdc39356b7bc7de9d0099b4ced8b3b382c"), }, { "type": "dir", "perms": 16384, "name": b"log", "target": _x("5e3d3941c51cce73352dff89c805a304ba96fffe"), }, { "type": "dir", "perms": 16384, "name": b"script", "target": _x("1b278423caf176da3f3533592012502aa10f566c"), }, { "type": "dir", "perms": 16384, "name": b"test", "target": _x("035f0437c080bfd8711670b3e8677e686c69c763"), }, { "type": "dir", "perms": 16384, "name": b"vendor", "target": _x("7c0dc9ad978c1af3f9a4ce061e50f5918bd27138"), }, { "type": "rev", "perms": 57344, "name": b"will_paginate", "target": _x("3d531e169db92a16a9a8974f0ae6edf52e52659e"), }, # in git order, the dir named "order" should be between the files # named "order." and "order0" { "type": "dir", "perms": 16384, "name": b"order", "target": _x("62cdb7020ff920e5aa642c3d4066950dd1f01f4d"), }, { "type": "file", "perms": 16384, "name": b"order.", "target": _x("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), }, { "type": "file", "perms": 16384, "name": b"order0", "target": _x("bbe960a25ea311d21d40669e93df2003ba9b90a2"), }, ], } class DirectoryIdentifier(unittest.TestCase): def setUp(self): self.directory = directory_example self.empty_directory = { "id": "4b825dc642cb6eb9a060e54bf8d69288fbee4904", "entries": [], } def test_dir_identifier(self): self.assertEqual( identifiers.directory_identifier(self.directory), self.directory["id"] ) def test_dir_identifier_entry_order(self): # Reverse order of entries, check the id is still the same. 
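# How directory_identifier reaches the id above (sketch of the git tree
# format; the actual implementation lives in swh.model.identifiers): entries
# are sorted in the git order shown, each one is serialized as
#   <perms as ascii octal> SP <name> NUL <20 raw target bytes>
# and the concatenation is hashed as a git "tree" object.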
directory = {"entries": reversed(self.directory["entries"])} self.assertEqual( identifiers.directory_identifier(directory), self.directory["id"] ) def test_dir_identifier_empty_directory(self): self.assertEqual( identifiers.directory_identifier(self.empty_directory), self.empty_directory["id"], ) linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) revision_example = { "id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590", "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "committer_date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "message": b"Linux 4.2-rc2\n", "type": "git", "synthetic": False, } class RevisionIdentifier(unittest.TestCase): def setUp(self): gpgsig = b"""\ -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.13 (Darwin) iQIcBAABAgAGBQJVJcYsAAoJEBiY3kIkQRNJVAUQAJ8/XQIfMqqC5oYeEFfHOPYZ L7qy46bXHVBa9Qd8zAJ2Dou3IbI2ZoF6/Et89K/UggOycMlt5FKV/9toWyuZv4Po L682wonoxX99qvVTHo6+wtnmYO7+G0f82h+qHMErxjP+I6gzRNBvRr+SfY7VlGdK wikMKOMWC5smrScSHITnOq1Ews5pe3N7qDYMzK0XVZmgDoaem4RSWMJs4My/qVLN e0CqYWq2A22GX7sXl6pjneJYQvcAXUX+CAzp24QnPSb+Q22Guj91TcxLFcHCTDdn qgqMsEyMiisoglwrCbO+D+1xq9mjN9tNFWP66SQ48mrrHYTBV5sz9eJyDfroJaLP CWgbDTgq6GzRMehHT3hXfYS5NNatjnhkNISXR7pnVP/obIi/vpWh5ll6Gd8q26z+ a/O41UzOaLTeNI365MWT4/cnXohVLRG7iVJbAbCxoQmEgsYMRc/pBAzWJtLfcB2G jdTswYL6+MUdL8sB9pZ82D+BP/YAdHe69CyTu1lk9RT2pYtI/kkfjHubXBCYEJSG +VGllBbYG6idQJpyrOYNRJyrDi9yvDJ2W+S0iQrlZrxzGBVGTB/y65S8C+2WTBcE lf1Qb5GDsQrZWgD+jtWTywOYHtCBwyCKSAXxSARMbNPeak9WPlcW/Jmu+fUcMe2x dg1KdHOa34shrKDaOVzW =od6m -----END PGP SIGNATURE-----""" self.revision = revision_example self.revision_none_metadata = { "id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590", "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "metadata": None, } self.synthetic_revision = { "id": b"\xb2\xa7\xe1&\x04\x92\xe3D\xfa\xb3\xcb\xf9\x1b\xc1<\x91" b"\xe0T&\xfd", "author": { "name": b"Software Heritage", "email": b"robot@softwareheritage.org", }, "date": { "timestamp": {"seconds": 1437047495}, "offset": 0, "negative_utc": False, }, "type": "tar", "committer": { "name": b"Software Heritage", "email": b"robot@softwareheritage.org", }, "committer_date": 1437047495, "synthetic": True, "parents": [None], "message": b"synthetic revision message\n", "directory": b"\xd1\x1f\x00\xa6\xa0\xfe\xa6\x05SA\xd2U\x84\xb5\xa9" b"e\x16\xc0\xd2\xb8", "metadata": { "original_artifact": [ { "archive_type": "tar", "name": "gcc-5.2.0.tar.bz2", "sha1_git": "39d281aff934d44b439730057e55b055e206a586", "sha1": "fe3f5390949d47054b613edc36c557eb1d51c18e", "sha256": "5f835b04b5f7dd4f4d2dc96190ec1621b8d89f" "2dc6f638f9f8bc1b1014ba8cad", } ] }, } # cat commit.txt | git hash-object -t commit --stdin self.revision_with_extra_headers = { "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45", 
"directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "name": b"Linus Torvalds", "email": b"torvalds@linux-foundation.org", "fullname": b"Linus Torvalds ", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "extra_headers": ( (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"), (b"svn-revision", b"10"), ), } self.revision_with_gpgsig = { "id": "44cc742a8ca17b9c279be4cc195a93a6ef7a320e", "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "extra_headers": ((b"gpgsig", gpgsig),), "message": b"""Merge branch 'master' of git://github.com/alexhenrie/git-po * 'master' of git://github.com/alexhenrie/git-po: l10n: ca.po: update translation """, } self.revision_no_message = { "id": "4cfc623c9238fa92c832beed000ce2d003fd8333", "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "message": None, } self.revision_empty_message = { "id": "7442cd78bd3b4966921d6a7f7447417b7acb15eb", "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), "parents": [ _x("689664ae944b4692724f13b709a4e4de28b54e57"), _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), ], "author": { "name": b"Jiang Xin", "email": b"worldhello.net@gmail.com", "fullname": b"Jiang Xin ", }, "date": {"timestamp": 1428538899, "offset": 480,}, "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, "committer_date": {"timestamp": 1428538899, "offset": 480,}, "message": b"", } self.revision_only_fullname = { "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45", "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], "author": {"fullname": b"Linus Torvalds ",}, "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), "committer": { "fullname": b"Linus Torvalds ", }, "committer_date": datetime.datetime( 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz ), "message": b"Linux 4.2-rc2\n", "extra_headers": ( (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"), (b"svn-revision", b"10"), ), } def test_revision_identifier(self): self.assertEqual( identifiers.revision_identifier(self.revision), identifiers.identifier_to_str(self.revision["id"]), ) def test_revision_identifier_none_metadata(self): self.assertEqual( identifiers.revision_identifier(self.revision_none_metadata), identifiers.identifier_to_str(self.revision_none_metadata["id"]), ) def test_revision_identifier_synthetic(self): self.assertEqual( 
identifiers.revision_identifier(self.synthetic_revision), identifiers.identifier_to_str(self.synthetic_revision["id"]), ) def test_revision_identifier_with_extra_headers(self): self.assertEqual( identifiers.revision_identifier(self.revision_with_extra_headers), identifiers.identifier_to_str(self.revision_with_extra_headers["id"]), ) def test_revision_identifier_with_gpgsig(self): self.assertEqual( identifiers.revision_identifier(self.revision_with_gpgsig), identifiers.identifier_to_str(self.revision_with_gpgsig["id"]), ) def test_revision_identifier_no_message(self): self.assertEqual( identifiers.revision_identifier(self.revision_no_message), identifiers.identifier_to_str(self.revision_no_message["id"]), ) def test_revision_identifier_empty_message(self): self.assertEqual( identifiers.revision_identifier(self.revision_empty_message), identifiers.identifier_to_str(self.revision_empty_message["id"]), ) def test_revision_identifier_only_fullname(self): self.assertEqual( identifiers.revision_identifier(self.revision_only_fullname), identifiers.identifier_to_str(self.revision_only_fullname["id"]), ) release_example = { "id": "2b10839e32c4c476e9d94492756bb1a3e1ec4aa8", "target": b't\x1b"R\xa5\xe1Ml`\xa9\x13\xc7z`\x99\xab\xe7:\x85J', "target_type": "revision", "name": b"v2.6.14", "author": { "name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org", "fullname": b"Linus Torvalds ", }, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": b"""\ Linux 2.6.14 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.1 (GNU/Linux) iD8DBQBDYWq6F3YsRnbiHLsRAmaeAJ9RCez0y8rOBbhSv344h86l/VVcugCeIhO1 wdLOnvj91G4wxYqrvThthbE= =7VeT -----END PGP SIGNATURE----- """, "synthetic": False, } class ReleaseIdentifier(unittest.TestCase): def setUp(self): linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) self.release = release_example self.release_no_author = { "id": b"&y\x1a\x8b\xcf\x0em3\xf4:\xefv\x82\xbd\xb5U#mV\xde", "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", "target_type": "revision", "name": b"v2.6.12", "message": b"""\ This is the final 2.6.12 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.2.4 (GNU/Linux) iD8DBQBCsykyF3YsRnbiHLsRAvPNAJ482tCZwuxp/bJRz7Q98MHlN83TpACdHr37 o6X/3T+vm8K3bf3driRr34c= =sBHn -----END PGP SIGNATURE----- """, "synthetic": False, } self.release_no_message = { "id": "b6f4f446715f7d9543ef54e41b62982f0db40045", "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", "target_type": "revision", "name": b"v2.6.12", "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": None, } self.release_empty_message = { "id": "71a0aea72444d396575dc25ac37fec87ee3c6492", "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", "target_type": "revision", "name": b"v2.6.12", "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), "message": b"", } self.release_negative_utc = { "id": "97c8d2573a001f88e72d75f596cf86b12b82fd01", "name": b"20081029", "target": "54e9abca4c77421e2921f5f156c9fe4a9f7441c7", "target_type": "revision", "date": { "timestamp": {"seconds": 1225281976}, "offset": 0, "negative_utc": True, }, "author": { "name": b"Otavio Salvador", "email": b"otavio@debian.org", "id": 17640, }, "synthetic": False, "message": b"tagging version 20081029\n\nr56558\n", } self.release_newline_in_author = { "author": { "email": b"esycat@gmail.com", "fullname": b"Eugene 
Janusov\n", "name": b"Eugene Janusov\n", }, "date": { "negative_utc": None, "offset": 600, "timestamp": {"microseconds": 0, "seconds": 1377480558,}, }, "id": b"\\\x98\xf5Y\xd04\x16-\xe2->\xbe\xb9T3\xe6\xf8\x88R1", "message": b"Release of v0.3.2.", "name": b"0.3.2", "synthetic": False, "target": (b"\xc0j\xa3\xd9;x\xa2\x86\\I5\x17" b"\x000\xf8\xc2\xd79o\xd3"), "target_type": "revision", } self.release_snapshot_target = dict(self.release) self.release_snapshot_target["target_type"] = "snapshot" self.release_snapshot_target["id"] = "c29c3ddcc6769a04e54dd69d63a6fdcbc566f850" def test_release_identifier(self): self.assertEqual( identifiers.release_identifier(self.release), identifiers.identifier_to_str(self.release["id"]), ) def test_release_identifier_no_author(self): self.assertEqual( identifiers.release_identifier(self.release_no_author), identifiers.identifier_to_str(self.release_no_author["id"]), ) def test_release_identifier_no_message(self): self.assertEqual( identifiers.release_identifier(self.release_no_message), identifiers.identifier_to_str(self.release_no_message["id"]), ) def test_release_identifier_empty_message(self): self.assertEqual( identifiers.release_identifier(self.release_empty_message), identifiers.identifier_to_str(self.release_empty_message["id"]), ) def test_release_identifier_negative_utc(self): self.assertEqual( identifiers.release_identifier(self.release_negative_utc), identifiers.identifier_to_str(self.release_negative_utc["id"]), ) def test_release_identifier_newline_in_author(self): self.assertEqual( identifiers.release_identifier(self.release_newline_in_author), identifiers.identifier_to_str(self.release_newline_in_author["id"]), ) def test_release_identifier_snapshot_target(self): self.assertEqual( identifiers.release_identifier(self.release_snapshot_target), identifiers.identifier_to_str(self.release_snapshot_target["id"]), ) snapshot_example = { "id": _x("6e65b86363953b780d92b0a928f3e8fcdd10db36"), "branches": { b"directory": { "target": _x("1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8"), "target_type": "directory", }, b"content": { "target": _x("fe95a46679d128ff167b7c55df5d02356c5a1ae1"), "target_type": "content", }, b"alias": {"target": b"revision", "target_type": "alias",}, b"revision": { "target": _x("aafb16d69fd30ff58afdd69036a26047f3aebdc6"), "target_type": "revision", }, b"release": { "target": _x("7045404f3d1c54e6473c71bbb716529fbad4be24"), "target_type": "release", }, b"snapshot": { "target": _x("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), "target_type": "snapshot", }, b"dangling": None, }, } class SnapshotIdentifier(unittest.TestCase): def setUp(self): super().setUp() self.empty = { "id": "1a8893e6a86f444e8be8e7bda6cb34fb1735a00e", "branches": {}, } self.dangling_branch = { "id": "c84502e821eb21ed84e9fd3ec40973abc8b32353", "branches": {b"HEAD": None,}, } self.unresolved = { "id": "84b4548ea486e4b0a7933fa541ff1503a0afe1e0", "branches": {b"foo": {"target": b"bar", "target_type": "alias",},}, } self.all_types = snapshot_example def test_empty_snapshot(self): self.assertEqual( identifiers.snapshot_identifier(self.empty), identifiers.identifier_to_str(self.empty["id"]), ) def test_dangling_branch(self): self.assertEqual( identifiers.snapshot_identifier(self.dangling_branch), identifiers.identifier_to_str(self.dangling_branch["id"]), ) def test_unresolved(self): with self.assertRaisesRegex(ValueError, "b'foo' -> b'bar'"): identifiers.snapshot_identifier(self.unresolved) def test_unresolved_force(self): self.assertEqual( 
identifiers.snapshot_identifier(self.unresolved, ignore_unresolved=True,), identifiers.identifier_to_str(self.unresolved["id"]), ) def test_all_types(self): self.assertEqual( identifiers.snapshot_identifier(self.all_types), identifiers.identifier_to_str(self.all_types["id"]), ) def test_swhid(self): _snapshot_id = _x("c7c108084bc0bf3d81436bf980b46e98bd338453") _release_id = "22ece559cc7cc2364edc5e5593d63ae8bd229f9f" _revision_id = "309cf2674ee7a0749978cf8265ab91a60aea0f7d" _directory_id = "d198bc9d7a6bcf6db04f476d29314f157507d505" _content_id = "94a9ed024d3859793618152ea559a168bbcbb5e2" _snapshot = {"id": _snapshot_id} _release = {"id": _release_id} _revision = {"id": _revision_id} _directory = {"id": _directory_id} _content = {"sha1_git": _content_id} for full_type, _hash, expected_swhid, version, _meta in [ ( SNAPSHOT, _snapshot_id, "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", None, {}, ), ( RELEASE, _release_id, "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", 1, {}, ), ( REVISION, _revision_id, "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", None, {}, ), ( DIRECTORY, _directory_id, "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", None, {}, ), ( CONTENT, _content_id, "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", 1, {}, ), ( SNAPSHOT, _snapshot, "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", None, {}, ), ( RELEASE, _release, "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", 1, {}, ), ( REVISION, _revision, "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", None, {}, ), ( DIRECTORY, _directory, "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", None, {}, ), ( CONTENT, _content, "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", 1, {}, ), ( CONTENT, _content, "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2;origin=1", 1, {"origin": "1"}, ), ]: if version: actual_value = identifiers.swhid( full_type, _hash, version, metadata=_meta ) else: actual_value = identifiers.swhid(full_type, _hash, metadata=_meta) self.assertEqual(actual_value, expected_swhid) def test_swhid_wrong_input(self): _snapshot_id = "notahash4bc0bf3d81436bf980b46e98bd338453" _snapshot = {"id": _snapshot_id} for _type, _hash in [ (SNAPSHOT, _snapshot_id), (SNAPSHOT, _snapshot), ("foo", ""), ]: with self.assertRaises(ValidationError): identifiers.swhid(_type, _hash) def test_parse_swhid(self): for swhid, _type, _version, _hash in [ ( "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", CONTENT, 1, "94a9ed024d3859793618152ea559a168bbcbb5e2", ), ( "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", DIRECTORY, 1, "d198bc9d7a6bcf6db04f476d29314f157507d505", ), ( "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", REVISION, 1, "309cf2674ee7a0749978cf8265ab91a60aea0f7d", ), ( "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", RELEASE, 1, "22ece559cc7cc2364edc5e5593d63ae8bd229f9f", ), ( "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", SNAPSHOT, 1, "c7c108084bc0bf3d81436bf980b46e98bd338453", ), ]: expected_result = SWHID( namespace="swh", scheme_version=_version, object_type=_type, object_id=_hash, metadata={}, ) actual_result = identifiers.parse_swhid(swhid) self.assertEqual(actual_result, expected_result) for swhid, _type, _version, _hash, _metadata in [ ( "swh:1:cnt:9c95815d9e9d91b8dae8e05d8bbc696fe19f796b;lines=1-18;origin=https://github.com/python/cpython", # noqa CONTENT, 1, "9c95815d9e9d91b8dae8e05d8bbc696fe19f796b", {"lines": "1-18", "origin": "https://github.com/python/cpython"}, ), ( 
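# The SWHID grammar exercised by these tests, in one line (sketch):
#   swh:1:<cnt|dir|rev|rel|snp>:<40 hex digits>[;<key>=<value>]*
# e.g. identifiers.swhid(CONTENT, "94a9ed024d3859793618152ea559a168bbcbb5e2")
# yields "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", and
# parse_swhid inverts it into a SWHID object, with any trailing key=value
# pairs collected into its metadata dict.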
"swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=deb://Debian/packages/linuxdoc-tools", # noqa DIRECTORY, 1, "0b6959356d30f1a4e9b7f6bca59b9a336464c03d", {"origin": "deb://Debian/packages/linuxdoc-tools"}, ), ]: expected_result = SWHID( namespace="swh", scheme_version=_version, object_type=_type, object_id=_hash, metadata=_metadata, ) actual_result = identifiers.parse_swhid(swhid) self.assertEqual(actual_result, expected_result) self.assertEqual( expected_result.to_dict(), { "namespace": "swh", "scheme_version": _version, "object_type": _type, "object_id": _hash, "metadata": _metadata, }, ) def test_parse_swhid_parsing_error(self): for swhid in [ ("swh:1:cnt"), ("swh:1:"), ("swh:"), ("swh:1:cnt:"), ("foo:1:cnt:abc8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh:2:dir:def8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh:1:foo:fed8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;invalid;" "malformed"), ("swh:1:snp:gh6959356d30f1a4e9b7f6bca59b9a336464c03d"), ("swh:1:snp:foo"), ]: with self.assertRaises(ValidationError): identifiers.parse_swhid(swhid) def test_persistentid_class_validation_error(self): for _ns, _version, _type, _id in [ ("foo", 1, CONTENT, "abc8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh", 2, DIRECTORY, "def8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh", 1, "foo", "fed8bc9d7a6bcf6db04f476d29314f157507d505"), ("swh", 1, SNAPSHOT, "gh6959356d30f1a4e9b7f6bca59b9a336464c03d"), ]: with self.assertRaises(ValidationError): SWHID( namespace=_ns, scheme_version=_version, object_type=_type, object_id=_id, ) class OriginIdentifier(unittest.TestCase): def setUp(self): self.origin = { "url": "https://github.com/torvalds/linux", } def test_content_identifier(self): self.assertEqual( identifiers.origin_identifier(self.origin), "b63a575fe3faab7692c9f38fb09d4bb45651bb0f", ) TS_DICTS = [ ( {"timestamp": 12345, "offset": 0}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": False}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": False}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": {"seconds": 12345}, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": None, }, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ( { "timestamp": {"seconds": 12345, "microseconds": 100}, "offset": 0, "negative_utc": None, }, { "timestamp": {"seconds": 12345, "microseconds": 100}, "offset": 0, "negative_utc": False, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": True}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": True, }, ), ( {"timestamp": 12345, "offset": 0, "negative_utc": None}, { "timestamp": {"seconds": 12345, "microseconds": 0}, "offset": 0, "negative_utc": False, }, ), ] @pytest.mark.parametrize("dict_input,expected", TS_DICTS) def test_normalize_timestamp_dict(dict_input, expected): assert normalize_timestamp(dict_input) == expected TS_DICTS_INVALID_TIMESTAMP = [ {"timestamp": 1.2, "offset": 
0}, {"timestamp": "1", "offset": 0}, # these below should really also trigger a ValueError... # {"timestamp": {"seconds": "1"}, "offset": 0}, # {"timestamp": {"seconds": 1.2}, "offset": 0}, # {"timestamp": {"seconds": 1.2}, "offset": 0}, ] @pytest.mark.parametrize("dict_input", TS_DICTS_INVALID_TIMESTAMP) def test_normalize_timestamp_dict_invalid_timestamp(dict_input): with pytest.raises(ValueError, match="non-integer timestamp"): normalize_timestamp(dict_input) diff --git a/swh/model/tests/test_model.py b/swh/model/tests/test_model.py index 0404dcd..fdd5e04 100644 --- a/swh/model/tests/test_model.py +++ b/swh/model/tests/test_model.py @@ -1,1187 +1,1187 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import attr from attrs_strict import AttributeTypeError from hypothesis import given from hypothesis.strategies import binary import pytest +from swh.model.hashutil import MultiHash, hash_to_bytes +import swh.model.hypothesis_strategies as strategies +from swh.model.identifiers import ( + SWHID, + directory_identifier, + parse_swhid, + release_identifier, + revision_identifier, + snapshot_identifier, +) from swh.model.model import ( BaseModel, Content, - SkippedContent, Directory, - Revision, - Release, - Snapshot, + MetadataAuthority, + MetadataAuthorityType, + MetadataFetcher, + MetadataTargetType, + MissingData, Origin, OriginVisit, OriginVisitStatus, - Timestamp, - TimestampWithTimezone, - MissingData, Person, RawExtrinsicMetadata, - MetadataTargetType, - MetadataAuthority, - MetadataAuthorityType, - MetadataFetcher, -) -from swh.model.hashutil import hash_to_bytes, MultiHash -import swh.model.hypothesis_strategies as strategies -from swh.model.identifiers import ( - directory_identifier, - revision_identifier, - release_identifier, - snapshot_identifier, - parse_swhid, - SWHID, + Release, + Revision, + SkippedContent, + Snapshot, + Timestamp, + TimestampWithTimezone, ) from swh.model.tests.test_identifiers import ( directory_example, - revision_example, release_example, + revision_example, snapshot_example, ) @given(strategies.objects()) def test_todict_inverse_fromdict(objtype_and_obj): (obj_type, obj) = objtype_and_obj if obj_type in ("origin", "origin_visit"): return obj_as_dict = obj.to_dict() obj_as_dict_copy = copy.deepcopy(obj_as_dict) # Check the composition of to_dict and from_dict is the identity assert obj == type(obj).from_dict(obj_as_dict) # Check from_dict() does not change the input dict assert obj_as_dict == obj_as_dict_copy # Check the composition of from_dict and to_dict is the identity assert obj_as_dict == type(obj).from_dict(obj_as_dict).to_dict() # Anonymization @given(strategies.objects()) def test_anonymization(objtype_and_obj): (obj_type, obj) = objtype_and_obj def check_person(p): if p is not None: assert p.name is None assert p.email is None assert len(p.fullname) == 32 anon_obj = obj.anonymize() if obj_type == "person": assert anon_obj is not None check_person(anon_obj) elif obj_type == "release": assert anon_obj is not None check_person(anon_obj.author) elif obj_type == "revision": assert anon_obj is not None check_person(anon_obj.author) check_person(anon_obj.committer) else: assert anon_obj is None # Origin, OriginVisit, OriginVisitStatus @given(strategies.origins()) def test_todict_origins(origin): obj = origin.to_dict() assert 
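# The round-trip invariant test_todict_inverse_fromdict checks, stated
# directly (sketch):
#   obj == type(obj).from_dict(obj.to_dict())
#   obj.to_dict() == type(obj).from_dict(obj.to_dict()).to_dict()
# i.e. to_dict and from_dict compose to the identity in both directions.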
"type" not in obj assert type(origin)(url=origin.url) == type(origin).from_dict(obj) @given(strategies.origin_visits()) def test_todict_origin_visits(origin_visit): obj = origin_visit.to_dict() assert origin_visit == type(origin_visit).from_dict(obj) def test_origin_visit_naive_datetime(): with pytest.raises(ValueError, match="must be a timezone-aware datetime"): OriginVisit( origin="http://foo/", date=datetime.datetime.now(), type="git", ) @given(strategies.origin_visit_statuses()) def test_todict_origin_visit_statuses(origin_visit_status): obj = origin_visit_status.to_dict() assert origin_visit_status == type(origin_visit_status).from_dict(obj) def test_origin_visit_status_naive_datetime(): with pytest.raises(ValueError, match="must be a timezone-aware datetime"): OriginVisitStatus( origin="http://foo/", visit=42, date=datetime.datetime.now(), status="ongoing", snapshot=None, ) # Timestamp @given(strategies.timestamps()) def test_timestamps_strategy(timestamp): attr.validate(timestamp) def test_timestamp_seconds(): attr.validate(Timestamp(seconds=0, microseconds=0)) with pytest.raises(AttributeTypeError): Timestamp(seconds="0", microseconds=0) attr.validate(Timestamp(seconds=2 ** 63 - 1, microseconds=0)) with pytest.raises(ValueError): Timestamp(seconds=2 ** 63, microseconds=0) attr.validate(Timestamp(seconds=-(2 ** 63), microseconds=0)) with pytest.raises(ValueError): Timestamp(seconds=-(2 ** 63) - 1, microseconds=0) def test_timestamp_microseconds(): attr.validate(Timestamp(seconds=0, microseconds=0)) with pytest.raises(AttributeTypeError): Timestamp(seconds=0, microseconds="0") attr.validate(Timestamp(seconds=0, microseconds=10 ** 6 - 1)) with pytest.raises(ValueError): Timestamp(seconds=0, microseconds=10 ** 6) with pytest.raises(ValueError): Timestamp(seconds=0, microseconds=-1) def test_timestamp_from_dict(): assert Timestamp.from_dict({"seconds": 10, "microseconds": 5}) with pytest.raises(AttributeTypeError): Timestamp.from_dict({"seconds": "10", "microseconds": 5}) with pytest.raises(AttributeTypeError): Timestamp.from_dict({"seconds": 10, "microseconds": "5"}) with pytest.raises(ValueError): Timestamp.from_dict({"seconds": 0, "microseconds": -1}) Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6 - 1}) with pytest.raises(ValueError): Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6}) # TimestampWithTimezone def test_timestampwithtimezone(): ts = Timestamp(seconds=0, microseconds=0) tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=False) attr.validate(tstz) assert tstz.negative_utc is False attr.validate(TimestampWithTimezone(timestamp=ts, offset=10, negative_utc=False)) attr.validate(TimestampWithTimezone(timestamp=ts, offset=-10, negative_utc=False)) tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=True) attr.validate(tstz) assert tstz.negative_utc is True with pytest.raises(AttributeTypeError): TimestampWithTimezone( timestamp=datetime.datetime.now(), offset=0, negative_utc=False ) with pytest.raises(AttributeTypeError): TimestampWithTimezone(timestamp=ts, offset="0", negative_utc=False) with pytest.raises(AttributeTypeError): TimestampWithTimezone(timestamp=ts, offset=1.0, negative_utc=False) with pytest.raises(AttributeTypeError): TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=0) with pytest.raises(ValueError): TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=True) with pytest.raises(ValueError): TimestampWithTimezone(timestamp=ts, offset=-1, negative_utc=True) def 
def test_timestampwithtimezone_from_datetime(): tz = datetime.timezone(datetime.timedelta(minutes=+60)) date = datetime.datetime(2020, 2, 27, 14, 39, 19, tzinfo=tz) tstz = TimestampWithTimezone.from_datetime(date) assert tstz == TimestampWithTimezone( timestamp=Timestamp(seconds=1582810759, microseconds=0,), offset=60, negative_utc=False, ) def test_timestampwithtimezone_from_naive_datetime(): date = datetime.datetime(2020, 2, 27, 14, 39, 19) with pytest.raises(ValueError, match="datetime without timezone"): TimestampWithTimezone.from_datetime(date) def test_timestampwithtimezone_from_iso8601(): date = "2020-02-27 14:39:19.123456+0100" tstz = TimestampWithTimezone.from_iso8601(date) assert tstz == TimestampWithTimezone( timestamp=Timestamp(seconds=1582810759, microseconds=123456,), offset=60, negative_utc=False, ) def test_timestampwithtimezone_from_iso8601_negative_utc(): date = "2020-02-27 13:39:19-0000" tstz = TimestampWithTimezone.from_iso8601(date) assert tstz == TimestampWithTimezone( timestamp=Timestamp(seconds=1582810759, microseconds=0,), offset=0, negative_utc=True, ) def test_person_from_fullname(): """The author should have name, email and fullname filled. """ actual_person = Person.from_fullname(b"tony <ynot@dagobah>") assert actual_person == Person( fullname=b"tony <ynot@dagobah>", name=b"tony", email=b"ynot@dagobah", ) def test_person_from_fullname_no_email(): """The author and fullname should be the same as the input (author). """ actual_person = Person.from_fullname(b"tony") assert actual_person == Person(fullname=b"tony", name=b"tony", email=None,) def test_person_from_fullname_empty_person(): """Empty person has only its fullname filled with the empty byte-string. """ actual_person = Person.from_fullname(b"") assert actual_person == Person(fullname=b"", name=None, email=None,) def test_git_author_line_to_author(): # edge case out of the way with pytest.raises(TypeError): Person.from_fullname(None) tests = { b"a <b@c.com>": Person(name=b"a", email=b"b@c.com", fullname=b"a <b@c.com>",), b"<foo@bar.com>": Person( name=None, email=b"foo@bar.com", fullname=b"<foo@bar.com>", ), b'malformed <"<br"@ckets>': Person( name=b"malformed", email=b'"<br"@ckets', fullname=b'malformed <"<br"@ckets>', ), b"trailing <sp@c.e>": Person( name=b"trailing", email=b"sp@c.e", fullname=b"trailing <sp@c.e>", ), b"no<sp@c.e>": Person(name=b"no", email=b"sp@c.e", fullname=b"no<sp@c.e>",), b" more <sp@c.es>": Person( name=b"more", email=b"sp@c.es", fullname=b" more <sp@c.es>", ), b" <>": Person(name=None, email=None, fullname=b" <>",), } for person in sorted(tests): expected_person = tests[person] assert expected_person == Person.from_fullname(person) # Content def test_content_get_hash(): hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux") c = Content(length=42, status="visible", **hashes) for (hash_name, hash_) in hashes.items(): assert c.get_hash(hash_name) == hash_ def test_content_hashes(): hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux") c = Content(length=42, status="visible", **hashes) assert c.hashes() == hashes def test_content_data(): c = Content( length=42, status="visible", data=b"foo", sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) assert c.with_data() == c def test_content_data_missing(): c = Content( length=42, status="visible", sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux", ) with pytest.raises(MissingData): c.with_data() @given(strategies.present_contents_d()) def test_content_from_dict(content_d): c = Content.from_data(**content_d) assert c assert c.ctime == content_d["ctime"] content_d2 = c.to_dict() c2 = Content.from_dict(content_d2) assert c2.ctime == c.ctime
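Content.from_data, exercised by the round-trip test above and by the property-based tests that follow, derives every checksum plus length and status from the payload alone. A minimal sketch of that contract (the sample bytes are illustrative):

    from swh.model.hashutil import MultiHash
    from swh.model.model import Content

    c = Content.from_data(b"hello")
    assert c.length == 5
    assert c.status == "visible"  # the default status
    # each digest matches an independent MultiHash computation
    assert c.hashes() == MultiHash.from_data(b"hello").digest()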
def test_content_from_dict_str_ctime(): # test with ctime as a string n = datetime.datetime(2020, 5, 6, 12, 34, tzinfo=datetime.timezone.utc) content_d = { "ctime": n.isoformat(), "data": b"", "length": 0, "sha1": b"\x00", "sha256": b"\x00", "sha1_git": b"\x00", "blake2s256": b"\x00", } c = Content.from_dict(content_d) assert c.ctime == n def test_content_from_dict_str_naive_ctime(): # test with ctime as a naive datetime string n = datetime.datetime(2020, 5, 6, 12, 34) content_d = { "ctime": n.isoformat(), "data": b"", "length": 0, "sha1": b"\x00", "sha256": b"\x00", "sha1_git": b"\x00", "blake2s256": b"\x00", } with pytest.raises(ValueError, match="must be a timezone-aware datetime."): Content.from_dict(content_d) @given(binary(max_size=4096)) def test_content_from_data(data): c = Content.from_data(data) assert c.data == data assert c.length == len(data) assert c.status == "visible" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value @given(binary(max_size=4096)) def test_hidden_content_from_data(data): c = Content.from_data(data, status="hidden") assert c.data == data assert c.length == len(data) assert c.status == "hidden" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value def test_content_naive_datetime(): c = Content.from_data(b"foo") with pytest.raises(ValueError, match="must be a timezone-aware datetime"): Content( **c.to_dict(), ctime=datetime.datetime.now(), ) # SkippedContent @given(binary(max_size=4096)) def test_skipped_content_from_data(data): c = SkippedContent.from_data(data, reason="reason") assert c.reason == "reason" assert c.length == len(data) assert c.status == "absent" for key, value in MultiHash.from_data(data).digest().items(): assert getattr(c, key) == value @given(strategies.skipped_contents_d()) def test_skipped_content_origin_is_str(skipped_content_d): assert SkippedContent.from_dict(skipped_content_d) skipped_content_d["origin"] = "http://path/to/origin" assert SkippedContent.from_dict(skipped_content_d) skipped_content_d["origin"] = Origin(url="http://path/to/origin") with pytest.raises(ValueError, match="origin"): SkippedContent.from_dict(skipped_content_d) def test_skipped_content_naive_datetime(): c = SkippedContent.from_data(b"foo", reason="reason") with pytest.raises(ValueError, match="must be a timezone-aware datetime"): SkippedContent( **c.to_dict(), ctime=datetime.datetime.now(), ) # Revision def test_revision_extra_headers_no_headers(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_model = Revision(**rev_dict) assert rev_model.metadata is None assert rev_model.extra_headers == () rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } rev_model = Revision(**rev_dict) assert rev_model.metadata == rev_dict["metadata"] assert rev_model.extra_headers == () def test_revision_extra_headers_with_headers(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\x00"), (b"header1", b"again"), ) rev_dict["extra_headers"] = extra_headers rev_model = Revision(**rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers
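The remaining extra_headers tests pin down the same contract from several angles: headers are pairs of bytes, duplicate keys are preserved in order, lists are coerced to tuples, and a backward-compatibility hook lifts extra_headers out of the legacy metadata dict. A condensed sketch of that hook, reusing revision_example; the "summary" key and the header value are illustrative:

    from swh.model.model import Revision
    from swh.model.tests.test_identifiers import revision_example

    rev_d = revision_example.copy()
    rev_d.pop("id")
    # legacy producers stored headers inside the metadata dict...
    rev_d["metadata"] = {
        "summary": "stub",  # hypothetical extra metadata key
        "extra_headers": ((b"gpgsig", b"stub-signature"),),
    }
    rev = Revision.from_dict(rev_d)
    # ...but the model lifts them out into the extra_headers attribute
    assert rev.extra_headers == ((b"gpgsig", b"stub-signature"),)
    assert "extra_headers" not in rev.metadata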
def test_revision_extra_headers_in_metadata(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\x00"), (b"header1", b"again"), ) # check the bw-compat init hook does the job # i.e. extra_headers are given in the metadata field rev_dict["metadata"]["extra_headers"] = extra_headers rev_model = Revision(**rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_as_lists(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) rev_dict = attr.asdict(rev, recurse=False) rev_dict["metadata"] = {} extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\x00"), (b"header1", b"again"), ) # check Revision.extra_headers tuplify does the job rev_dict["extra_headers"] = [list(x) for x in extra_headers] rev_model = Revision(**rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_type_error(): rev_dict = revision_example.copy() rev_dict.pop("id") rev = Revision.from_dict(rev_dict) orig_rev_dict = attr.asdict(rev, recurse=False) orig_rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( ("header1", b"value1"), (b"header2", 42), ("header1", "again"), ) # check headers one at a time # if given as extra_header for extra_header in extra_headers: rev_dict = copy.deepcopy(orig_rev_dict) rev_dict["extra_headers"] = (extra_header,) with pytest.raises(AttributeTypeError): Revision(**rev_dict) # if given as metadata for extra_header in extra_headers: rev_dict = copy.deepcopy(orig_rev_dict) rev_dict["metadata"]["extra_headers"] = (extra_header,) with pytest.raises(AttributeTypeError): Revision(**rev_dict) def test_revision_extra_headers_from_dict(): rev_dict = revision_example.copy() rev_dict.pop("id") rev_model = Revision.from_dict(rev_dict) assert rev_model.metadata is None assert rev_model.extra_headers == () rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } rev_model = Revision.from_dict(rev_dict) assert rev_model.metadata == rev_dict["metadata"] assert rev_model.extra_headers == () extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\nmaybe\x00\xff"), (b"header1", b"again"), ) rev_dict["extra_headers"] = extra_headers rev_model = Revision.from_dict(rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_in_metadata_from_dict(): rev_dict = revision_example.copy() rev_dict.pop("id") rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1", b"value1"), (b"header2", b"42"), (b"header3", b"should I?\nmaybe\x00\xff"), (b"header1", b"again"), ) # check the bw-compat init hook does the job rev_dict["metadata"]["extra_headers"] = extra_headers rev_model = Revision.from_dict(rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers def test_revision_extra_headers_as_lists_from_dict(): rev_dict = revision_example.copy() rev_dict.pop("id") rev_model = Revision.from_dict(rev_dict) rev_dict["metadata"] = { "something": "somewhere", "some other thing": "stranger", } extra_headers = ( (b"header1",
b"value1"), (b"header2", b"42"), (b"header3", b"should I?\nmaybe\x00\xff"), (b"header1", b"again"), ) # check Revision.extra_headers converter does the job rev_dict["extra_headers"] = [list(x) for x in extra_headers] rev_model = Revision.from_dict(rev_dict) assert "extra_headers" not in rev_model.metadata assert rev_model.extra_headers == extra_headers # ID computation def test_directory_model_id_computation(): dir_dict = directory_example.copy() del dir_dict["id"] dir_id = hash_to_bytes(directory_identifier(dir_dict)) dir_model = Directory.from_dict(dir_dict) assert dir_model.id == dir_id def test_revision_model_id_computation(): rev_dict = revision_example.copy() del rev_dict["id"] rev_id = hash_to_bytes(revision_identifier(rev_dict)) rev_model = Revision.from_dict(rev_dict) assert rev_model.id == rev_id def test_revision_model_id_computation_with_no_date(): """We can have revision with date to None """ rev_dict = revision_example.copy() rev_dict["date"] = None rev_dict["committer_date"] = None del rev_dict["id"] rev_id = hash_to_bytes(revision_identifier(rev_dict)) rev_model = Revision.from_dict(rev_dict) assert rev_model.date is None assert rev_model.committer_date is None assert rev_model.id == rev_id def test_release_model_id_computation(): rel_dict = release_example.copy() del rel_dict["id"] rel_id = hash_to_bytes(release_identifier(rel_dict)) rel_model = Release.from_dict(rel_dict) assert isinstance(rel_model.date, TimestampWithTimezone) assert rel_model.id == hash_to_bytes(rel_id) def test_snapshot_model_id_computation(): snp_dict = snapshot_example.copy() del snp_dict["id"] snp_id = hash_to_bytes(snapshot_identifier(snp_dict)) snp_model = Snapshot.from_dict(snp_dict) assert snp_model.id == snp_id @given(strategies.objects(split_content=True)) def test_object_type(objtype_and_obj): obj_type, obj = objtype_and_obj assert obj_type == obj.object_type def test_object_type_is_final(): object_types = set() def check_final(cls): if hasattr(cls, "object_type"): assert cls.object_type not in object_types object_types.add(cls.object_type) if cls.__subclasses__(): assert not hasattr(cls, "object_type") for subcls in cls.__subclasses__(): check_final(subcls) check_final(BaseModel) _metadata_authority = MetadataAuthority( type=MetadataAuthorityType.FORGE, url="https://forge.softwareheritage.org", ) _metadata_fetcher = MetadataFetcher(name="test-fetcher", version="0.0.1",) _content_swhid = parse_swhid("swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2") _origin_url = "https://forge.softwareheritage.org/source/swh-model.git" _common_metadata_fields = dict( discovery_date=datetime.datetime.now(tz=datetime.timezone.utc), authority=_metadata_authority, fetcher=_metadata_fetcher, format="json", metadata=b'{"foo": "bar"}', ) def test_metadata_valid(): """Checks valid RawExtrinsicMetadata objects don't raise an error.""" # Simplest case RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, **_common_metadata_fields ) # Object with an SWHID RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, **_common_metadata_fields ) def test_metadata_to_dict(): """Checks valid RawExtrinsicMetadata objects don't raise an error.""" common_fields = { "authority": {"type": "forge", "url": "https://forge.softwareheritage.org",}, "fetcher": {"name": "test-fetcher", "version": "0.0.1",}, "discovery_date": _common_metadata_fields["discovery_date"], "format": "json", "metadata": b'{"foo": "bar"}', } m = RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, 
**_common_metadata_fields ) assert m.to_dict() == { "type": "origin", "id": _origin_url, **common_fields, } assert RawExtrinsicMetadata.from_dict(m.to_dict()) == m m = RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, **_common_metadata_fields ) assert m.to_dict() == { "type": "content", "id": "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", **common_fields, } assert RawExtrinsicMetadata.from_dict(m.to_dict()) == m def test_metadata_invalid_id(): """Checks various invalid values for the 'id' field.""" # SWHID for an origin with pytest.raises(ValueError, match="expected an URL"): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_content_swhid, **_common_metadata_fields ) # SWHID for an origin (even when passed as string) with pytest.raises(ValueError, match="expected an URL"): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", **_common_metadata_fields, ) # URL for a non-origin with pytest.raises(ValueError, match="Expected SWHID, got a string"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_origin_url, **_common_metadata_fields ) # SWHID passed as string instead of SWHID with pytest.raises(ValueError, match="Expected SWHID, got a string"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", **_common_metadata_fields, ) # Object type does not match the SWHID with pytest.raises( ValueError, match="Expected SWHID type 'revision', got 'content'" ): RawExtrinsicMetadata( type=MetadataTargetType.REVISION, id=_content_swhid, **_common_metadata_fields, ) # Non-core SWHID with pytest.raises(ValueError, match="Expected core SWHID"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=SWHID( object_type="content", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", metadata={"foo": "bar"}, ), **_common_metadata_fields, ) def test_metadata_naive_datetime(): with pytest.raises(ValueError, match="must be a timezone-aware datetime"): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, **{**_common_metadata_fields, "discovery_date": datetime.datetime.now()}, ) def test_metadata_validate_context_origin(): """Checks validation of RawExtrinsicMetadata.origin.""" # Origins can't have an 'origin' context with pytest.raises( ValueError, match="Unexpected 'origin' context for origin object" ): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, origin=_origin_url, **_common_metadata_fields, ) # but all other types can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, origin=_origin_url, **_common_metadata_fields, ) # SWHIDs aren't valid origin URLs with pytest.raises(ValueError, match="SWHID used as context origin URL"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, origin="swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", **_common_metadata_fields, ) def test_metadata_validate_context_visit(): """Checks validation of RawExtrinsicMetadata.visit.""" # Origins can't have a 'visit' context with pytest.raises( ValueError, match="Unexpected 'visit' context for origin object" ): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, visit=42, **_common_metadata_fields, ) # but all other types can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, origin=_origin_url, visit=42, **_common_metadata_fields, ) # Missing 'origin' with pytest.raises(ValueError, match="'origin' context must be set if 'visit' is"): 
RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, visit=42, **_common_metadata_fields, ) # visit id must be positive with pytest.raises(ValueError, match="Nonpositive visit id"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, origin=_origin_url, visit=-42, **_common_metadata_fields, ) def test_metadata_validate_context_snapshot(): """Checks validation of RawExtrinsicMetadata.snapshot.""" # Origins can't have a 'snapshot' context with pytest.raises( ValueError, match="Unexpected 'snapshot' context for origin object" ): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, snapshot=SWHID( object_type="snapshot", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, snapshot=SWHID( object_type="snapshot", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2" ), **_common_metadata_fields, ) # Non-core SWHID with pytest.raises(ValueError, match="Expected core SWHID"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, snapshot=SWHID( object_type="snapshot", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", metadata={"foo": "bar"}, ), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'snapshot', got 'content'" ): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, snapshot=SWHID( object_type="content", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) def test_metadata_validate_context_release(): """Checks validation of RawExtrinsicMetadata.release.""" # Origins can't have a 'release' context with pytest.raises( ValueError, match="Unexpected 'release' context for origin object" ): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, release=SWHID( object_type="release", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, release=SWHID( object_type="release", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2" ), **_common_metadata_fields, ) # Non-core SWHID with pytest.raises(ValueError, match="Expected core SWHID"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, release=SWHID( object_type="release", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", metadata={"foo": "bar"}, ), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'release', got 'content'" ): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, release=SWHID( object_type="content", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) def test_metadata_validate_context_revision(): """Checks validation of RawExtrinsicMetadata.revision.""" # Origins can't have a 'revision' context with pytest.raises( ValueError, match="Unexpected 'revision' context for origin object" ): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, revision=SWHID( object_type="revision", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, revision=SWHID( object_type="revision", 
object_id="94a9ed024d3859793618152ea559a168bbcbb5e2" ), **_common_metadata_fields, ) # Non-core SWHID with pytest.raises(ValueError, match="Expected core SWHID"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, revision=SWHID( object_type="revision", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", metadata={"foo": "bar"}, ), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'revision', got 'content'" ): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, revision=SWHID( object_type="content", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) def test_metadata_validate_context_path(): """Checks validation of RawExtrinsicMetadata.path.""" # Origins can't have a 'path' context with pytest.raises(ValueError, match="Unexpected 'path' context for origin object"): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, path=b"/foo/bar", **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, path=b"/foo/bar", **_common_metadata_fields, ) def test_metadata_validate_context_directory(): """Checks validation of RawExtrinsicMetadata.directory.""" # Origins can't have a 'directory' context with pytest.raises( ValueError, match="Unexpected 'directory' context for origin object" ): RawExtrinsicMetadata( type=MetadataTargetType.ORIGIN, id=_origin_url, directory=SWHID( object_type="directory", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) # but content can RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, directory=SWHID( object_type="directory", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) # Non-core SWHID with pytest.raises(ValueError, match="Expected core SWHID"): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, directory=SWHID( object_type="directory", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", metadata={"foo": "bar"}, ), **_common_metadata_fields, ) # SWHID type doesn't match the expected type of this context key with pytest.raises( ValueError, match="Expected SWHID type 'directory', got 'content'" ): RawExtrinsicMetadata( type=MetadataTargetType.CONTENT, id=_content_swhid, directory=SWHID( object_type="content", object_id="94a9ed024d3859793618152ea559a168bbcbb5e2", ), **_common_metadata_fields, ) diff --git a/swh/model/validators.py b/swh/model/validators.py index 6cd7fc1..a2f9dbf 100644 --- a/swh/model/validators.py +++ b/swh/model/validators.py @@ -1,78 +1,78 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from .exceptions import ValidationError, NON_FIELD_ERRORS from . import fields +from .exceptions import NON_FIELD_ERRORS, ValidationError from .hashutil import MultiHash, hash_to_bytes def validate_content(content): """Validate that a content has the correct schema. 
Args: a content (dictionary) to validate.""" def validate_content_status(status): return fields.validate_enum(status, {"absent", "visible", "hidden"}) def validate_keys(content): hashes = {"sha1", "sha1_git", "sha256"} errors = [] out = True if content["status"] == "absent": try: out = out and fields.validate_all_keys(content, {"reason", "origin"}) except ValidationError as e: errors.append(e) try: out = out and fields.validate_any_key(content, hashes) except ValidationError as e: errors.append(e) else: try: out = out and fields.validate_all_keys(content, hashes) except ValidationError as e: errors.append(e) if errors: raise ValidationError(errors) return out def validate_hashes(content): errors = [] if "data" in content: hashes = MultiHash.from_data(content["data"]).digest() for hash_type, computed_hash in hashes.items(): if hash_type not in content: continue content_hash = hash_to_bytes(content[hash_type]) if content_hash != computed_hash: errors.append( ValidationError( "hash mismatch in content for hash %(hash)s", params={"hash": hash_type}, code="content-hash-mismatch", ) ) if errors: raise ValidationError(errors) return True content_schema = { "sha1": (False, fields.validate_sha1), "sha1_git": (False, fields.validate_sha1_git), "sha256": (False, fields.validate_sha256), "status": (True, validate_content_status), "length": (True, fields.validate_int), "ctime": (True, fields.validate_datetime), "reason": (False, fields.validate_str), "origin": (False, fields.validate_int), "data": (False, fields.validate_bytes), NON_FIELD_ERRORS: [validate_keys, validate_hashes], } return fields.validate_against_schema("content", content_schema, content)
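To see how the schema, the key checks, and the hash checks compose, here is a rough sketch of validating a minimal "visible" content entry (field values are illustrative, and ctime is assumed to satisfy fields.validate_datetime):

    import datetime

    from swh.model.hashutil import MultiHash
    from swh.model.validators import validate_content

    data = b"hello"
    hashes = MultiHash.from_data(data, hash_names={"sha1", "sha1_git", "sha256"}).digest()
    content = {
        **hashes,
        "status": "visible",  # "visible" requires all three hashes, per validate_keys
        "length": len(data),
        "ctime": datetime.datetime.now(tz=datetime.timezone.utc),
        "data": data,  # triggers validate_hashes to recompute and compare digests
    }
    assert validate_content(content)  # raises ValidationError on any mismatch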