diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 380c658..69b3349 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,46 +1,40 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v2.4.0 hooks: - id: trailing-whitespace - id: flake8 - id: check-json - id: check-yaml - repo: https://github.com/codespell-project/codespell rev: v1.16.0 hooks: - id: codespell - repo: local hooks: - id: mypy name: mypy entry: mypy args: [swh] pass_filenames: false language: system types: [python] +- repo: https://github.com/python/black + rev: 19.10b0 + hooks: + - id: black + # unfortunately, we are far from being able to enable this... # - repo: https://github.com/PyCQA/pydocstyle.git # rev: 4.0.0 # hooks: # - id: pydocstyle # name: pydocstyle # description: pydocstyle is a static analysis tool for checking compliance with Python docstring conventions. # entry: pydocstyle --convention=google # language: python # types: [python] -# black requires py3.6+ -#- repo: https://github.com/python/black -# rev: 19.3b0 -# hooks: -# - id: black -# language_version: python3 -#- repo: https://github.com/asottile/blacken-docs -# rev: v1.0.0-1 -# hooks: -# - id: blacken-docs -# additional_dependencies: [black==19.3b0] diff --git a/bin/swh-hashtree b/bin/swh-hashtree index faf258f..5b85b7b 100755 --- a/bin/swh-hashtree +++ b/bin/swh-hashtree @@ -1,58 +1,56 @@ #!/usr/bin/env python3 # Use sample: # swh-hashtree --path . --ignore '.svn' --ignore '.git-svn' \ # --ignore-empty-folders # 38f8d2c3a951f6b94007896d0981077e48bbd702 import click import os from swh.model import from_disk, hashutil def combine_filters(*filters): """Combine several ignore filters""" if len(filters) == 0: return from_disk.accept_all_directories elif len(filters) == 1: return filters[0] def combined_filter(*args, **kwargs): return all(filter(*args, **kwargs) for filter in filters) return combined_filter @click.command() -@click.option('--path', default='.', - help='Optional path to hash.') -@click.option('--ignore-empty-folder', is_flag=True, default=False, - help='Ignore empty folder.') -@click.option('--ignore', multiple=True, - help='Ignore pattern.') +@click.option("--path", default=".", help="Optional path to hash.") +@click.option( + "--ignore-empty-folder", is_flag=True, default=False, help="Ignore empty folder." 
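A quick sketch of the filter combination this script performs when both --ignore-empty-folder and --ignore are given; it uses only the from_disk helpers imported above (the ignored names and the "." path are illustrative):

    import os
    from swh.model import from_disk, hashutil

    filters = [
        from_disk.ignore_empty_directories,
        from_disk.ignore_named_directories([b".git", b".svn"]),
    ]

    def combined_filter(*args, **kwargs):
        # Keep a directory only if every filter accepts it, as combine_filters does.
        return all(f(*args, **kwargs) for f in filters)

    d = from_disk.Directory.from_disk(path=os.fsencode("."), dir_filter=combined_filter)
    print(hashutil.hash_to_hex(d.hash))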
+) +@click.option("--ignore", multiple=True, help="Ignore pattern.") def main(path, ignore_empty_folder=False, ignore=None): filters = [] if ignore_empty_folder: filters.append(from_disk.ignore_empty_directories) if ignore: filters.append( - from_disk.ignore_named_directories( - [os.fsencode(name) for name in ignore] - ) + from_disk.ignore_named_directories([os.fsencode(name) for name in ignore]) ) try: - d = from_disk.Directory.from_disk(path=os.fsencode(path), - dir_filter=combine_filters(*filters)) + d = from_disk.Directory.from_disk( + path=os.fsencode(path), dir_filter=combine_filters(*filters) + ) hash = d.hash except Exception as e: print(e) return else: print(hashutil.hash_to_hex(hash)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/bin/swh-revhash b/bin/swh-revhash index 6de0a26..d3a8caf 100755 --- a/bin/swh-revhash +++ b/bin/swh-revhash @@ -1,31 +1,31 @@ #!/usr/bin/env python3 # Use: # swh-revhash 'tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\nparent 22c0fa5195a53f2e733ec75a9b6e9d1624a8b771\nauthor seanius 1138341044 +0000\ncommitter seanius 1138341044 +0000\n\nmaking dir structure...\n' # noqa # output: 17a631d474f49bbebfdf3d885dcde470d7faafd7 # To compare with git: # git-revhash 'tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\nparent 22c0fa5195a53f2e733ec75a9b6e9d1624a8b771\nauthor seanius 1138341044 +0000\ncommitter seanius 1138341044 +0000\n\nmaking dir structure...\n' # noqa # output: 17a631d474f49bbebfdf3d885dcde470d7faafd7 import sys from swh.model import identifiers, hashutil def revhash(revision_raw): """Compute the revision hash. """ # HACK: string have somehow their \n expanded to \\n - if b'\\n' in revision_raw: - revision_raw = revision_raw.replace(b'\\n', b'\n') + if b"\\n" in revision_raw: + revision_raw = revision_raw.replace(b"\\n", b"\n") - h = hashutil.hash_git_data(revision_raw, 'commit') + h = hashutil.hash_git_data(revision_raw, "commit") return identifiers.identifier_to_str(h) -if __name__ == '__main__': - revision_raw = sys.argv[1].encode('utf-8') +if __name__ == "__main__": + revision_raw = sys.argv[1].encode("utf-8") print(revhash(revision_raw)) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..8d79b7e --- /dev/null +++ b/setup.cfg @@ -0,0 +1,6 @@ +[flake8] +# E203: whitespaces before ':' +# E231: missing whitespace after ',' +# W503: line break before binary operator +ignore = E203,E231,W503 +max-line-length = 88 diff --git a/setup.py b/setup.py index 6f2eb37..f0ea604 100755 --- a/setup.py +++ b/setup.py @@ -1,76 +1,77 @@ #!/usr/bin/env python3 # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from setuptools import setup, find_packages from os import path from io import open here = path.abspath(path.dirname(__file__)) # Get the long description from the README file -with open(path.join(here, 'README.md'), encoding='utf-8') as f: +with open(path.join(here, "README.md"), encoding="utf-8") as f: long_description = f.read() def parse_requirements(name=None): if name: - reqf = 'requirements-%s.txt' % name + reqf = "requirements-%s.txt" % name else: - reqf = 'requirements.txt' + reqf = "requirements.txt" requirements = [] if not path.exists(reqf): return requirements with open(reqf) as f: for line in f.readlines(): line = line.strip() - if not line or line.startswith('#'): + if not line or line.startswith("#"): 
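The swh-revhash script above boils down to a single hashutil call; a runnable sketch using the sample revision from its header comment, with the expected output documented there:

    from swh.model import hashutil, identifiers

    revision_raw = (
        b"tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\n"
        b"parent 22c0fa5195a53f2e733ec75a9b6e9d1624a8b771\n"
        b"author seanius 1138341044 +0000\n"
        b"committer seanius 1138341044 +0000\n"
        b"\n"
        b"making dir structure...\n"
    )
    # sha1 over the git object header ("commit <length>\0") plus the payload
    h = hashutil.hash_git_data(revision_raw, "commit")
    print(identifiers.identifier_to_str(h))  # 17a631d474f49bbebfdf3d885dcde470d7faafd7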
continue requirements.append(line) return requirements blake2_requirements = ['pyblake2;python_version<"3.6"'] setup( - name='swh.model', - description='Software Heritage data model', + name="swh.model", + description="Software Heritage data model", long_description=long_description, - long_description_content_type='text/markdown', - author='Software Heritage developers', - author_email='swh-devel@inria.fr', - url='https://forge.softwareheritage.org/diffusion/DMOD/', + long_description_content_type="text/markdown", + author="Software Heritage developers", + author_email="swh-devel@inria.fr", + url="https://forge.softwareheritage.org/diffusion/DMOD/", packages=find_packages(), - setup_requires=['vcversioner'], - install_requires=(parse_requirements() + parse_requirements('swh') + - blake2_requirements), + setup_requires=["vcversioner"], + install_requires=( + parse_requirements() + parse_requirements("swh") + blake2_requirements + ), extras_require={ - 'cli': parse_requirements('cli'), - 'testing': parse_requirements('test'), + "cli": parse_requirements("cli"), + "testing": parse_requirements("test"), }, vcversioner={}, include_package_data=True, - entry_points=''' + entry_points=""" [console_scripts] swh-identify=swh.model.cli:identify [swh.cli.subcommands] identify=swh.model.cli:identify - ''', + """, classifiers=[ "Programming Language :: Python :: 3", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Operating System :: OS Independent", "Development Status :: 5 - Production/Stable", ], project_urls={ - 'Bug Reports': 'https://forge.softwareheritage.org/maniphest', - 'Funding': 'https://www.softwareheritage.org/donate', - 'Source': 'https://forge.softwareheritage.org/source/swh-model', + "Bug Reports": "https://forge.softwareheritage.org/maniphest", + "Funding": "https://www.softwareheritage.org/donate", + "Source": "https://forge.softwareheritage.org/source/swh-model", }, ) diff --git a/swh/model/cli.py b/swh/model/cli.py index 581bb45..ae51d19 100644 --- a/swh/model/cli.py +++ b/swh/model/cli.py @@ -1,199 +1,216 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import click import dulwich.repo import os import sys from functools import partial from urllib.parse import urlparse from swh.model import hashutil from swh.model import identifiers as pids from swh.model.exceptions import ValidationError from swh.model.from_disk import Content, Directory -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) +CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # Mapping between dulwich types and Software Heritage ones. Used by snapshot ID # computation. _DULWICH_TYPES = { - b'blob': 'content', - b'tree': 'directory', - b'commit': 'revision', - b'tag': 'release', + b"blob": "content", + b"tree": "directory", + b"commit": "revision", + b"tag": "release", } class PidParamType(click.ParamType): - name = 'persistent identifier' + name = "persistent identifier" def convert(self, value, param, ctx): try: pids.parse_persistent_identifier(value) return value # return as string, as we need just that except ValidationError as e: - self.fail('%s is not a valid PID. %s.' % (value, e), param, ctx) + self.fail("%s is not a valid PID. %s." 
% (value, e), param, ctx) def pid_of_file(path): object = Content.from_file(path=path).get_data() return pids.persistent_identifier(pids.CONTENT, object) def pid_of_file_content(data): object = Content.from_bytes(mode=644, data=data).get_data() return pids.persistent_identifier(pids.CONTENT, object) def pid_of_dir(path): object = Directory.from_disk(path=path).get_data() return pids.persistent_identifier(pids.DIRECTORY, object) def pid_of_origin(url): - pid = pids.PersistentId(object_type='origin', - object_id=pids.origin_identifier({'url': url})) + pid = pids.PersistentId( + object_type="origin", object_id=pids.origin_identifier({"url": url}) + ) return str(pid) def pid_of_git_repo(path): repo = dulwich.repo.Repo(path) branches = {} for ref, target in repo.refs.as_dict().items(): obj = repo[target] if obj: branches[ref] = { - 'target': hashutil.bytehex_to_hash(target), - 'target_type': _DULWICH_TYPES[obj.type_name], + "target": hashutil.bytehex_to_hash(target), + "target_type": _DULWICH_TYPES[obj.type_name], } else: branches[ref] = None for ref, target in repo.refs.get_symrefs().items(): branches[ref] = { - 'target': target, - 'target_type': 'alias', + "target": target, + "target_type": "alias", } - snapshot = {'branches': branches} + snapshot = {"branches": branches} - pid = pids.PersistentId(object_type='snapshot', - object_id=pids.snapshot_identifier(snapshot)) + pid = pids.PersistentId( + object_type="snapshot", object_id=pids.snapshot_identifier(snapshot) + ) return str(pid) def identify_object(obj_type, follow_symlinks, obj): - if obj_type == 'auto': - if obj == '-' or os.path.isfile(obj): - obj_type = 'content' + if obj_type == "auto": + if obj == "-" or os.path.isfile(obj): + obj_type = "content" elif os.path.isdir(obj): - obj_type = 'directory' + obj_type = "directory" else: try: # URL parsing if urlparse(obj).scheme: - obj_type = 'origin' + obj_type = "origin" else: raise ValueError except ValueError: - raise click.BadParameter('cannot detect object type for %s' % - obj) + raise click.BadParameter("cannot detect object type for %s" % obj) pid = None - if obj == '-': + if obj == "-": content = sys.stdin.buffer.read() pid = pid_of_file_content(content) - elif obj_type in ['content', 'directory']: + elif obj_type in ["content", "directory"]: path = obj.encode(sys.getfilesystemencoding()) if follow_symlinks and os.path.islink(obj): path = os.path.realpath(obj) - if obj_type == 'content': + if obj_type == "content": pid = pid_of_file(path) - elif obj_type == 'directory': + elif obj_type == "directory": pid = pid_of_dir(path) - elif obj_type == 'origin': + elif obj_type == "origin": pid = pid_of_origin(obj) - elif obj_type == 'snapshot': + elif obj_type == "snapshot": pid = pid_of_git_repo(obj) else: # shouldn't happen, due to option validation - raise click.BadParameter('invalid object type: ' + obj_type) + raise click.BadParameter("invalid object type: " + obj_type) # note: we return original obj instead of path here, to preserve user-given # file name in output return (obj, pid) @click.command(context_settings=CONTEXT_SETTINGS) -@click.option('--dereference/--no-dereference', 'follow_symlinks', - default=True, - help='follow (or not) symlinks for OBJECTS passed as arguments ' - + '(default: follow)') -@click.option('--filename/--no-filename', 'show_filename', default=True, - help='show/hide file name (default: show)') -@click.option('--type', '-t', 'obj_type', default='auto', - type=click.Choice(['auto', 'content', 'directory', 'origin', - 'snapshot']), - help='type of object to 
identify (default: auto)') -@click.option('--verify', '-v', metavar='PID', type=PidParamType(), - help='reference identifier to be compared with computed one') -@click.argument('objects', nargs=-1) +@click.option( + "--dereference/--no-dereference", + "follow_symlinks", + default=True, + help="follow (or not) symlinks for OBJECTS passed as arguments " + + "(default: follow)", +) +@click.option( + "--filename/--no-filename", + "show_filename", + default=True, + help="show/hide file name (default: show)", +) +@click.option( + "--type", + "-t", + "obj_type", + default="auto", + type=click.Choice(["auto", "content", "directory", "origin", "snapshot"]), + help="type of object to identify (default: auto)", +) +@click.option( + "--verify", + "-v", + metavar="PID", + type=PidParamType(), + help="reference identifier to be compared with computed one", +) +@click.argument("objects", nargs=-1) def identify(obj_type, verify, show_filename, follow_symlinks, objects): """Compute the Software Heritage persistent identifier (PID) for the given source code object(s). For more details about Software Heritage PIDs see: \b https://docs.softwareheritage.org/devel/swh-model/persistent-identifiers.html \b Examples: \b $ swh identify fork.c kmod.c sched/deadline.c swh:1:cnt:2e391c754ae730bd2d8520c2ab497c403220c6e3 fork.c swh:1:cnt:0277d1216f80ae1adeed84a686ed34c9b2931fc2 kmod.c swh:1:cnt:57b939c81bce5d06fa587df8915f05affbe22b82 sched/deadline.c \b $ swh identify --no-filename /usr/src/linux/kernel/ swh:1:dir:f9f858a48d663b3809c9e2f336412717496202ab \b $ git clone --mirror https://forge.softwareheritage.org/source/helloworld.git $ swh identify --type snapshot helloworld.git/ swh:1:snp:510aa88bdc517345d258c1fc2babcd0e1f905e93 helloworld.git """ # NoQA # overlong lines in shell examples are fine if not objects: - objects = ['-'] + objects = ["-"] if verify and len(objects) != 1: - raise click.BadParameter('verification requires a single object') + raise click.BadParameter("verification requires a single object") results = map(partial(identify_object, obj_type, follow_symlinks), objects) if verify: pid = next(results)[1] if verify == pid: - click.echo('PID match: %s' % pid) + click.echo("PID match: %s" % pid) sys.exit(0) else: - click.echo('PID mismatch: %s != %s' % (verify, pid)) + click.echo("PID mismatch: %s != %s" % (verify, pid)) sys.exit(1) else: for (obj, pid) in results: msg = pid if show_filename: - msg = '%s\t%s' % (pid, os.fsdecode(obj)) + msg = "%s\t%s" % (pid, os.fsdecode(obj)) click.echo(msg) -if __name__ == '__main__': +if __name__ == "__main__": identify() diff --git a/swh/model/exceptions.py b/swh/model/exceptions.py index 147c5ad..774dfc2 100644 --- a/swh/model/exceptions.py +++ b/swh/model/exceptions.py @@ -1,132 +1,131 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # # The ValidationError code derives from Django, and is available under the # following license terms: # # Copyright (c) Django Software Foundation and individual contributors. All # rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -NON_FIELD_ERRORS = '__all__' +NON_FIELD_ERRORS = "__all__" class ValidationError(Exception): """An error while validating data.""" + def __init__(self, message, code=None, params=None): """ The `message` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. What we define as an "error" can be either a simple string or an instance of ValidationError with its message attribute set, and what we define as list or dictionary can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. """ super().__init__(message, code, params) if isinstance(message, list) and len(message) == 1: message = message[0] if isinstance(message, ValidationError): - if hasattr(message, 'error_dict'): + if hasattr(message, "error_dict"): message = message.error_dict # PY2 has a `message` property which is always there so we can't # duck-type on it. It was introduced in Python 2.5 and already # deprecated in Python 2.6. - elif not hasattr(message, 'message'): + elif not hasattr(message, "message"): message = message.error_list else: - message, code, params = (message.message, message.code, - message.params) + message, code, params = (message.message, message.code, message.params) if isinstance(message, dict): self.error_dict = {} for field, messages in message.items(): if not isinstance(messages, ValidationError): messages = ValidationError(messages) self.error_dict[field] = messages.error_list elif isinstance(message, list): self.error_list = [] for message in message: # Normalize plain strings to instances of ValidationError. if not isinstance(message, ValidationError): message = ValidationError(message) - if hasattr(message, 'error_dict'): - self.error_list.extend(sum(message.error_dict.values(), - [])) + if hasattr(message, "error_dict"): + self.error_list.extend(sum(message.error_dict.values(), [])) else: self.error_list.extend(message.error_list) else: self.message = message self.code = code self.params = params self.error_list = [self] @property def message_dict(self): # Trigger an AttributeError if this ValidationError # doesn't have an error_dict. 
- getattr(self, 'error_dict') + getattr(self, "error_dict") return dict(self) @property def messages(self): - if hasattr(self, 'error_dict'): + if hasattr(self, "error_dict"): return sum(dict(self).values(), []) return list(self) def update_error_dict(self, error_dict): - if hasattr(self, 'error_dict'): + if hasattr(self, "error_dict"): for field, error_list in self.error_dict.items(): error_dict.setdefault(field, []).extend(error_list) else: error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list) return error_dict def __iter__(self): - if hasattr(self, 'error_dict'): + if hasattr(self, "error_dict"): for field, errors in self.error_dict.items(): yield field, list(ValidationError(errors)) else: for error in self.error_list: message = error.message if error.params: message %= error.params yield message def __str__(self): - if hasattr(self, 'error_dict'): + if hasattr(self, "error_dict"): return repr(dict(self)) return repr(list(self)) def __repr__(self): - return 'ValidationError(%s)' % self + return "ValidationError(%s)" % self diff --git a/swh/model/fields/__init__.py b/swh/model/fields/__init__.py index d2b3cef..a5b1ed3 100644 --- a/swh/model/fields/__init__.py +++ b/swh/model/fields/__init__.py @@ -1,13 +1,18 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # We do our imports here but we don't use them, so flake8 complains # flake8: noqa -from .simple import (validate_type, validate_int, validate_str, validate_bytes, - validate_datetime, validate_enum) -from .hashes import (validate_sha1, validate_sha1_git, validate_sha256) -from .compound import (validate_against_schema, validate_all_keys, - validate_any_key) +from .simple import ( + validate_type, + validate_int, + validate_str, + validate_bytes, + validate_datetime, + validate_enum, +) +from .hashes import validate_sha1, validate_sha1_git, validate_sha256 +from .compound import validate_against_schema, validate_all_keys, validate_any_key diff --git a/swh/model/fields/compound.py b/swh/model/fields/compound.py index 00eb252..3133f59 100644 --- a/swh/model/fields/compound.py +++ b/swh/model/fields/compound.py @@ -1,126 +1,125 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from collections import defaultdict import itertools from ..exceptions import ValidationError, NON_FIELD_ERRORS def validate_against_schema(model, schema, value): """Validate a value for the given model against the given schema. 
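PidParamType in cli.py above delegates to parse_persistent_identifier, which reports bad input through this same ValidationError; a small sketch (the PID values are examples, and the attribute access assumes the PersistentId named tuple from swh.model.identifiers):

    from swh.model.exceptions import ValidationError
    from swh.model.identifiers import parse_persistent_identifier

    pid = parse_persistent_identifier("swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2")
    print(pid.object_type)  # content

    try:
        parse_persistent_identifier("swh:1:bad:94a9ed024d3859793618152ea559a168bbcbb5e2")
    except ValidationError as e:
        print(e.messages)  # flattened by the messages property defined above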
Args: model: the name of the model schema: the schema to validate against value: the value to validate Returns: True if the value is correct against the schema Raises: ValidationError if the value does not validate against the schema """ if not isinstance(value, dict): raise ValidationError( - 'Unexpected type %(type)s for %(model)s, expected dict', - params={ - 'model': model, - 'type': value.__class__.__name__, - }, - code='model-unexpected-type', + "Unexpected type %(type)s for %(model)s, expected dict", + params={"model": model, "type": value.__class__.__name__,}, + code="model-unexpected-type", ) errors = defaultdict(list) for key, (mandatory, validators) in itertools.chain( ((k, v) for k, v in schema.items() if k != NON_FIELD_ERRORS), - [(NON_FIELD_ERRORS, (False, schema.get(NON_FIELD_ERRORS, [])))] + [(NON_FIELD_ERRORS, (False, schema.get(NON_FIELD_ERRORS, [])))], ): if not validators: continue if not isinstance(validators, list): validators = [validators] validated_value = value if key != NON_FIELD_ERRORS: try: validated_value = value[key] except KeyError: if mandatory: errors[key].append( ValidationError( - 'Field %(field)s is mandatory', - params={'field': key}, - code='model-field-mandatory', + "Field %(field)s is mandatory", + params={"field": key}, + code="model-field-mandatory", ) ) continue else: if errors: # Don't validate the whole object if some fields are broken continue for validator in validators: try: valid = validator(validated_value) except ValidationError as e: errors[key].append(e) else: if not valid: errdata = { - 'validator': validator.__name__, + "validator": validator.__name__, } if key == NON_FIELD_ERRORS: - errmsg = 'Validation of model %(model)s failed in ' \ - '%(validator)s' - errdata['model'] = model - errcode = 'model-validation-failed' + errmsg = ( + "Validation of model %(model)s failed in " "%(validator)s" + ) + errdata["model"] = model + errcode = "model-validation-failed" else: - errmsg = 'Validation of field %(field)s failed in ' \ - '%(validator)s' - errdata['field'] = key - errcode = 'field-validation-failed' + errmsg = ( + "Validation of field %(field)s failed in " "%(validator)s" + ) + errdata["field"] = key + errcode = "field-validation-failed" errors[key].append( ValidationError(errmsg, params=errdata, code=errcode) ) if errors: raise ValidationError(dict(errors)) return True def validate_all_keys(value, keys): """Validate that all the given keys are present in value""" missing_keys = set(keys) - set(value) if missing_keys: - missing_fields = ', '.join(sorted(missing_keys)) + missing_fields = ", ".join(sorted(missing_keys)) raise ValidationError( - 'Missing mandatory fields %(missing_fields)s', - params={'missing_fields': missing_fields}, - code='missing-mandatory-field' + "Missing mandatory fields %(missing_fields)s", + params={"missing_fields": missing_fields}, + code="missing-mandatory-field", ) return True def validate_any_key(value, keys): """Validate that any of the given keys is present in value""" present_keys = set(keys) & set(value) if not present_keys: - missing_fields = ', '.join(sorted(keys)) + missing_fields = ", ".join(sorted(keys)) raise ValidationError( - 'Must contain one of the alternative fields %(missing_fields)s', - params={'missing_fields': missing_fields}, - code='missing-alternative-field', + "Must contain one of the alternative fields %(missing_fields)s", + params={"missing_fields": missing_fields}, + code="missing-alternative-field", ) return True diff --git a/swh/model/fields/hashes.py b/swh/model/fields/hashes.py 
index 3819565..47e872c 100644 --- a/swh/model/fields/hashes.py +++ b/swh/model/fields/hashes.py @@ -1,117 +1,115 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import string from ..exceptions import ValidationError def validate_hash(value, hash_type): """Validate that the given value represents a hash of the given hash_type. Args: value: the value to check hash_type: the type of hash the value is representing Returns: True if the hash validates Raises: ValueError if the hash does not validate """ hash_lengths = { - 'sha1': 20, - 'sha1_git': 20, - 'sha256': 32, + "sha1": 20, + "sha1_git": 20, + "sha256": 32, } hex_digits = set(string.hexdigits) if hash_type not in hash_lengths: raise ValidationError( - 'Unexpected hash type %(hash_type)s, expected one of' - ' %(hash_types)s', + "Unexpected hash type %(hash_type)s, expected one of" " %(hash_types)s", params={ - 'hash_type': hash_type, - 'hash_types': ', '.join(sorted(hash_lengths)), + "hash_type": hash_type, + "hash_types": ", ".join(sorted(hash_lengths)), }, - code='unexpected-hash-type') + code="unexpected-hash-type", + ) if isinstance(value, str): errors = [] extra_chars = set(value) - hex_digits if extra_chars: errors.append( ValidationError( "Unexpected characters `%(unexpected_chars)s' for hash " "type %(hash_type)s", params={ - 'unexpected_chars': ', '.join(sorted(extra_chars)), - 'hash_type': hash_type, + "unexpected_chars": ", ".join(sorted(extra_chars)), + "hash_type": hash_type, }, - code='unexpected-hash-contents', + code="unexpected-hash-contents", ) ) length = len(value) expected_length = 2 * hash_lengths[hash_type] if length != expected_length: errors.append( ValidationError( - 'Unexpected length %(length)d for hash type ' - '%(hash_type)s, expected %(expected_length)d', + "Unexpected length %(length)d for hash type " + "%(hash_type)s, expected %(expected_length)d", params={ - 'length': length, - 'expected_length': expected_length, - 'hash_type': hash_type, + "length": length, + "expected_length": expected_length, + "hash_type": hash_type, }, - code='unexpected-hash-length', + code="unexpected-hash-length", ) ) if errors: raise ValidationError(errors) return True if isinstance(value, bytes): length = len(value) expected_length = hash_lengths[hash_type] if length != expected_length: raise ValidationError( - 'Unexpected length %(length)d for hash type ' - '%(hash_type)s, expected %(expected_length)d', + "Unexpected length %(length)d for hash type " + "%(hash_type)s, expected %(expected_length)d", params={ - 'length': length, - 'expected_length': expected_length, - 'hash_type': hash_type, + "length": length, + "expected_length": expected_length, + "hash_type": hash_type, }, - code='unexpected-hash-length', + code="unexpected-hash-length", ) return True raise ValidationError( - 'Unexpected type %(type)s for hash, expected str or bytes', - params={ - 'type': value.__class__.__name__, - }, - code='unexpected-hash-value-type', + "Unexpected type %(type)s for hash, expected str or bytes", + params={"type": value.__class__.__name__,}, + code="unexpected-hash-value-type", ) def validate_sha1(sha1): """Validate that sha1 is a valid sha1 hash""" - return validate_hash(sha1, 'sha1') + return validate_hash(sha1, "sha1") def validate_sha1_git(sha1_git): """Validate that sha1_git is a valid sha1_git hash""" - return validate_hash(sha1_git, 
'sha1_git') + return validate_hash(sha1_git, "sha1_git") def validate_sha256(sha256): """Validate that sha256 is a valid sha256 hash""" - return validate_hash(sha256, 'sha256') + return validate_hash(sha256, "sha256") diff --git a/swh/model/fields/simple.py b/swh/model/fields/simple.py index 3020997..98fcc11 100644 --- a/swh/model/fields/simple.py +++ b/swh/model/fields/simple.py @@ -1,80 +1,79 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import numbers from ..exceptions import ValidationError def validate_type(value, type): """Validate that value has the given type""" if not isinstance(value, type): if isinstance(type, tuple): - typestr = 'one of %s' % ', '.join(typ.__name__ for typ in type) + typestr = "one of %s" % ", ".join(typ.__name__ for typ in type) else: typestr = type.__name__ raise ValidationError( - 'Unexpected type %(type)s, expected %(expected_type)s', - params={ - 'type': value.__class__.__name__, - 'expected_type': typestr, - }, - code='unexpected-type' + "Unexpected type %(type)s, expected %(expected_type)s", + params={"type": value.__class__.__name__, "expected_type": typestr,}, + code="unexpected-type", ) return True def validate_int(value): """Validate that the given value is an int""" return validate_type(value, numbers.Integral) def validate_str(value): """Validate that the given value is a string""" return validate_type(value, str) def validate_bytes(value): """Validate that the given value is a bytes object""" return validate_type(value, bytes) def validate_datetime(value): """Validate that the given value is either a datetime, or a numeric number of seconds since the UNIX epoch.""" errors = [] try: validate_type(value, (datetime.datetime, numbers.Real)) except ValidationError as e: errors.append(e) if isinstance(value, datetime.datetime) and value.tzinfo is None: - errors.append(ValidationError( - 'Datetimes must be timezone-aware in swh', - code='datetime-without-tzinfo', - )) + errors.append( + ValidationError( + "Datetimes must be timezone-aware in swh", + code="datetime-without-tzinfo", + ) + ) if errors: raise ValidationError(errors) return True def validate_enum(value, expected_values): """Validate that value is contained in expected_values""" if value not in expected_values: raise ValidationError( - 'Unexpected value %(value)s, expected one of %(expected_values)s', + "Unexpected value %(value)s, expected one of %(expected_values)s", params={ - 'value': value, - 'expected_values': ', '.join(sorted(expected_values)), + "value": value, + "expected_values": ", ".join(sorted(expected_values)), }, - code='unexpected-value', + code="unexpected-value", ) return True diff --git a/swh/model/from_disk.py b/swh/model/from_disk.py index 3ba068a..5176dc9 100644 --- a/swh/model/from_disk.py +++ b/swh/model/from_disk.py @@ -1,410 +1,419 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import enum import os import stat import attr from typing import List, Optional, Iterable, Any from .hashutil import MultiHash from .merkle import MerkleLeaf, MerkleNode from .identifiers import ( - directory_entry_sort_key, directory_identifier, + directory_entry_sort_key,
directory_identifier, identifier_to_bytes as id_to_bytes, identifier_to_str as id_to_str, ) from . import model @attr.s class DiskBackedContent(model.Content): """Subclass of Content, which allows lazy-loading data from the disk.""" + path = attr.ib(type=Optional[bytes], default=None) def __attrs_post_init__(self): if self.path is None: - raise TypeError('path must not be None.') + raise TypeError("path must not be None.") def with_data(self) -> model.Content: args = self.to_dict() - del args['path'] + del args["path"] assert self.path is not None - with open(self.path, 'rb') as fd: - return model.Content.from_dict({ - **args, - 'data': fd.read()}) + with open(self.path, "rb") as fd: + return model.Content.from_dict({**args, "data": fd.read()}) class DentryPerms(enum.IntEnum): """Admissible permissions for directory entries.""" + content = 0o100644 """Content""" executable_content = 0o100755 """Executable content (e.g. executable script)""" symlink = 0o120000 """Symbolic link""" directory = 0o040000 """Directory""" revision = 0o160000 """Revision (e.g. submodule)""" def mode_to_perms(mode): """Convert a file mode to a permission compatible with Software Heritage directory entries Args: mode (int): a file mode as returned by :func:`os.stat` in :attr:`os.stat_result.st_mode` Returns: DentryPerms: one of the following values: :const:`DentryPerms.content`: plain file :const:`DentryPerms.executable_content`: executable file :const:`DentryPerms.symlink`: symbolic link :const:`DentryPerms.directory`: directory """ if stat.S_ISLNK(mode): return DentryPerms.symlink if stat.S_ISDIR(mode): return DentryPerms.directory else: # file is executable in any way if mode & (0o111): return DentryPerms.executable_content else: return DentryPerms.content class Content(MerkleLeaf): """Representation of a Software Heritage content as a node in a Merkle tree. The current Merkle hash for the Content nodes is the `sha1_git`, which makes it consistent with what :class:`Directory` uses for its own hash computation. """ + __slots__ = [] # type: List[str] - type = 'content' + type = "content" @classmethod def from_bytes(cls, *, mode, data): """Convert data (raw :class:`bytes`) to a Software Heritage content entry Args: mode (int): a file mode (passed to :func:`mode_to_perms`) data (bytes): raw contents of the file """ ret = MultiHash.from_data(data).digest() - ret['length'] = len(data) - ret['perms'] = mode_to_perms(mode) - ret['data'] = data - ret['status'] = 'visible' + ret["length"] = len(data) + ret["perms"] = mode_to_perms(mode) + ret["data"] = data + ret["status"] = "visible" return cls(ret) @classmethod def from_symlink(cls, *, path, mode): """Convert a symbolic link to a Software Heritage content entry""" return cls.from_bytes(mode=mode, data=os.readlink(path)) @classmethod - def from_file( - cls, *, path, max_content_length=None): + def from_file(cls, *, path, max_content_length=None): """Compute the Software Heritage content entry corresponding to an on-disk file. The returned dictionary contains keys useful for both: - loading the content in the archive (hashes, `length`) - using the content as a directory entry in a directory Args: path (bytes): path of the file on disk max_content_length (Optional[int]): if given, all contents larger than this will be skipped.
""" file_stat = os.lstat(path) mode = file_stat.st_mode length = file_stat.st_size - too_large = max_content_length is not None \ - and length > max_content_length + too_large = max_content_length is not None and length > max_content_length if stat.S_ISLNK(mode): # Symbolic link: return a file whose contents are the link target if too_large: # Unlike large contents, we can't stream symlinks to # MultiHash, and we don't want to fit them in memory if # they exceed max_content_length either. # Thankfully, this should not happen for reasonable values of # max_content_length because of OS/filesystem limitations, # so let's just raise an error. - raise Exception(f'Symlink too large ({length} bytes)') + raise Exception(f"Symlink too large ({length} bytes)") return cls.from_symlink(path=path, mode=mode) elif not stat.S_ISREG(mode): # not a regular file: return the empty file instead - return cls.from_bytes(mode=mode, data=b'') + return cls.from_bytes(mode=mode, data=b"") if too_large: - skip_reason = 'Content too large' + skip_reason = "Content too large" else: skip_reason = None hashes = MultiHash.from_path(path).digest() if skip_reason: ret = { **hashes, - 'status': 'absent', - 'reason': skip_reason, + "status": "absent", + "reason": skip_reason, } else: ret = { **hashes, - 'status': 'visible', + "status": "visible", } - ret['path'] = path - ret['perms'] = mode_to_perms(mode) - ret['length'] = length + ret["path"] = path + ret["perms"] = mode_to_perms(mode) + ret["length"] = length obj = cls(ret) return obj def __repr__(self): - return 'Content(id=%s)' % id_to_str(self.hash) + return "Content(id=%s)" % id_to_str(self.hash) def compute_hash(self): - return self.data['sha1_git'] + return self.data["sha1_git"] def to_model(self) -> model.BaseContent: """Builds a `model.BaseContent` object based on this leaf.""" data = self.get_data().copy() - data.pop('perms', None) - if data['status'] == 'absent': - data.pop('path', None) + data.pop("perms", None) + if data["status"] == "absent": + data.pop("path", None) return model.SkippedContent.from_dict(data) - elif 'data' in data: + elif "data" in data: return model.Content.from_dict(data) else: return DiskBackedContent.from_dict(data) -def accept_all_directories( - dirpath: str, dirname: str, entries: Iterable[Any]) -> bool: +def accept_all_directories(dirpath: str, dirname: str, entries: Iterable[Any]) -> bool: """Default filter for :func:`Directory.from_disk` accepting all directories Args: dirname (bytes): directory name entries (list): directory entries """ return True def ignore_empty_directories( - dirpath: str, dirname: str, entries: Iterable[Any]) -> bool: + dirpath: str, dirname: str, entries: Iterable[Any] +) -> bool: """Filter for :func:`directory_to_objects` ignoring empty directories Args: dirname (bytes): directory name entries (list): directory entries Returns: True if the directory is not empty, false if the directory is empty """ return bool(entries) def ignore_named_directories(names, *, case_sensitive=True): """Filter for :func:`directory_to_objects` to ignore directories named one of names. 
Args: names (list of bytes): names to ignore case_sensitive (bool): whether to do the filtering in a case sensitive way Returns: a directory filter for :func:`directory_to_objects` """ if not case_sensitive: names = [name.lower() for name in names] - def named_filter(dirpath: str, dirname: str, - entries: Iterable[Any], - names: Iterable[Any] = names, - case_sensitive: bool = case_sensitive): + def named_filter( + dirpath: str, + dirname: str, + entries: Iterable[Any], + names: Iterable[Any] = names, + case_sensitive: bool = case_sensitive, + ): if case_sensitive: return dirname not in names else: return dirname.lower() not in names return named_filter class Directory(MerkleNode): """Representation of a Software Heritage directory as a node in a Merkle Tree. This class can be used to generate, from an on-disk directory, all the objects that need to be sent to the Software Heritage archive. The :func:`from_disk` constructor allows you to generate the data structure from a directory on disk. The resulting :class:`Directory` can then be manipulated as a dictionary, using the path as key. The :func:`collect` method is used to retrieve all the objects that need to be added to the Software Heritage archive since the last collection, by class (contents and directories). When using the dict-like methods to update the contents of the directory, the affected levels of hierarchy are reset and can be collected again using the same method. This enables the efficient collection of updated nodes, for instance when the client is applying diffs. """ - __slots__ = ['__entries'] - type = 'directory' + + __slots__ = ["__entries"] + type = "directory" @classmethod - def from_disk(cls, *, path, - dir_filter=accept_all_directories, - max_content_length=None): + def from_disk( + cls, *, path, dir_filter=accept_all_directories, max_content_length=None + ): """Compute the Software Heritage objects for a given directory tree Args: path (bytes): the directory to traverse dir_filter (function): a filter to ignore some directories by name or contents. Takes three arguments: dirpath, dirname and entries, and returns True if the directory should be added, False if the directory should be ignored. max_content_length (Optional[int]): if given, all contents larger than this will be skipped. """ top_path = path dirs = {} for root, dentries, fentries in os.walk(top_path, topdown=False): entries = {} # Join fentries and dentries in the same processing, as symbolic # links to directories appear in dentries...
for name in fentries + dentries: path = os.path.join(root, name) if not os.path.isdir(path) or os.path.islink(path): content = Content.from_file( - path=path, max_content_length=max_content_length) + path=path, max_content_length=max_content_length + ) entries[name] = content else: if dir_filter(path, name, dirs[path].entries): entries[name] = dirs[path] - dirs[root] = cls({'name': os.path.basename(root)}) + dirs[root] = cls({"name": os.path.basename(root)}) dirs[root].update(entries) return dirs[top_path] def __init__(self, data=None): super().__init__(data=data) self.__entries = None def invalidate_hash(self): self.__entries = None super().invalidate_hash() @staticmethod def child_to_directory_entry(name, child): if isinstance(child, Directory): return { - 'type': 'dir', - 'perms': DentryPerms.directory, - 'target': child.hash, - 'name': name, + "type": "dir", + "perms": DentryPerms.directory, + "target": child.hash, + "name": name, } elif isinstance(child, Content): return { - 'type': 'file', - 'perms': child.data['perms'], - 'target': child.hash, - 'name': name, + "type": "file", + "perms": child.data["perms"], + "target": child.hash, + "name": name, } else: - raise ValueError('unknown child') + raise ValueError("unknown child") def get_data(self, **kwargs): return { - 'id': self.hash, - 'entries': self.entries, + "id": self.hash, + "entries": self.entries, } @property def entries(self): """Child nodes, sorted by name in the same way `directory_identifier` does.""" if self.__entries is None: - self.__entries = sorted(( - self.child_to_directory_entry(name, child) - for name, child in self.items() - ), key=directory_entry_sort_key) + self.__entries = sorted( + ( + self.child_to_directory_entry(name, child) + for name, child in self.items() + ), + key=directory_entry_sort_key, + ) return self.__entries def compute_hash(self): - return id_to_bytes(directory_identifier({'entries': self.entries})) + return id_to_bytes(directory_identifier({"entries": self.entries})) def to_model(self) -> model.Directory: """Builds a `model.Directory` object based on this node; ignoring its children.""" return model.Directory.from_dict(self.get_data()) def __getitem__(self, key): if not isinstance(key, bytes): - raise ValueError('Can only get a bytes from Directory') + raise ValueError("Can only get a bytes from Directory") # Convenience shortcut - if key == b'': + if key == b"": return self - if b'/' not in key: + if b"/" not in key: return super().__getitem__(key) else: - key1, key2 = key.split(b'/', 1) + key1, key2 = key.split(b"/", 1) return self.__getitem__(key1)[key2] def __setitem__(self, key, value): if not isinstance(key, bytes): - raise ValueError('Can only set a bytes Directory entry') + raise ValueError("Can only set a bytes Directory entry") if not isinstance(value, (Content, Directory)): - raise ValueError('Can only set a Directory entry to a Content or ' - 'Directory') + raise ValueError( + "Can only set a Directory entry to a Content or " "Directory" + ) - if key == b'': - raise ValueError('Directory entry must have a name') - if b'\x00' in key: - raise ValueError('Directory entry name must not contain nul bytes') + if key == b"": + raise ValueError("Directory entry must have a name") + if b"\x00" in key: + raise ValueError("Directory entry name must not contain nul bytes") - if b'/' not in key: + if b"/" not in key: return super().__setitem__(key, value) else: - key1, key2 = key.rsplit(b'/', 1) + key1, key2 = key.rsplit(b"/", 1) self[key1].__setitem__(key2, value) def __delitem__(self, key): 
if not isinstance(key, bytes): - raise ValueError('Can only delete a bytes Directory entry') + raise ValueError("Can only delete a bytes Directory entry") - if b'/' not in key: + if b"/" not in key: super().__delitem__(key) else: - key1, key2 = key.rsplit(b'/', 1) + key1, key2 = key.rsplit(b"/", 1) del self[key1][key2] def __repr__(self): - return 'Directory(id=%s, entries=[%s])' % ( + return "Directory(id=%s, entries=[%s])" % ( id_to_str(self.hash), - ', '.join(str(entry) for entry in self), + ", ".join(str(entry) for entry in self), ) diff --git a/swh/model/hashutil.py b/swh/model/hashutil.py index f045fb0..954ae95 100644 --- a/swh/model/hashutil.py +++ b/swh/model/hashutil.py @@ -1,361 +1,363 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Module in charge of hashing function definitions. This is the base module use to compute swh's hashes. Only a subset of hashing algorithms is supported as defined in the ALGORITHMS set. Any provided algorithms not in that list will result in a ValueError explaining the error. This module defines a MultiHash class to ease the softwareheritage hashing algorithms computation. This allows to compute hashes from file object, path, data using a similar interface as what the standard hashlib module provides. Basic usage examples: - file object: MultiHash.from_file( file_object, hash_names=DEFAULT_ALGORITHMS).digest() - path (filepath): MultiHash.from_path(b'foo').hexdigest() - data (bytes): MultiHash.from_data(b'foo').bytehexdigest() "Complex" usage, defining a swh hashlib instance first: - To compute length, integrate the length to the set of algorithms to compute, for example: .. code-block:: python h = MultiHash(hash_names=set({'length'}).union(DEFAULT_ALGORITHMS)) with open(filepath, 'rb') as f: h.update(f.read(HASH_BLOCK_SIZE)) hashes = h.digest() # returns a dict of {hash_algo_name: hash_in_bytes} - Write alongside computing hashing algorithms (from a stream), example: .. code-block:: python h = MultiHash(length=length) with open(filepath, 'wb') as f: for chunk in r.iter_content(): # r a stream of sort h.update(chunk) f.write(chunk) hashes = h.hexdigest() # returns a dict of {hash_algo_name: hash_in_hex} """ import binascii import functools import hashlib import os from io import BytesIO from typing import Callable, Dict -ALGORITHMS = set(['sha1', 'sha256', 'sha1_git', 'blake2s256', 'blake2b512']) +ALGORITHMS = set(["sha1", "sha256", "sha1_git", "blake2s256", "blake2b512"]) """Hashing algorithms supported by this module""" -DEFAULT_ALGORITHMS = set(['sha1', 'sha256', 'sha1_git', 'blake2s256']) +DEFAULT_ALGORITHMS = set(["sha1", "sha256", "sha1_git", "blake2s256"]) """Algorithms computed by default when calling the functions from this module. Subset of :const:`ALGORITHMS`. """ HASH_BLOCK_SIZE = 32768 """Block size for streaming hash computations made in this module""" _blake2_hash_cache = {} # type: Dict[str, Callable] class MultiHash: """Hashutil class to support multiple hashes computation. Args: hash_names (set): Set of hash algorithms (+ optionally length) to compute hashes (cf. DEFAULT_ALGORITHMS) length (int): Length of the total sum of chunks to read If the length is provided as algorithm, the length is also computed and returned. 
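The bytes-keyed accessors above make nested updates terse; a minimal sketch (names are illustrative):

    from swh.model.from_disk import Content, Directory

    d = Directory()
    d[b"src"] = Directory()
    # Slash-separated keys recurse through subdirectories.
    d[b"src/hello.py"] = Content.from_bytes(mode=0o100644, data=b"print('hi')\n")
    print(d[b"src/hello.py"].hash.hex())
    del d[b"src/hello.py"]  # invalidates the cached hashes up the hierarchy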
""" + def __init__(self, hash_names=DEFAULT_ALGORITHMS, length=None): self.state = {} self.track_length = False for name in hash_names: - if name == 'length': - self.state['length'] = 0 + if name == "length": + self.state["length"] = 0 self.track_length = True else: self.state[name] = _new_hash(name, length) @classmethod def from_state(cls, state, track_length): ret = cls([]) ret.state = state ret.track_length = track_length @classmethod def from_file(cls, fobj, hash_names=DEFAULT_ALGORITHMS, length=None): ret = cls(length=length, hash_names=hash_names) while True: chunk = fobj.read(HASH_BLOCK_SIZE) if not chunk: break ret.update(chunk) return ret @classmethod def from_path(cls, path, hash_names=DEFAULT_ALGORITHMS): length = os.path.getsize(path) - with open(path, 'rb') as f: + with open(path, "rb") as f: ret = cls.from_file(f, hash_names=hash_names, length=length) return ret @classmethod def from_data(cls, data, hash_names=DEFAULT_ALGORITHMS): length = len(data) fobj = BytesIO(data) return cls.from_file(fobj, hash_names=hash_names, length=length) def update(self, chunk): for name, h in self.state.items(): - if name == 'length': + if name == "length": continue h.update(chunk) if self.track_length: - self.state['length'] += len(chunk) + self.state["length"] += len(chunk) def digest(self): return { - name: h.digest() if name != 'length' else h + name: h.digest() if name != "length" else h for name, h in self.state.items() } def hexdigest(self): return { - name: h.hexdigest() if name != 'length' else h + name: h.hexdigest() if name != "length" else h for name, h in self.state.items() } def bytehexdigest(self): return { - name: hash_to_bytehex(h.digest()) if name != 'length' else h + name: hash_to_bytehex(h.digest()) if name != "length" else h for name, h in self.state.items() } def copy(self): copied_state = { - name: h.copy() if name != 'length' else h - for name, h in self.state.items() + name: h.copy() if name != "length" else h for name, h in self.state.items() } return self.from_state(copied_state, self.track_length) def _new_blake2_hash(algo): """Return a function that initializes a blake2 hash. """ if algo in _blake2_hash_cache: return _blake2_hash_cache[algo]() lalgo = algo.lower() - if not lalgo.startswith('blake2'): - raise ValueError('Algorithm %s is not a blake2 hash' % algo) + if not lalgo.startswith("blake2"): + raise ValueError("Algorithm %s is not a blake2 hash" % algo) blake_family = lalgo[:7] digest_size = None if lalgo[7:]: try: digest_size, remainder = divmod(int(lalgo[7:]), 8) except ValueError: - raise ValueError( - 'Unknown digest size for algo %s' % algo - ) from None + raise ValueError("Unknown digest size for algo %s" % algo) from None if remainder: raise ValueError( - 'Digest size for algorithm %s must be a multiple of 8' % algo + "Digest size for algorithm %s must be a multiple of 8" % algo ) if lalgo in hashlib.algorithms_available: # Handle the case where OpenSSL ships the given algorithm # (e.g. Python 3.5 on Debian 9 stretch) _blake2_hash_cache[algo] = lambda: hashlib.new(lalgo) else: # Try using the built-in implementation for Python 3.6+ if blake_family in hashlib.algorithms_available: blake2 = getattr(hashlib, blake_family) else: import pyblake2 + blake2 = getattr(pyblake2, blake_family) _blake2_hash_cache[algo] = lambda: blake2(digest_size=digest_size) return _blake2_hash_cache[algo]() def _new_hashlib_hash(algo): """Initialize a digest object from hashlib. 
Handle the swh-specific names for the blake2-related algorithms """ - if algo.startswith('blake2'): + if algo.startswith("blake2"): return _new_blake2_hash(algo) else: return hashlib.new(algo) def _new_git_hash(base_algo, git_type, length): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm, and feed it with the header for a git object of the given type and length. The header for hashing a git object consists of: - The type of the object (encoded in ASCII) - One ASCII space (\x20) - The length of the object (decimal encoded in ASCII) - One NUL byte Args: base_algo (str from :const:`ALGORITHMS`): a hashlib-supported algorithm git_type: the type of the git object (supposedly one of 'blob', 'commit', 'tag', 'tree') length: the length of the git object you're encoding Returns: a hashutil.hash object """ h = _new_hashlib_hash(base_algo) - git_header = '%s %d\0' % (git_type, length) - h.update(git_header.encode('ascii')) + git_header = "%s %d\0" % (git_type, length) + h.update(git_header.encode("ascii")) return h def _new_hash(algo, length=None): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm. See the constant ALGORITHMS for the list of supported algorithms. If a git-specific hashing algorithm is requested (e.g., "sha1_git"), the hashing object will be pre-fed with the needed header; for this to work, length must be given. Args: algo (str): a hashing algorithm (one of ALGORITHMS) length (int): the length of the hashed payload (needed for git-specific algorithms) Returns: a hashutil.hash object Raises: ValueError if algo is unknown, or length is missing for a git-specific hash. """ if algo not in ALGORITHMS: raise ValueError( - 'Unexpected hashing algorithm %s, expected one of %s' % - (algo, ', '.join(sorted(ALGORITHMS)))) + "Unexpected hashing algorithm %s, expected one of %s" + % (algo, ", ".join(sorted(ALGORITHMS))) + ) - if algo.endswith('_git'): + if algo.endswith("_git"): if length is None: - raise ValueError('Missing length for git hashing algorithm') + raise ValueError("Missing length for git hashing algorithm") base_algo = algo[:-4] - return _new_git_hash(base_algo, 'blob', length) + return _new_git_hash(base_algo, "blob", length) return _new_hashlib_hash(algo) -def hash_git_data(data, git_type, base_algo='sha1'): +def hash_git_data(data, git_type, base_algo="sha1"): """Hash the given data as a git object of type git_type. Args: data: a bytes object git_type: the git object type base_algo: the base hashing algorithm used (default: sha1) Returns: a dict mapping each algorithm to a bytes digest Raises: ValueError if the git_type is unexpected. 
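The header fed in by _new_git_hash is the standard git object header, so hash_git_data can be checked against a manual sha1; a small sketch:

    import hashlib
    from swh.model import hashutil

    data = b"hello\n"
    # git object hash: sha1("<type> <length>\0" + payload)
    manual = hashlib.sha1(b"blob %d\x00" % len(data) + data).digest()
    assert hashutil.hash_git_data(data, "blob") == manual
    print(manual.hex())  # ce013625030ba8dba906f756967f9e9ca394464a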
""" - git_object_types = {'blob', 'tree', 'commit', 'tag', 'snapshot'} + git_object_types = {"blob", "tree", "commit", "tag", "snapshot"} if git_type not in git_object_types: - raise ValueError('Unexpected git object type %s, expected one of %s' % - (git_type, ', '.join(sorted(git_object_types)))) + raise ValueError( + "Unexpected git object type %s, expected one of %s" + % (git_type, ", ".join(sorted(git_object_types))) + ) h = _new_git_hash(base_algo, git_type, len(data)) h.update(data) return h.digest() @functools.lru_cache() def hash_to_hex(hash): """Converts a hash (in hex or bytes form) to its hexadecimal ascii form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: str: the hexadecimal form of the hash """ if isinstance(hash, str): return hash - return binascii.hexlify(hash).decode('ascii') + return binascii.hexlify(hash).decode("ascii") @functools.lru_cache() def hash_to_bytehex(hash): """Converts a hash to its hexadecimal bytes representation Args: hash (bytes): a :class:`bytes` hash Returns: bytes: the hexadecimal form of the hash, as :class:`bytes` """ return binascii.hexlify(hash) @functools.lru_cache() def hash_to_bytes(hash): """Converts a hash (in hex or bytes form) to its raw bytes form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: bytes: the :class:`bytes` form of the hash """ if isinstance(hash, bytes): return hash return bytes.fromhex(hash) @functools.lru_cache() def bytehex_to_hash(hex): """Converts a hexadecimal bytes representation of a hash to that hash Args: hash (bytes): a :class:`bytes` containing the hexadecimal form of the hash encoded in ascii Returns: bytes: the :class:`bytes` form of the hash """ return hash_to_bytes(hex.decode()) diff --git a/swh/model/hypothesis_strategies.py b/swh/model/hypothesis_strategies.py index 320b160..a66efdc 100644 --- a/swh/model/hypothesis_strategies.py +++ b/swh/model/hypothesis_strategies.py @@ -1,380 +1,409 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from hypothesis import assume from hypothesis.strategies import ( - binary, booleans, builds, characters, - composite, datetimes, dictionaries, from_regex, integers, just, lists, - none, one_of, sampled_from, sets, text, ) + binary, + booleans, + builds, + characters, + composite, + datetimes, + dictionaries, + from_regex, + integers, + just, + lists, + none, + one_of, + sampled_from, + sets, + text, +) from .from_disk import DentryPerms from .model import ( - Person, Timestamp, TimestampWithTimezone, Origin, - OriginVisit, OriginVisitUpdate, Snapshot, SnapshotBranch, ObjectType, - TargetType, Release, Revision, RevisionType, BaseContent, Directory, - DirectoryEntry, Content, SkippedContent, ) + Person, + Timestamp, + TimestampWithTimezone, + Origin, + OriginVisit, + OriginVisitUpdate, + Snapshot, + SnapshotBranch, + ObjectType, + TargetType, + Release, + Revision, + RevisionType, + BaseContent, + Directory, + DirectoryEntry, + Content, + SkippedContent, +) from .identifiers import snapshot_identifier, identifier_to_bytes pgsql_alphabet = characters( - blacklist_categories=('Cs', ), - blacklist_characters=['\u0000']) # postgresql does not like these + blacklist_categories=("Cs",), blacklist_characters=["\u0000"] +) # 
postgresql does not like these def optional(strategy): return one_of(none(), strategy) def pgsql_text(): return text(alphabet=pgsql_alphabet) def sha1_git(): return binary(min_size=20, max_size=20) def sha1(): return binary(min_size=20, max_size=20) @composite def urls(draw): - protocol = draw(sampled_from(['git', 'http', 'https', 'deb'])) - domain = draw(from_regex(r'\A([a-z]([a-z0-9-]*)\.){1,3}[a-z0-9]+\Z')) + protocol = draw(sampled_from(["git", "http", "https", "deb"])) + domain = draw(from_regex(r"\A([a-z]([a-z0-9-]*)\.){1,3}[a-z0-9]+\Z")) - return '%s://%s' % (protocol, domain) + return "%s://%s" % (protocol, domain) def persons_d(): return builds( - dict, - fullname=binary(), - email=optional(binary()), - name=optional(binary()), + dict, fullname=binary(), email=optional(binary()), name=optional(binary()), ) def persons(): return persons_d().map(Person.from_dict) def timestamps_d(): max_seconds = datetime.datetime.max.replace( - tzinfo=datetime.timezone.utc).timestamp() + tzinfo=datetime.timezone.utc + ).timestamp() min_seconds = datetime.datetime.min.replace( - tzinfo=datetime.timezone.utc).timestamp() + tzinfo=datetime.timezone.utc + ).timestamp() return builds( dict, seconds=integers(min_seconds, max_seconds), - microseconds=integers(0, 1000000)) + microseconds=integers(0, 1000000), + ) def timestamps(): return timestamps_d().map(Timestamp.from_dict) @composite def timestamps_with_timezone_d( - draw, - timestamp=timestamps_d(), - offset=integers(min_value=-14*60, max_value=14*60), - negative_utc=booleans()): + draw, + timestamp=timestamps_d(), + offset=integers(min_value=-14 * 60, max_value=14 * 60), + negative_utc=booleans(), +): timestamp = draw(timestamp) offset = draw(offset) negative_utc = draw(negative_utc) assume(not (negative_utc and offset)) - return dict( - timestamp=timestamp, - offset=offset, - negative_utc=negative_utc) + return dict(timestamp=timestamp, offset=offset, negative_utc=negative_utc) timestamps_with_timezone = timestamps_with_timezone_d().map( - TimestampWithTimezone.from_dict) + TimestampWithTimezone.from_dict +) def origins_d(): - return builds( - dict, - url=urls()) + return builds(dict, url=urls()) def origins(): return origins_d().map(Origin.from_dict) def origin_visits_d(): return builds( dict, visit=integers(0, 1000), origin=urls(), date=datetimes(), - status=sampled_from(['ongoing', 'full', 'partial']), + status=sampled_from(["ongoing", "full", "partial"]), type=pgsql_text(), snapshot=optional(sha1_git()), ) def origin_visits(): return origin_visits_d().map(OriginVisit.from_dict) def metadata_dicts(): return dictionaries(pgsql_text(), pgsql_text()) def origin_visit_updates_d(): return builds( dict, visit=integers(0, 1000), origin=urls(), - status=sampled_from(['ongoing', 'full', 'partial']), + status=sampled_from(["ongoing", "full", "partial"]), date=datetimes(), snapshot=optional(sha1_git()), - metadata=one_of(none(), metadata_dicts())) + metadata=one_of(none(), metadata_dicts()), + ) def origin_visit_updates(): return origin_visit_updates_d().map(OriginVisitUpdate.from_dict) @composite def releases_d(draw): target_type = sampled_from([x.value for x in ObjectType]) name = binary() message = binary() synthetic = booleans() target = sha1_git() metadata = one_of(none(), revision_metadata()) - return draw(one_of( - builds( - dict, - name=name, - message=message, - synthetic=synthetic, - author=none(), - date=none(), - target=target, - target_type=target_type, - metadata=metadata, - ), - builds( - dict, - name=name, - message=message, - 
synthetic=synthetic, - date=timestamps_with_timezone_d(), - author=persons_d(), - target=target, - target_type=target_type, - metadata=metadata, - ), - )) + return draw( + one_of( + builds( + dict, + name=name, + message=message, + synthetic=synthetic, + author=none(), + date=none(), + target=target, + target_type=target_type, + metadata=metadata, + ), + builds( + dict, + name=name, + message=message, + synthetic=synthetic, + date=timestamps_with_timezone_d(), + author=persons_d(), + target=target, + target_type=target_type, + metadata=metadata, + ), + ) + ) def releases(): return releases_d().map(Release.from_dict) revision_metadata = metadata_dicts def revisions_d(): return builds( dict, message=binary(), synthetic=booleans(), author=persons_d(), committer=persons_d(), date=timestamps_with_timezone_d(), committer_date=timestamps_with_timezone_d(), parents=lists(sha1_git()), directory=sha1_git(), type=sampled_from([x.value for x in RevisionType]), - metadata=one_of(none(), revision_metadata())) + metadata=one_of(none(), revision_metadata()), + ) # TODO: metadata['extra_headers'] can have binary keys and values def revisions(): return revisions_d().map(Revision.from_dict) def directory_entries_d(): return builds( dict, name=binary(), target=sha1_git(), - type=sampled_from(['file', 'dir', 'rev']), - perms=sampled_from([perm.value for perm in DentryPerms])) + type=sampled_from(["file", "dir", "rev"]), + perms=sampled_from([perm.value for perm in DentryPerms]), + ) def directory_entries(): return directory_entries_d().map(DirectoryEntry) def directories_d(): - return builds( - dict, - entries=lists(directory_entries_d())) + return builds(dict, entries=lists(directory_entries_d())) def directories(): return directories_d().map(Directory.from_dict) def contents_d(): return one_of(present_contents_d(), skipped_contents_d()) def contents(): return one_of(present_contents(), skipped_contents()) def present_contents_d(): return builds( dict, data=binary(max_size=4096), ctime=optional(datetimes()), - status=one_of(just('visible'), just('hidden')), + status=one_of(just("visible"), just("hidden")), ) def present_contents(): return present_contents_d().map(lambda d: Content.from_data(**d)) @composite def skipped_contents_d(draw): result = BaseContent._hash_data(draw(binary(max_size=4096))) - result.pop('data') + result.pop("data") nullify_attrs = draw( - sets(sampled_from(['sha1', 'sha1_git', 'sha256', 'blake2s256'])) + sets(sampled_from(["sha1", "sha1_git", "sha256", "blake2s256"])) ) for k in nullify_attrs: result[k] = None - result['reason'] = draw(pgsql_text()) - result['status'] = 'absent' - result['ctime'] = draw(optional(datetimes())) + result["reason"] = draw(pgsql_text()) + result["status"] = "absent" + result["ctime"] = draw(optional(datetimes())) return result def skipped_contents(): return skipped_contents_d().map(SkippedContent.from_dict) def branch_names(): return binary(min_size=1) def branch_targets_object_d(): return builds( dict, target=sha1_git(), - target_type=sampled_from([ - x.value for x in TargetType - if x.value not in ('alias', )])) + target_type=sampled_from( + [x.value for x in TargetType if x.value not in ("alias",)] + ), + ) def branch_targets_alias_d(): return builds( - dict, - target=sha1_git(), - target_type=just('alias')) # TargetType.ALIAS.value)) + dict, target=sha1_git(), target_type=just("alias") + ) # TargetType.ALIAS.value)) def branch_targets_d(*, only_objects=False): if only_objects: return branch_targets_object_d() else: return one_of(branch_targets_alias_d(), 
branch_targets_object_d()) def branch_targets(*, only_objects=False): - return builds( - SnapshotBranch.from_dict, - branch_targets_d(only_objects=only_objects)) + return builds(SnapshotBranch.from_dict, branch_targets_d(only_objects=only_objects)) @composite def snapshots_d(draw, *, min_size=0, max_size=100, only_objects=False): - branches = draw(dictionaries( - keys=branch_names(), - values=one_of( - none(), - branch_targets_d(only_objects=only_objects) - ), - min_size=min_size, - max_size=max_size, - )) + branches = draw( + dictionaries( + keys=branch_names(), + values=one_of(none(), branch_targets_d(only_objects=only_objects)), + min_size=min_size, + max_size=max_size, + ) + ) if not only_objects: # Make sure aliases point to actual branches unresolved_aliases = { - branch: target['target'] + branch: target["target"] for branch, target in branches.items() - if (target - and target['target_type'] == 'alias' - and target['target'] not in branches) + if ( + target + and target["target_type"] == "alias" + and target["target"] not in branches + ) } for alias_name, alias_target in unresolved_aliases.items(): # Override alias branch with one pointing to a real object # if max_size constraint is reached alias = alias_target if len(branches) < max_size else alias_name branches[alias] = draw(branch_targets_d(only_objects=True)) # Ensure no cycles between aliases while True: try: - id_ = snapshot_identifier({ - 'branches': { - name: branch or None - for (name, branch) in branches.items()}}) + id_ = snapshot_identifier( + { + "branches": { + name: branch or None for (name, branch) in branches.items() + } + } + ) except ValueError as e: for (source, target) in e.args[1]: branches[source] = draw(branch_targets_d(only_objects=True)) else: break - return dict( - id=identifier_to_bytes(id_), - branches=branches) + return dict(id=identifier_to_bytes(id_), branches=branches) def snapshots(*, min_size=0, max_size=100, only_objects=False): - return snapshots_d(min_size=min_size, max_size=max_size, - only_objects=only_objects).map( - Snapshot.from_dict) + return snapshots_d( + min_size=min_size, max_size=max_size, only_objects=only_objects + ).map(Snapshot.from_dict) def objects(): return one_of( - origins().map(lambda x: ('origin', x)), - origin_visits().map(lambda x: ('origin_visit', x)), - origin_visit_updates().map(lambda x: ('origin_visit_update', x)), - snapshots().map(lambda x: ('snapshot', x)), - releases().map(lambda x: ('release', x)), - revisions().map(lambda x: ('revision', x)), - directories().map(lambda x: ('directory', x)), - contents().map(lambda x: ('content', x)), + origins().map(lambda x: ("origin", x)), + origin_visits().map(lambda x: ("origin_visit", x)), + origin_visit_updates().map(lambda x: ("origin_visit_update", x)), + snapshots().map(lambda x: ("snapshot", x)), + releases().map(lambda x: ("release", x)), + revisions().map(lambda x: ("revision", x)), + directories().map(lambda x: ("directory", x)), + contents().map(lambda x: ("content", x)), ) def object_dicts(): """generates a random couple (type, dict) which dict is suitable for .from_dict() factory methods. 
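A hedged usage sketch (the test name and body are illustrative
only, not part of this module)::

    from hypothesis import given

    @given(object_dicts())
    def test_object_dicts_shape(type_and_dict):
        # each draw pairs a type name with a dict accepted by the
        # matching .from_dict() factory in swh.model.model
        objtype, d = type_and_dict
        assert isinstance(objtype, str)
        assert isinstance(d, dict)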
""" return one_of( - origins_d().map(lambda x: ('origin', x)), - origin_visits_d().map(lambda x: ('origin_visit', x)), - snapshots_d().map(lambda x: ('snapshot', x)), - releases_d().map(lambda x: ('release', x)), - revisions_d().map(lambda x: ('revision', x)), - directories_d().map(lambda x: ('directory', x)), - contents_d().map(lambda x: ('content', x)), + origins_d().map(lambda x: ("origin", x)), + origin_visits_d().map(lambda x: ("origin_visit", x)), + snapshots_d().map(lambda x: ("snapshot", x)), + releases_d().map(lambda x: ("release", x)), + revisions_d().map(lambda x: ("revision", x)), + directories_d().map(lambda x: ("directory", x)), + contents_d().map(lambda x: ("content", x)), ) diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py index 6ccf948..dd34513 100644 --- a/swh/model/identifiers.py +++ b/swh/model/identifiers.py @@ -1,808 +1,826 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime import hashlib from functools import lru_cache from typing import Any, Dict, NamedTuple from .exceptions import ValidationError from .fields.hashes import validate_sha1 from .hashutil import hash_git_data, hash_to_hex, MultiHash -ORIGIN = 'origin' -SNAPSHOT = 'snapshot' -REVISION = 'revision' -RELEASE = 'release' -DIRECTORY = 'directory' -CONTENT = 'content' - -PID_NAMESPACE = 'swh' +ORIGIN = "origin" +SNAPSHOT = "snapshot" +REVISION = "revision" +RELEASE = "release" +DIRECTORY = "directory" +CONTENT = "content" + +PID_NAMESPACE = "swh" PID_VERSION = 1 -PID_TYPES = ['ori', 'snp', 'rel', 'rev', 'dir', 'cnt'] -PID_SEP = ':' -PID_CTXT_SEP = ';' +PID_TYPES = ["ori", "snp", "rel", "rev", "dir", "cnt"] +PID_SEP = ":" +PID_CTXT_SEP = ";" @lru_cache() def identifier_to_bytes(identifier): """Convert a text identifier to bytes. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 20 bytestring corresponding to the given identifier Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( - 'Wrong length for bytes identifier %s, expected 20' % - len(identifier)) + "Wrong length for bytes identifier %s, expected 20" % len(identifier) + ) return identifier if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( - 'Wrong length for str identifier %s, expected 40' % - len(identifier)) + "Wrong length for str identifier %s, expected 40" % len(identifier) + ) return bytes.fromhex(identifier) - raise ValueError('Wrong type for identifier %s, expected bytes or str' % - identifier.__class__.__name__) + raise ValueError( + "Wrong type for identifier %s, expected bytes or str" + % identifier.__class__.__name__ + ) @lru_cache() def identifier_to_str(identifier): """Convert an identifier to an hexadecimal string. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 40 string corresponding to the given identifier, hex encoded Raises: ValueError: if the identifier is of an unexpected type or length. 
""" if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( - 'Wrong length for str identifier %s, expected 40' % - len(identifier)) + "Wrong length for str identifier %s, expected 40" % len(identifier) + ) return identifier if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( - 'Wrong length for bytes identifier %s, expected 20' % - len(identifier)) + "Wrong length for bytes identifier %s, expected 20" % len(identifier) + ) return binascii.hexlify(identifier).decode() - raise ValueError('Wrong type for identifier %s, expected bytes or str' % - identifier.__class__.__name__) + raise ValueError( + "Wrong type for identifier %s, expected bytes or str" + % identifier.__class__.__name__ + ) def content_identifier(content): """Return the intrinsic identifier for a content. A content's identifier is the sha1, sha1_git and sha256 checksums of its data. Args: content: a content conforming to the Software Heritage schema Returns: A dictionary with all the hashes for the data Raises: KeyError: if the content doesn't have a data member. """ - return MultiHash.from_data(content['data']).digest() + return MultiHash.from_data(content["data"]).digest() def directory_entry_sort_key(entry): """The sorting key for tree entries""" - if entry['type'] == 'dir': - return entry['name'] + b'/' + if entry["type"] == "dir": + return entry["name"] + b"/" else: - return entry['name'] + return entry["name"] @lru_cache() def _perms_to_bytes(perms): """Convert the perms value to its bytes representation""" oc = oct(perms)[2:] - return oc.encode('ascii') + return oc.encode("ascii") def escape_newlines(snippet): """Escape the newlines present in snippet according to git rules. New lines in git manifests are escaped by indenting the next line by one space. """ - if b'\n' in snippet: - return b'\n '.join(snippet.split(b'\n')) + if b"\n" in snippet: + return b"\n ".join(snippet.split(b"\n")) else: return snippet def directory_identifier(directory): """Return the intrinsic identifier for a directory. A directory's identifier is the tree sha1 à la git of a directory listing, using the following algorithm, which is equivalent to the git algorithm for trees: 1. Entries of the directory are sorted using the name (or the name with '/' appended for directory entries) as key, in bytes order. 2. 
For each entry of the directory, the following bytes are output: - the octal representation of the permissions for the entry (stored in the 'perms' member), which is a representation of the entry type: - b'100644' (int 33188) for files - b'100755' (int 33261) for executable files - b'120000' (int 40960) for symbolic links - b'40000' (int 16384) for directories - b'160000' (int 57344) for references to revisions - an ascii space (b'\x20') - the entry's name (as raw bytes), stored in the 'name' member - a null byte (b'\x00') - the 20 byte long identifier of the object pointed at by the entry, stored in the 'target' member: - for files or executable files: their blob sha1_git - for symbolic links: the blob sha1_git of a file containing the link destination - for directories: their intrinsic identifier - for revisions: their intrinsic identifier (Note that there is no separator between entries) """ components = [] - for entry in sorted(directory['entries'], key=directory_entry_sort_key): - components.extend([ - _perms_to_bytes(entry['perms']), - b'\x20', - entry['name'], - b'\x00', - identifier_to_bytes(entry['target']), - ]) + for entry in sorted(directory["entries"], key=directory_entry_sort_key): + components.extend( + [ + _perms_to_bytes(entry["perms"]), + b"\x20", + entry["name"], + b"\x00", + identifier_to_bytes(entry["target"]), + ] + ) - return identifier_to_str(hash_git_data(b''.join(components), 'tree')) + return identifier_to_str(hash_git_data(b"".join(components), "tree")) def format_date(date): """Convert a date object into an UTC timestamp encoded as ascii bytes. Git stores timestamps as an integer number of seconds since the UNIX epoch. However, Software Heritage stores timestamps as an integer number of microseconds (postgres type "datetime with timezone"). Therefore, we print timestamps with no microseconds as integers, and timestamps with microseconds as floating point values. We elide the trailing zeroes from microsecond values, to "future-proof" our representation if we ever need more precision in timestamps. """ if not isinstance(date, dict): - raise ValueError('format_date only supports dicts, %r received' % date) + raise ValueError("format_date only supports dicts, %r received" % date) - seconds = date.get('seconds', 0) - microseconds = date.get('microseconds', 0) + seconds = date.get("seconds", 0) + microseconds = date.get("microseconds", 0) if not microseconds: return str(seconds).encode() else: - float_value = ('%d.%06d' % (seconds, microseconds)) - return float_value.rstrip('0').encode() + float_value = "%d.%06d" % (seconds, microseconds) + return float_value.rstrip("0").encode() @lru_cache() def format_offset(offset, negative_utc=None): """Convert an integer number of minutes into an offset representation. The offset representation is [+-]hhmm where: - hh is the number of hours; - mm is the number of minutes. A null offset is represented as +0000. 
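Examples (doctest-style, derived directly from the rules above)::

    >>> format_offset(0)
    b'+0000'
    >>> format_offset(0, negative_utc=True)
    b'-0000'
    >>> format_offset(-90)
    b'-0130'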
""" if offset < 0 or offset == 0 and negative_utc: - sign = '-' + sign = "-" else: - sign = '+' + sign = "+" hours = abs(offset) // 60 minutes = abs(offset) % 60 - t = '%s%02d%02d' % (sign, hours, minutes) + t = "%s%02d%02d" % (sign, hours, minutes) return t.encode() def normalize_timestamp(time_representation): """Normalize a time representation for processing by Software Heritage This function supports a numeric timestamp (representing a number of seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a :obj:`datetime.datetime` object (with timezone information), or a normalized Software Heritage time representation (idempotency). Args: time_representation: the representation of a timestamp Returns: dict: a normalized dictionary with three keys: - timestamp: a dict with two optional keys: - seconds: the integral number of seconds since the UNIX epoch - microseconds: the integral number of microseconds - offset: the timezone offset as a number of minutes relative to UTC - negative_utc: a boolean representing whether the offset is -0000 when offset = 0. """ if time_representation is None: return None negative_utc = False if isinstance(time_representation, dict): - ts = time_representation['timestamp'] + ts = time_representation["timestamp"] if isinstance(ts, dict): - seconds = ts.get('seconds', 0) - microseconds = ts.get('microseconds', 0) + seconds = ts.get("seconds", 0) + microseconds = ts.get("microseconds", 0) elif isinstance(ts, int): seconds = ts microseconds = 0 else: raise ValueError( - 'normalize_timestamp received non-integer timestamp member:' - ' %r' % ts) - offset = time_representation['offset'] - if 'negative_utc' in time_representation: - negative_utc = time_representation['negative_utc'] + "normalize_timestamp received non-integer timestamp member:" " %r" % ts + ) + offset = time_representation["offset"] + if "negative_utc" in time_representation: + negative_utc = time_representation["negative_utc"] elif isinstance(time_representation, datetime.datetime): seconds = int(time_representation.timestamp()) microseconds = time_representation.microsecond utcoffset = time_representation.utcoffset() if utcoffset is None: raise ValueError( - 'normalize_timestamp received datetime without timezone: %s' % - time_representation) + "normalize_timestamp received datetime without timezone: %s" + % time_representation + ) # utcoffset is an integer number of minutes seconds_offset = utcoffset.total_seconds() offset = int(seconds_offset) // 60 elif isinstance(time_representation, int): seconds = time_representation microseconds = 0 offset = 0 else: raise ValueError( - 'normalize_timestamp received non-integer timestamp:' - ' %r' % time_representation) + "normalize_timestamp received non-integer timestamp:" + " %r" % time_representation + ) return { - 'timestamp': { - 'seconds': seconds, - 'microseconds': microseconds, - }, - 'offset': offset, - 'negative_utc': negative_utc, + "timestamp": {"seconds": seconds, "microseconds": microseconds,}, + "offset": offset, + "negative_utc": negative_utc, } def format_author(author): """Format the specification of an author. An author is either a byte string (passed unchanged), or a dict with three keys, fullname, name and email. If the fullname exists, return it; if it doesn't, we construct a fullname using the following heuristics: if the name value is None, we return the email in angle brackets, else, we return the name, a space, and the email in angle brackets. 
""" if isinstance(author, bytes) or author is None: return author - if 'fullname' in author: - return author['fullname'] + if "fullname" in author: + return author["fullname"] ret = [] - if author['name'] is not None: - ret.append(author['name']) - if author['email'] is not None: - ret.append(b''.join([b'<', author['email'], b'>'])) + if author["name"] is not None: + ret.append(author["name"]) + if author["email"] is not None: + ret.append(b"".join([b"<", author["email"], b">"])) - return b' '.join(ret) + return b" ".join(ret) def format_author_line(header, author, date_offset): """Format a an author line according to git standards. An author line has three components: - a header, describing the type of author (author, committer, tagger) - a name and email, which is an arbitrary bytestring - optionally, a timestamp with UTC offset specification The author line is formatted thus:: `header` `name and email`[ `timestamp` `utc_offset`] The timestamp is encoded as a (decimal) number of seconds since the UNIX epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we support fractional timestamps, using a dot as the separator for the decimal part. The utc offset is a number of minutes encoded as '[+-]HHMM'. Note some tools can pass a negative offset corresponding to the UTC timezone ('-0000'), which is valid and is encoded as such. For convenience, this function returns the whole line with its trailing newline. Args: header: the header of the author line (one of 'author', 'committer', 'tagger') author: an author specification (dict with two bytes values: name and email, or byte value) date_offset: a normalized date/time representation as returned by :func:`normalize_timestamp`. Returns: the newline-terminated byte string containing the author line """ - ret = [header.encode(), b' ', escape_newlines(format_author(author))] + ret = [header.encode(), b" ", escape_newlines(format_author(author))] date_offset = normalize_timestamp(date_offset) if date_offset is not None: - date_f = format_date(date_offset['timestamp']) - offset_f = format_offset(date_offset['offset'], - date_offset['negative_utc']) + date_f = format_date(date_offset["timestamp"]) + offset_f = format_offset(date_offset["offset"], date_offset["negative_utc"]) - ret.extend([b' ', date_f, b' ', offset_f]) + ret.extend([b" ", date_f, b" ", offset_f]) - ret.append(b'\n') - return b''.join(ret) + ret.append(b"\n") + return b"".join(ret) def revision_identifier(revision): """Return the intrinsic identifier for a revision. The fields used for the revision identifier computation are: - directory - parents - author - author_date - committer - committer_date - metadata -> extra_headers - message A revision's identifier is the 'git'-checksum of a commit manifest constructed as follows (newlines are a single ASCII newline character):: tree [for each parent in parents] parent [end for each parents] author committer [for each key, value in extra_headers] [end for each extra_headers] The directory identifier is the ascii representation of its hexadecimal encoding. Author and committer are formatted with the :func:`format_author` function. Dates are formatted with the :func:`format_offset` function. Extra headers are an ordered list of [key, value] pairs. Keys are strings and get encoded to utf-8 for identifier computation. Values are either byte strings, unicode strings (that get encoded to utf-8), or integers (that get encoded to their utf-8 decimal representation). 
Multiline extra header values are escaped by indenting the continuation lines with one ascii space. If the message is None, the manifest ends with the last header. Else, the message is appended to the headers after an empty line. The checksum of the full manifest is computed using the 'commit' git object type. """ components = [ - b'tree ', identifier_to_str(revision['directory']).encode(), b'\n', + b"tree ", + identifier_to_str(revision["directory"]).encode(), + b"\n", ] - for parent in revision['parents']: + for parent in revision["parents"]: if parent: - components.extend([ - b'parent ', identifier_to_str(parent).encode(), b'\n', - ]) - - components.extend([ - format_author_line('author', revision['author'], revision['date']), - format_author_line('committer', revision['committer'], - revision['committer_date']), - ]) + components.extend( + [b"parent ", identifier_to_str(parent).encode(), b"\n",] + ) + + components.extend( + [ + format_author_line("author", revision["author"], revision["date"]), + format_author_line( + "committer", revision["committer"], revision["committer_date"] + ), + ] + ) # Handle extra headers - metadata = revision.get('metadata') + metadata = revision.get("metadata") if not metadata: metadata = {} - for key, value in metadata.get('extra_headers', []): + for key, value in metadata.get("extra_headers", []): # Integer values: decimal representation if isinstance(value, int): - value = str(value).encode('utf-8') + value = str(value).encode("utf-8") # Unicode string values: utf-8 encoding if isinstance(value, str): - value = value.encode('utf-8') + value = value.encode("utf-8") # encode the key to utf-8 - components.extend([key.encode('utf-8'), b' ', - escape_newlines(value), b'\n']) + components.extend([key.encode("utf-8"), b" ", escape_newlines(value), b"\n"]) - if revision['message'] is not None: - components.extend([b'\n', revision['message']]) + if revision["message"] is not None: + components.extend([b"\n", revision["message"]]) - commit_raw = b''.join(components) - return identifier_to_str(hash_git_data(commit_raw, 'commit')) + commit_raw = b"".join(components) + return identifier_to_str(hash_git_data(commit_raw, "commit")) def target_type_to_git(target_type): """Convert a software heritage target type to a git object type""" return { - 'content': b'blob', - 'directory': b'tree', - 'revision': b'commit', - 'release': b'tag', - 'snapshot': b'refs' + "content": b"blob", + "directory": b"tree", + "revision": b"commit", + "release": b"tag", + "snapshot": b"refs", }[target_type] def release_identifier(release): """Return the intrinsic identifier for a release.""" components = [ - b'object ', identifier_to_str(release['target']).encode(), b'\n', - b'type ', target_type_to_git(release['target_type']), b'\n', - b'tag ', release['name'], b'\n', + b"object ", + identifier_to_str(release["target"]).encode(), + b"\n", + b"type ", + target_type_to_git(release["target_type"]), + b"\n", + b"tag ", + release["name"], + b"\n", ] - if 'author' in release and release['author']: + if "author" in release and release["author"]: components.append( - format_author_line('tagger', release['author'], release['date']) + format_author_line("tagger", release["author"], release["date"]) ) - if release['message'] is not None: - components.extend([b'\n', release['message']]) + if release["message"] is not None: + components.extend([b"\n", release["message"]]) - return identifier_to_str(hash_git_data(b''.join(components), 'tag')) + return identifier_to_str(hash_git_data(b"".join(components), 
"tag")) def snapshot_identifier(snapshot, *, ignore_unresolved=False): """Return the intrinsic identifier for a snapshot. Snapshots are a set of named branches, which are pointers to objects at any level of the Software Heritage DAG. As well as pointing to other objects in the Software Heritage DAG, branches can also be *alias*es, in which case their target is the name of another branch in the same snapshot, or *dangling*, in which case the target is unknown (and represented by the ``None`` value). A snapshot identifier is a salted sha1 (using the git hashing algorithm with the ``snapshot`` object type) of a manifest following the algorithm: 1. Branches are sorted using the name as key, in bytes order. 2. For each branch, the following bytes are output: - the type of the branch target: - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` for the corresponding entries in the DAG; - ``alias`` for branches referencing another branch; - ``dangling`` for dangling branches - an ascii space (``\\x20``) - the branch name (as raw bytes) - a null byte (``\\x00``) - the length of the target identifier, as an ascii-encoded decimal number (``20`` for current intrinsic identifiers, ``0`` for dangling branches, the length of the target branch name for branch aliases) - a colon (``:``) - the identifier of the target object pointed at by the branch, stored in the 'target' member: - for contents: their *sha1_git* - for directories, revisions, releases or snapshots: their intrinsic identifier - for branch aliases, the name of the target branch (as raw bytes) - for dangling branches, the empty string Note that, akin to directory manifests, there is no separator between entries. Because of symbolic branches, identifiers are of arbitrary length but are length-encoded to avoid ambiguity. Args: snapshot (dict): the snapshot of which to compute the identifier. A single entry is needed, ``'branches'``, which is itself a :class:`dict` mapping each branch to its target ignore_unresolved (bool): if `True`, ignore unresolved branch aliases. 
Returns: str: the intrinsic identifier for `snapshot` """ unresolved = [] lines = [] - for name, target in sorted(snapshot['branches'].items()): + for name, target in sorted(snapshot["branches"].items()): if not target: - target_type = b'dangling' - target_id = b'' - elif target['target_type'] == 'alias': - target_type = b'alias' - target_id = target['target'] - if target_id not in snapshot['branches'] or target_id == name: + target_type = b"dangling" + target_id = b"" + elif target["target_type"] == "alias": + target_type = b"alias" + target_id = target["target"] + if target_id not in snapshot["branches"] or target_id == name: unresolved.append((name, target_id)) else: - target_type = target['target_type'].encode() - target_id = identifier_to_bytes(target['target']) - - lines.extend([ - target_type, b'\x20', name, b'\x00', - ('%d:' % len(target_id)).encode(), target_id, - ]) + target_type = target["target_type"].encode() + target_id = identifier_to_bytes(target["target"]) + + lines.extend( + [ + target_type, + b"\x20", + name, + b"\x00", + ("%d:" % len(target_id)).encode(), + target_id, + ] + ) if unresolved and not ignore_unresolved: - raise ValueError('Branch aliases unresolved: %s' % - ', '.join('%s -> %s' % x for x in unresolved), - unresolved) + raise ValueError( + "Branch aliases unresolved: %s" + % ", ".join("%s -> %s" % x for x in unresolved), + unresolved, + ) - return identifier_to_str(hash_git_data(b''.join(lines), 'snapshot')) + return identifier_to_str(hash_git_data(b"".join(lines), "snapshot")) def origin_identifier(origin): """Return the intrinsic identifier for an origin. An origin's identifier is the sha1 checksum of the entire origin URL """ - return hashlib.sha1(origin['url'].encode('utf-8')).hexdigest() + return hashlib.sha1(origin["url"].encode("utf-8")).hexdigest() _object_type_map = { - ORIGIN: { - 'short_name': 'ori', - 'key_id': 'id' - }, - SNAPSHOT: { - 'short_name': 'snp', - 'key_id': 'id' - }, - RELEASE: { - 'short_name': 'rel', - 'key_id': 'id' - }, - REVISION: { - 'short_name': 'rev', - 'key_id': 'id' - }, - DIRECTORY: { - 'short_name': 'dir', - 'key_id': 'id' - }, - CONTENT: { - 'short_name': 'cnt', - 'key_id': 'sha1_git' - } + ORIGIN: {"short_name": "ori", "key_id": "id"}, + SNAPSHOT: {"short_name": "snp", "key_id": "id"}, + RELEASE: {"short_name": "rel", "key_id": "id"}, + REVISION: {"short_name": "rev", "key_id": "id"}, + DIRECTORY: {"short_name": "dir", "key_id": "id"}, + CONTENT: {"short_name": "cnt", "key_id": "sha1_git"}, } _PersistentId = NamedTuple( - 'PersistentId', [ - ('namespace', str), - ('scheme_version', int), - ('object_type', str), - ('object_id', str), - ('metadata', Dict[str, Any]), - ]) + "PersistentId", + [ + ("namespace", str), + ("scheme_version", int), + ("object_type", str), + ("object_id", str), + ("metadata", Dict[str, Any]), + ], +) class PersistentId(_PersistentId): """ Named tuple holding the relevant info associated to a Software Heritage persistent identifier. 
Args: namespace (str): the namespace of the identifier, defaults to 'swh' scheme_version (int): the scheme version of the identifier, defaults to 1 object_type (str): the type of object the identifier points to, either 'content', 'directory', 'release', 'revision' or 'snapshot' object_id (dict/bytes/str): object's dict representation or object identifier metadata (dict): optional dict filled with metadata related to pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Once created, it contains the following attributes: Attributes: namespace (str): the namespace of the identifier scheme_version (int): the scheme version of the identifier object_type (str): the type of object the identifier points to object_id (str): hexadecimal representation of the object hash metadata (dict): metadata related to the pointed object To get the raw persistent identifier string from an instance of this named tuple, use the :func:`str` function:: pid = PersistentId( object_type='content', object_id='8ff44f081d43176474b267de5451f2c2e88089d0' ) pid_str = str(pid) # 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0' """ + __slots__ = () - def __new__(cls, namespace=PID_NAMESPACE, scheme_version=PID_VERSION, - object_type='', object_id='', metadata={}): + def __new__( + cls, + namespace=PID_NAMESPACE, + scheme_version=PID_VERSION, + object_type="", + object_id="", + metadata={}, + ): o = _object_type_map.get(object_type) if not o: - raise ValidationError('Wrong input: Supported types are %s' % ( - list(_object_type_map.keys()))) + raise ValidationError( + "Wrong input: Supported types are %s" % (list(_object_type_map.keys())) + ) if namespace != PID_NAMESPACE: raise ValidationError( - "Wrong format: only supported namespace is '%s'" - % PID_NAMESPACE) + "Wrong format: only supported namespace is '%s'" % PID_NAMESPACE + ) if scheme_version != PID_VERSION: raise ValidationError( - 'Wrong format: only supported version is %d' % PID_VERSION) + "Wrong format: only supported version is %d" % PID_VERSION + ) # internal swh representation resolution if isinstance(object_id, dict): - object_id = object_id[o['key_id']] + object_id = object_id[o["key_id"]] validate_sha1(object_id) # can raise if invalid hash object_id = hash_to_hex(object_id) return super(cls, PersistentId).__new__( - cls, namespace, scheme_version, object_type, object_id, metadata) + cls, namespace, scheme_version, object_type, object_id, metadata + ) def __str__(self): o = _object_type_map.get(self.object_type) - pid = PID_SEP.join([self.namespace, str(self.scheme_version), - o['short_name'], self.object_id]) + pid = PID_SEP.join( + [self.namespace, str(self.scheme_version), o["short_name"], self.object_id] + ) if self.metadata: for k, v in self.metadata.items(): - pid += '%s%s=%s' % (PID_CTXT_SEP, k, v) + pid += "%s%s=%s" % (PID_CTXT_SEP, k, v) return pid -def persistent_identifier(object_type, object_id, scheme_version=1, - metadata={}): +def persistent_identifier(object_type, object_id, scheme_version=1, metadata={}): """Compute persistent identifier (stable over time) as per documentation. 
Documentation: https://docs.softwareheritage.org/devel/swh-model/persistent-identifiers.html # noqa Args: object_type (str): object's type, either 'content', 'directory', 'release', 'revision' or 'snapshot' object_id (dict/bytes/str): object's dict representation or object identifier scheme_version (int): persistent identifier scheme version, defaults to 1 metadata (dict): metadata related to the pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Returns: str: the persistent identifier """ - pid = PersistentId(scheme_version=scheme_version, object_type=object_type, - object_id=object_id, metadata=metadata) + pid = PersistentId( + scheme_version=scheme_version, + object_type=object_type, + object_id=object_id, + metadata=metadata, + ) return str(pid) def parse_persistent_identifier(persistent_id): """Parse swh's :ref:`persistent-identifiers` scheme. Args: persistent_id (str): A persistent identifier Raises: swh.model.exceptions.ValidationError: in case of: * missing mandatory values (4) * invalid namespace supplied * invalid version supplied * invalid type supplied * missing hash * invalid hash identifier supplied Returns: PersistentId: a named tuple holding the parsing result """ # ; persistent_id_parts = persistent_id.split(PID_CTXT_SEP) - pid_data = persistent_id_parts.pop(0).split(':') + pid_data = persistent_id_parts.pop(0).split(":") if len(pid_data) != 4: - raise ValidationError( - 'Wrong format: There should be 4 mandatory values') + raise ValidationError("Wrong format: There should be 4 mandatory values") # Checking for parsing errors _ns, _version, _type, _id = pid_data pid_data[1] = int(pid_data[1]) for otype, data in _object_type_map.items(): - if _type == data['short_name']: + if _type == data["short_name"]: pid_data[2] = otype break if not _id: - raise ValidationError( - 'Wrong format: Identifier should be present') + raise ValidationError("Wrong format: Identifier should be present") persistent_id_metadata = {} for part in persistent_id_parts: try: - key, val = part.split('=') + key, val = part.split("=") persistent_id_metadata[key] = val except Exception: - msg = 'Contextual data is badly formatted, form key=val expected' + msg = "Contextual data is badly formatted, form key=val expected" raise ValidationError(msg) pid_data.append(persistent_id_metadata) return PersistentId(*pid_data) diff --git a/swh/model/merkle.py b/swh/model/merkle.py index 9d97efd..36407d8 100644 --- a/swh/model/merkle.py +++ b/swh/model/merkle.py @@ -1,309 +1,313 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Merkle tree data structure""" import abc import collections from typing import Iterator, List, Optional, Set def deep_update(left, right): """Recursively update the left mapping with deeply nested values from the right mapping. This function is useful to merge the results of several calls to :func:`MerkleNode.collect`. Arguments: left: a mapping (modified by the update operation) right: a mapping Returns: the left mapping, updated with nested values from the right mapping Example: >>> a = { ... 'key1': { ... 'key2': { ... 'key3': 'value1/2/3', ... }, ... }, ... } >>> deep_update(a, { ... 'key1': { ... 'key2': { ... 'key4': 'value1/2/4', ... }, ... }, ... }) == { ... 'key1': { ... 'key2': { ... 'key3': 'value1/2/3', ... 'key4': 'value1/2/4', ... }, ... 
}, ... } True >>> deep_update(a, { ... 'key1': { ... 'key2': { ... 'key3': 'newvalue1/2/3', ... }, ... }, ... }) == { ... 'key1': { ... 'key2': { ... 'key3': 'newvalue1/2/3', ... 'key4': 'value1/2/4', ... }, ... }, ... } True """ for key, rvalue in right.items(): if isinstance(rvalue, collections.Mapping): new_lvalue = deep_update(left.get(key, {}), rvalue) left[key] = new_lvalue else: left[key] = rvalue return left class MerkleNode(dict, metaclass=abc.ABCMeta): """Representation of a node in a Merkle Tree. A (generalized) `Merkle Tree`_ is a tree in which every node is labeled with a hash of its own data and the hash of its children. .. _Merkle Tree: https://en.wikipedia.org/wiki/Merkle_tree In pseudocode:: node.hash = hash(node.data + sum(child.hash for child in node.children)) This class efficiently implements the Merkle Tree data structure on top of a Python :class:`dict`, minimizing hash computations and new data collections when updating nodes. Node data is stored in the :attr:`data` attribute, while (named) children are stored as items of the underlying dictionary. Addition, update and removal of objects are instrumented to automatically invalidate the hashes of the current node as well as its registered parents; It also resets the collection status of the objects so the updated objects can be collected. The collection of updated data from the tree is implemented through the :func:`collect` function and associated helpers. Attributes: data (dict): data associated to the current node parents (list): known parents of the current node collected (bool): whether the current node has been collected """ - __slots__ = ['parents', 'data', '__hash', 'collected'] + + __slots__ = ["parents", "data", "__hash", "collected"] type = None # type: Optional[str] # TODO: make this an enum """Type of the current node (used as a classifier for :func:`collect`)""" def __init__(self, data=None): super().__init__() self.parents = [] self.data = data self.__hash = None self.collected = False def __eq__(self, other): - return isinstance(other, MerkleNode) \ - and super().__eq__(other) and self.data == other.data + return ( + isinstance(other, MerkleNode) + and super().__eq__(other) + and self.data == other.data + ) def __ne__(self, other): return not self.__eq__(other) def invalidate_hash(self): """Invalidate the cached hash of the current node.""" if not self.__hash: return self.__hash = None self.collected = False for parent in self.parents: parent.invalidate_hash() def update_hash(self, *, force=False): """Recursively compute the hash of the current node. Args: force (bool): invalidate the cache and force the computation for this node and all children. """ if self.__hash and not force: return self.__hash if force: self.invalidate_hash() for child in self.values(): child.update_hash(force=force) self.__hash = self.compute_hash() return self.__hash @property def hash(self): """The hash of the current node, as calculated by :func:`compute_hash`. """ return self.update_hash() @abc.abstractmethod def compute_hash(self): """Compute the hash of the current node. The hash should depend on the data of the node, as well as on hashes of the children nodes. 
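A minimal concrete subclass (an illustrative sketch, not part of
this module, assuming byte-string node names and byte-string data)
could combine its own data with its children's hashes in sorted
name order::

    import hashlib

    class Sha1Node(MerkleNode):
        type = 'sha1-node'

        def compute_hash(self):
            # hash the node's own data, then each child's name and
            # (recursively computed) hash, in sorted name order
            h = hashlib.sha1(self.data or b'')
            for name in sorted(self):
                h.update(name + self[name].hash)
            return h.digest()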
""" - raise NotImplementedError('Must implement compute_hash method') + raise NotImplementedError("Must implement compute_hash method") def __setitem__(self, name, new_child): """Add a child, invalidating the current hash""" self.invalidate_hash() super().__setitem__(name, new_child) new_child.parents.append(self) def __delitem__(self, name): """Remove a child, invalidating the current hash""" if name in self: self.invalidate_hash() self[name].parents.remove(self) super().__delitem__(name) else: raise KeyError(name) def update(self, new_children): """Add several named children from a dictionary""" if not new_children: return self.invalidate_hash() for name, new_child in new_children.items(): new_child.parents.append(self) if name in self: self[name].parents.remove(self) super().update(new_children) def get_data(self, **kwargs): """Retrieve and format the collected data for the current node, for use by :func:`collect`. Can be overridden, for instance when you want the collected data to contain information about the child nodes. Arguments: kwargs: allow subclasses to alter behaviour depending on how :func:`collect` is called. Returns: data formatted for :func:`collect` """ return self.data def collect_node(self, **kwargs): """Collect the data for the current node, for use by :func:`collect`. Arguments: kwargs: passed as-is to :func:`get_data`. Returns: A :class:`dict` compatible with :func:`collect`. """ if not self.collected: self.collected = True return {self.type: {self.hash: self.get_data(**kwargs)}} else: return {} def collect(self, **kwargs): """Collect the data for all nodes in the subtree rooted at `self`. The data is deduplicated by type and by hash. Arguments: kwargs: passed as-is to :func:`get_data`. Returns: A :class:`dict` with the following structure:: { 'typeA': { node1.hash: node1.get_data(), node2.hash: node2.get_data(), }, 'typeB': { node3.hash: node3.get_data(), ... }, ... } """ ret = self.collect_node(**kwargs) for child in self.values(): deep_update(ret, child.collect(**kwargs)) return ret def reset_collect(self): """Recursively unmark collected nodes in the subtree rooted at `self`. This lets the caller use :func:`collect` again. """ self.collected = False for child in self.values(): child.reset_collect() - def iter_tree(self) -> Iterator['MerkleNode']: + def iter_tree(self) -> Iterator["MerkleNode"]: """Yields all children nodes, recursively. Common nodes are deduplicated. """ yield from self._iter_tree(set()) - def _iter_tree( - self, seen: Set[bytes]) -> Iterator['MerkleNode']: + def _iter_tree(self, seen: Set[bytes]) -> Iterator["MerkleNode"]: if self.hash not in seen: seen.add(self.hash) yield self for child in self.values(): yield from child._iter_tree(seen=seen) class MerkleLeaf(MerkleNode): """A leaf to a Merkle tree. A Merkle leaf is simply a Merkle node with children disabled. """ + __slots__ = [] # type: List[str] def __setitem__(self, name, child): - raise ValueError('%s is a leaf' % self.__class__.__name__) + raise ValueError("%s is a leaf" % self.__class__.__name__) def __getitem__(self, name): - raise ValueError('%s is a leaf' % self.__class__.__name__) + raise ValueError("%s is a leaf" % self.__class__.__name__) def __delitem__(self, name): - raise ValueError('%s is a leaf' % self.__class__.__name__) + raise ValueError("%s is a leaf" % self.__class__.__name__) def update(self, new_children): """Children update operation. 
Disabled for leaves.""" - raise ValueError('%s is a leaf' % self.__class__.__name__) + raise ValueError("%s is a leaf" % self.__class__.__name__) diff --git a/swh/model/model.py b/swh/model/model.py index d7b1f89..2814fd6 100644 --- a/swh/model/model.py +++ b/swh/model/model.py @@ -1,736 +1,612 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from abc import ABCMeta, abstractmethod from enum import Enum from typing import Dict, List, Optional, Union import attr from attrs_strict import type_validator import dateutil.parser import iso8601 from .identifiers import ( - normalize_timestamp, directory_identifier, revision_identifier, - release_identifier, snapshot_identifier + normalize_timestamp, + directory_identifier, + revision_identifier, + release_identifier, + snapshot_identifier, ) from .hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, MultiHash class MissingData(Exception): """Raised by `Content.with_data` when it has no way of fetching the data (but not when fetching the data fails).""" + pass SHA1_SIZE = 20 # TODO: Limit this to 20 bytes Sha1Git = bytes def dictify(value): "Helper function used by BaseModel.to_dict()" if isinstance(value, BaseModel): return value.to_dict() elif isinstance(value, Enum): return value.value elif isinstance(value, dict): return {k: dictify(v) for k, v in value.items()} elif isinstance(value, list): return [dictify(v) for v in value] else: return value class BaseModel: """Base class for SWH model classes. Provides serialization/deserialization to/from Python dictionaries, that are suitable for JSON/msgpack-like formats.""" def to_dict(self): """Wrapper of `attr.asdict` that can be overridden by subclasses that have special handling of some of the fields.""" return dictify(attr.asdict(self, recurse=False)) @classmethod def from_dict(cls, d): """Takes a dictionary representing a tree of SWH objects, and recursively builds the corresponding objects.""" return cls(**d) class HashableObject(metaclass=ABCMeta): """Mixin to automatically compute object identifier hash when the associated model is instantiated.""" @staticmethod @abstractmethod def compute_hash(object_dict): """Derived model classes must implement this to compute the object hash from its dict representation.""" pass def __attrs_post_init__(self): if not self.id: obj_id = hash_to_bytes(self.compute_hash(self.to_dict())) - object.__setattr__(self, 'id', obj_id) + object.__setattr__(self, "id", obj_id) @attr.s(frozen=True) class Person(BaseModel): """Represents the author/committer of a revision or release.""" - fullname = attr.ib( - type=bytes, - validator=type_validator()) - name = attr.ib( - type=Optional[bytes], - validator=type_validator()) - email = attr.ib( - type=Optional[bytes], - validator=type_validator()) + + fullname = attr.ib(type=bytes, validator=type_validator()) + name = attr.ib(type=Optional[bytes], validator=type_validator()) + email = attr.ib(type=Optional[bytes], validator=type_validator()) @classmethod def from_fullname(cls, fullname: bytes): """Returns a Person object, by guessing the name and email from the fullname, in the `name <email>` format.
The fullname is left unchanged.""" if fullname is None: - raise TypeError('fullname is None.') + raise TypeError("fullname is None.") name: Optional[bytes] email: Optional[bytes] try: - open_bracket = fullname.index(b'<') + open_bracket = fullname.index(b"<") except ValueError: name = fullname email = None else: raw_name = fullname[:open_bracket] - raw_email = fullname[open_bracket+1:] + raw_email = fullname[open_bracket + 1 :] if not raw_name: name = None else: name = raw_name.strip() try: - close_bracket = raw_email.rindex(b'>') + close_bracket = raw_email.rindex(b">") except ValueError: email = raw_email else: email = raw_email[:close_bracket] - return Person( - name=name or None, - email=email or None, - fullname=fullname, - ) + return Person(name=name or None, email=email or None, fullname=fullname,) @attr.s(frozen=True) class Timestamp(BaseModel): """Represents a naive timestamp from a VCS.""" - seconds = attr.ib( - type=int, - validator=type_validator()) - microseconds = attr.ib( - type=int, - validator=type_validator()) + + seconds = attr.ib(type=int, validator=type_validator()) + microseconds = attr.ib(type=int, validator=type_validator()) @seconds.validator def check_seconds(self, attribute, value): """Check that seconds fit in a 64-bits signed integer.""" - if not (-2**63 <= value < 2**63): - raise ValueError('Seconds must be a signed 64-bits integer.') + if not (-(2 ** 63) <= value < 2 ** 63): + raise ValueError("Seconds must be a signed 64-bits integer.") @microseconds.validator def check_microseconds(self, attribute, value): """Checks that microseconds are positive and < 1000000.""" - if not (0 <= value < 10**6): - raise ValueError('Microseconds must be in [0, 1000000[.') + if not (0 <= value < 10 ** 6): + raise ValueError("Microseconds must be in [0, 1000000[.") @attr.s(frozen=True) class TimestampWithTimezone(BaseModel): """Represents a TZ-aware timestamp from a VCS.""" - timestamp = attr.ib( - type=Timestamp, - validator=type_validator()) - offset = attr.ib( - type=int, - validator=type_validator()) - negative_utc = attr.ib( - type=bool, - validator=type_validator()) + + timestamp = attr.ib(type=Timestamp, validator=type_validator()) + offset = attr.ib(type=int, validator=type_validator()) + negative_utc = attr.ib(type=bool, validator=type_validator()) @offset.validator def check_offset(self, attribute, value): """Checks the offset is a 16-bits signed integer (in theory, it should always be between -14 and +14 hours).""" - if not (-2**15 <= value < 2**15): + if not (-(2 ** 15) <= value < 2 ** 15): # max 14 hours offset in theory, but you never know what # you'll find in the wild... 
- raise ValueError('offset too large: %d minutes' % value) + raise ValueError("offset too large: %d minutes" % value) @negative_utc.validator def check_negative_utc(self, attribute, value): if self.offset and value: raise ValueError("negative_utc can only be True if offset=0") @classmethod def from_dict(cls, obj: Union[Dict, datetime.datetime, int]): """Builds a TimestampWithTimezone from any of the formats accepted by :func:`swh.model.normalize_timestamp`.""" # TODO: this accepts way more types than just dicts; find a better # name d = normalize_timestamp(obj) return cls( - timestamp=Timestamp.from_dict(d['timestamp']), - offset=d['offset'], - negative_utc=d['negative_utc']) + timestamp=Timestamp.from_dict(d["timestamp"]), + offset=d["offset"], + negative_utc=d["negative_utc"], + ) @classmethod def from_datetime(cls, dt: datetime.datetime): return cls.from_dict(dt) @classmethod def from_iso8601(cls, s): """Builds a TimestampWithTimezone from an ISO8601-formatted string. """ dt = iso8601.parse_date(s) tstz = cls.from_datetime(dt) - if dt.tzname() == '-00:00': + if dt.tzname() == "-00:00": tstz = attr.evolve(tstz, negative_utc=True) return tstz @attr.s(frozen=True) class Origin(BaseModel): """Represents a software source: a VCS and a URL.""" - url = attr.ib( - type=str, - validator=type_validator()) + + url = attr.ib(type=str, validator=type_validator()) @attr.s(frozen=True) class OriginVisit(BaseModel): """Represents a visit of an origin at a given point in time, by a SWH loader.""" - origin = attr.ib( - type=str, - validator=type_validator()) - date = attr.ib( - type=datetime.datetime, - validator=type_validator()) + + origin = attr.ib(type=str, validator=type_validator()) + date = attr.ib(type=datetime.datetime, validator=type_validator()) status = attr.ib( - type=str, - validator=attr.validators.in_(['ongoing', 'full', 'partial'])) - type = attr.ib( - type=str, - validator=type_validator()) - snapshot = attr.ib( - type=Optional[Sha1Git], - validator=type_validator()) + type=str, validator=attr.validators.in_(["ongoing", "full", "partial"]) + ) + type = attr.ib(type=str, validator=type_validator()) + snapshot = attr.ib(type=Optional[Sha1Git], validator=type_validator()) metadata = attr.ib( - type=Optional[Dict[str, object]], - validator=type_validator(), - default=None) - visit = attr.ib( - type=Optional[int], - validator=type_validator(), - default=None) + type=Optional[Dict[str, object]], validator=type_validator(), default=None + ) + visit = attr.ib(type=Optional[int], validator=type_validator(), default=None) """Should not be set before calling 'origin_visit_add()'.""" def to_dict(self): """Serializes the date as a string and omits the visit id if it is `None`.""" ov = super().to_dict() - if ov['visit'] is None: - del ov['visit'] + if ov["visit"] is None: + del ov["visit"] return ov @classmethod def from_dict(cls, d): """Parses the date from a string, and accepts missing visit ids.""" - if isinstance(d['date'], str): + if isinstance(d["date"], str): d = d.copy() - d['date'] = dateutil.parser.parse(d['date']) + d["date"] = dateutil.parser.parse(d["date"]) return super().from_dict(d) @attr.s(frozen=True) class OriginVisitUpdate(BaseModel): """Represents a visit update of an origin at a given point in time.
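A construction sketch (all field values below are illustrative)::

    import datetime

    update = OriginVisitUpdate(
        origin="https://example.org/repo.git",
        visit=1,
        date=datetime.datetime.now(tz=datetime.timezone.utc),
        status="full",
        snapshot=None,
    )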
""" - origin = attr.ib( - type=str, - validator=type_validator()) - visit = attr.ib( - type=int, - validator=type_validator()) - date = attr.ib( - type=datetime.datetime, - validator=type_validator()) + origin = attr.ib(type=str, validator=type_validator()) + visit = attr.ib(type=int, validator=type_validator()) + + date = attr.ib(type=datetime.datetime, validator=type_validator()) status = attr.ib( - type=str, - validator=attr.validators.in_(['ongoing', 'full', 'partial'])) - snapshot = attr.ib( - type=Optional[Sha1Git], - validator=type_validator()) + type=str, validator=attr.validators.in_(["ongoing", "full", "partial"]) + ) + snapshot = attr.ib(type=Optional[Sha1Git], validator=type_validator()) metadata = attr.ib( - type=Optional[Dict[str, object]], - validator=type_validator(), - default=None) + type=Optional[Dict[str, object]], validator=type_validator(), default=None + ) class TargetType(Enum): """The type of content pointed to by a snapshot branch. Usually a revision or an alias.""" - CONTENT = 'content' - DIRECTORY = 'directory' - REVISION = 'revision' - RELEASE = 'release' - SNAPSHOT = 'snapshot' - ALIAS = 'alias' + + CONTENT = "content" + DIRECTORY = "directory" + REVISION = "revision" + RELEASE = "release" + SNAPSHOT = "snapshot" + ALIAS = "alias" class ObjectType(Enum): """The type of content pointed to by a release. Usually a revision""" - CONTENT = 'content' - DIRECTORY = 'directory' - REVISION = 'revision' - RELEASE = 'release' - SNAPSHOT = 'snapshot' + + CONTENT = "content" + DIRECTORY = "directory" + REVISION = "revision" + RELEASE = "release" + SNAPSHOT = "snapshot" @attr.s(frozen=True) class SnapshotBranch(BaseModel): """Represents one of the branches of a snapshot.""" - target = attr.ib( - type=bytes, - validator=type_validator()) - target_type = attr.ib( - type=TargetType, - validator=type_validator()) + + target = attr.ib(type=bytes, validator=type_validator()) + target_type = attr.ib(type=TargetType, validator=type_validator()) @target.validator def check_target(self, attribute, value): """Checks the target type is not an alias, checks the target is a valid sha1_git.""" if self.target_type != TargetType.ALIAS and self.target is not None: if len(value) != 20: - raise ValueError('Wrong length for bytes identifier: %d' % - len(value)) + raise ValueError("Wrong length for bytes identifier: %d" % len(value)) @classmethod def from_dict(cls, d): - return cls( - target=d['target'], - target_type=TargetType(d['target_type'])) + return cls(target=d["target"], target_type=TargetType(d["target_type"])) @attr.s(frozen=True) class Snapshot(BaseModel, HashableObject): """Represents the full state of an origin at a given point in time.""" + branches = attr.ib( - type=Dict[bytes, Optional[SnapshotBranch]], - validator=type_validator()) - id = attr.ib( - type=Sha1Git, - validator=type_validator(), - default=b'') + type=Dict[bytes, Optional[SnapshotBranch]], validator=type_validator() + ) + id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") @staticmethod def compute_hash(object_dict): return snapshot_identifier(object_dict) @classmethod def from_dict(cls, d): d = d.copy() return cls( branches={ name: SnapshotBranch.from_dict(branch) if branch else None - for (name, branch) in d.pop('branches').items() + for (name, branch) in d.pop("branches").items() }, - **d) + **d + ) @attr.s(frozen=True) class Release(BaseModel, HashableObject): - name = attr.ib( - type=bytes, - validator=type_validator()) - message = attr.ib( - type=Optional[bytes], - validator=type_validator()) 
- target = attr.ib( - type=Optional[Sha1Git], - validator=type_validator()) - target_type = attr.ib( - type=ObjectType, - validator=type_validator()) - synthetic = attr.ib( - type=bool, - validator=type_validator()) - author = attr.ib( - type=Optional[Person], - validator=type_validator(), - default=None) + name = attr.ib(type=bytes, validator=type_validator()) + message = attr.ib(type=Optional[bytes], validator=type_validator()) + target = attr.ib(type=Optional[Sha1Git], validator=type_validator()) + target_type = attr.ib(type=ObjectType, validator=type_validator()) + synthetic = attr.ib(type=bool, validator=type_validator()) + author = attr.ib(type=Optional[Person], validator=type_validator(), default=None) date = attr.ib( - type=Optional[TimestampWithTimezone], - validator=type_validator(), - default=None) + type=Optional[TimestampWithTimezone], validator=type_validator(), default=None + ) metadata = attr.ib( - type=Optional[Dict[str, object]], - validator=type_validator(), - default=None) - id = attr.ib( - type=Sha1Git, - validator=type_validator(), - default=b'') + type=Optional[Dict[str, object]], validator=type_validator(), default=None + ) + id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") @staticmethod def compute_hash(object_dict): return release_identifier(object_dict) @author.validator def check_author(self, attribute, value): """If the author is `None`, checks the date is `None` too.""" if self.author is None and self.date is not None: - raise ValueError('release date must be None if author is None.') + raise ValueError("release date must be None if author is None.") def to_dict(self): rel = super().to_dict() - if rel['metadata'] is None: - del rel['metadata'] + if rel["metadata"] is None: + del rel["metadata"] return rel @classmethod def from_dict(cls, d): d = d.copy() - if d.get('author'): - d['author'] = Person.from_dict(d['author']) - if d.get('date'): - d['date'] = TimestampWithTimezone.from_dict(d['date']) - return cls( - target_type=ObjectType(d.pop('target_type')), - **d) + if d.get("author"): + d["author"] = Person.from_dict(d["author"]) + if d.get("date"): + d["date"] = TimestampWithTimezone.from_dict(d["date"]) + return cls(target_type=ObjectType(d.pop("target_type")), **d) class RevisionType(Enum): - GIT = 'git' - TAR = 'tar' - DSC = 'dsc' - SUBVERSION = 'svn' - MERCURIAL = 'hg' + GIT = "git" + TAR = "tar" + DSC = "dsc" + SUBVERSION = "svn" + MERCURIAL = "hg" @attr.s(frozen=True) class Revision(BaseModel, HashableObject): - message = attr.ib( - type=bytes, - validator=type_validator()) - author = attr.ib( - type=Person, - validator=type_validator()) - committer = attr.ib( - type=Person, - validator=type_validator()) - date = attr.ib( - type=Optional[TimestampWithTimezone], - validator=type_validator()) + message = attr.ib(type=bytes, validator=type_validator()) + author = attr.ib(type=Person, validator=type_validator()) + committer = attr.ib(type=Person, validator=type_validator()) + date = attr.ib(type=Optional[TimestampWithTimezone], validator=type_validator()) committer_date = attr.ib( - type=Optional[TimestampWithTimezone], - validator=type_validator()) - type = attr.ib( - type=RevisionType, - validator=type_validator()) - directory = attr.ib( - type=Sha1Git, - validator=type_validator()) - synthetic = attr.ib( - type=bool, - validator=type_validator()) + type=Optional[TimestampWithTimezone], validator=type_validator() + ) + type = attr.ib(type=RevisionType, validator=type_validator()) + directory = attr.ib(type=Sha1Git, 
validator=type_validator()) + synthetic = attr.ib(type=bool, validator=type_validator()) metadata = attr.ib( - type=Optional[Dict[str, object]], - validator=type_validator(), - default=None) + type=Optional[Dict[str, object]], validator=type_validator(), default=None + ) parents = attr.ib( - type=List[Sha1Git], - validator=type_validator(), - default=attr.Factory(list)) - id = attr.ib( - type=Sha1Git, - validator=type_validator(), - default=b'') + type=List[Sha1Git], validator=type_validator(), default=attr.Factory(list) + ) + id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") @staticmethod def compute_hash(object_dict): return revision_identifier(object_dict) @classmethod def from_dict(cls, d): d = d.copy() - date = d.pop('date') + date = d.pop("date") if date: date = TimestampWithTimezone.from_dict(date) - committer_date = d.pop('committer_date') + committer_date = d.pop("committer_date") if committer_date: - committer_date = TimestampWithTimezone.from_dict( - committer_date) + committer_date = TimestampWithTimezone.from_dict(committer_date) return cls( - author=Person.from_dict(d.pop('author')), - committer=Person.from_dict(d.pop('committer')), + author=Person.from_dict(d.pop("author")), + committer=Person.from_dict(d.pop("committer")), date=date, committer_date=committer_date, - type=RevisionType(d.pop('type')), - **d) + type=RevisionType(d.pop("type")), + **d + ) @attr.s(frozen=True) class DirectoryEntry(BaseModel): - name = attr.ib( - type=bytes, - validator=type_validator()) - type = attr.ib( - type=str, - validator=attr.validators.in_(['file', 'dir', 'rev'])) - target = attr.ib( - type=Sha1Git, - validator=type_validator()) - perms = attr.ib( - type=int, - validator=type_validator()) + name = attr.ib(type=bytes, validator=type_validator()) + type = attr.ib(type=str, validator=attr.validators.in_(["file", "dir", "rev"])) + target = attr.ib(type=Sha1Git, validator=type_validator()) + perms = attr.ib(type=int, validator=type_validator()) """Usually one of the values of `swh.model.from_disk.DentryPerms`.""" @attr.s(frozen=True) class Directory(BaseModel, HashableObject): - entries = attr.ib( - type=List[DirectoryEntry], - validator=type_validator()) - id = attr.ib( - type=Sha1Git, - validator=type_validator(), - default=b'') + entries = attr.ib(type=List[DirectoryEntry], validator=type_validator()) + id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"") @staticmethod def compute_hash(object_dict): return directory_identifier(object_dict) @classmethod def from_dict(cls, d): d = d.copy() return cls( - entries=[DirectoryEntry.from_dict(entry) - for entry in d.pop('entries')], - **d) + entries=[DirectoryEntry.from_dict(entry) for entry in d.pop("entries")], **d + ) @attr.s(frozen=True) class BaseContent(BaseModel): status = attr.ib( - type=str, - validator=attr.validators.in_(['visible', 'hidden', 'absent'])) + type=str, validator=attr.validators.in_(["visible", "hidden", "absent"]) + ) @staticmethod def _hash_data(data: bytes): """Hash some data, returning most of the fields of a content object""" d = MultiHash.from_data(data).digest() - d['data'] = data - d['length'] = len(data) + d["data"] = data + d["length"] = len(data) return d @classmethod def from_dict(cls, d, use_subclass=True): if use_subclass: # Chooses a subclass to instantiate instead. 
- if d['status'] == 'absent': + if d["status"] == "absent": return SkippedContent.from_dict(d) else: return Content.from_dict(d) else: return super().from_dict(d) def get_hash(self, hash_name): if hash_name not in DEFAULT_ALGORITHMS: - raise ValueError('{} is not a valid hash name.'.format(hash_name)) + raise ValueError("{} is not a valid hash name.".format(hash_name)) return getattr(self, hash_name) def hashes(self) -> Dict[str, bytes]: """Returns a dictionary {hash_name: hash_value}""" return {algo: getattr(self, algo) for algo in DEFAULT_ALGORITHMS} @attr.s(frozen=True) class Content(BaseContent): - sha1 = attr.ib( - type=bytes, - validator=type_validator()) - sha1_git = attr.ib( - type=Sha1Git, - validator=type_validator()) - sha256 = attr.ib( - type=bytes, - validator=type_validator()) - blake2s256 = attr.ib( - type=bytes, - validator=type_validator()) - - length = attr.ib( - type=int, - validator=type_validator()) + sha1 = attr.ib(type=bytes, validator=type_validator()) + sha1_git = attr.ib(type=Sha1Git, validator=type_validator()) + sha256 = attr.ib(type=bytes, validator=type_validator()) + blake2s256 = attr.ib(type=bytes, validator=type_validator()) + + length = attr.ib(type=int, validator=type_validator()) status = attr.ib( type=str, - validator=attr.validators.in_(['visible', 'hidden']), - default='visible') + validator=attr.validators.in_(["visible", "hidden"]), + default="visible", + ) - data = attr.ib( - type=Optional[bytes], - validator=type_validator(), - default=None) + data = attr.ib(type=Optional[bytes], validator=type_validator(), default=None) ctime = attr.ib( - type=Optional[datetime.datetime], - validator=type_validator(), - default=None) + type=Optional[datetime.datetime], validator=type_validator(), default=None + ) @length.validator def check_length(self, attribute, value): """Checks the length is positive.""" if value < 0: - raise ValueError('Length must be positive.') + raise ValueError("Length must be positive.") def to_dict(self): content = super().to_dict() - if content['data'] is None: - del content['data'] + if content["data"] is None: + del content["data"] return content @classmethod - def from_data(cls, data, status='visible', ctime=None) -> 'Content': + def from_data(cls, data, status="visible", ctime=None) -> "Content": """Generate a Content from a given `data` byte string. This populates the Content with the hashes and length for the data passed as argument, as well as the data itself. """ d = cls._hash_data(data) - d['status'] = status - d['ctime'] = ctime + d["status"] = status + d["ctime"] = ctime return cls(**d) @classmethod def from_dict(cls, d): - if isinstance(d.get('ctime'), str): + if isinstance(d.get("ctime"), str): d = d.copy() - d['ctime'] = dateutil.parser.parse(d['ctime']) + d["ctime"] = dateutil.parser.parse(d["ctime"]) return super().from_dict(d, use_subclass=False) - def with_data(self) -> 'Content': + def with_data(self) -> "Content": """Loads the `data` attribute; meaning that it is guaranteed not to be None after this call. This call is almost a no-op, but subclasses may overload this method to lazy-load data (e.g.
from disk or objstorage).""" if self.data is None: - raise MissingData('Content data is None.') + raise MissingData("Content data is None.") return self @attr.s(frozen=True) class SkippedContent(BaseContent): - sha1 = attr.ib( - type=Optional[bytes], - validator=type_validator()) - sha1_git = attr.ib( - type=Optional[Sha1Git], - validator=type_validator()) - sha256 = attr.ib( - type=Optional[bytes], - validator=type_validator()) - blake2s256 = attr.ib( - type=Optional[bytes], - validator=type_validator()) - - length = attr.ib( - type=Optional[int], - validator=type_validator()) + sha1 = attr.ib(type=Optional[bytes], validator=type_validator()) + sha1_git = attr.ib(type=Optional[Sha1Git], validator=type_validator()) + sha256 = attr.ib(type=Optional[bytes], validator=type_validator()) + blake2s256 = attr.ib(type=Optional[bytes], validator=type_validator()) - status = attr.ib( - type=str, - validator=attr.validators.in_(['absent'])) - reason = attr.ib( - type=Optional[str], - validator=type_validator(), - default=None) + length = attr.ib(type=Optional[int], validator=type_validator()) + + status = attr.ib(type=str, validator=attr.validators.in_(["absent"])) + reason = attr.ib(type=Optional[str], validator=type_validator(), default=None) - origin = attr.ib( - type=Optional[str], - validator=type_validator(), - default=None) + origin = attr.ib(type=Optional[str], validator=type_validator(), default=None) ctime = attr.ib( - type=Optional[datetime.datetime], - validator=type_validator(), - default=None) + type=Optional[datetime.datetime], validator=type_validator(), default=None + ) @reason.validator def check_reason(self, attribute, value): """Checks that a reason is given, since the status is always 'absent'.""" assert self.reason == value if value is None: - raise ValueError('Must provide a reason if content is absent.') + raise ValueError("Must provide a reason if content is absent.") @length.validator def check_length(self, attribute, value): """Checks the length is positive or -1.""" if value < -1: - raise ValueError('Length must be positive or -1.') + raise ValueError("Length must be positive or -1.") def to_dict(self): content = super().to_dict() - if content['origin'] is None: - del content['origin'] + if content["origin"] is None: + del content["origin"] return content @classmethod def from_data( - cls, - data: bytes, - reason: str, - ctime: Optional[datetime.datetime] = None) -> 'SkippedContent': + cls, data: bytes, reason: str, ctime: Optional[datetime.datetime] = None + ) -> "SkippedContent": """Generate a SkippedContent from a given `data` byte string. This populates the SkippedContent with the hashes and length for the data passed as argument. You can use `attr.evolve` on such a generated content to nullify some of its attributes, e.g. for tests.
""" d = cls._hash_data(data) - del d['data'] - d['status'] = 'absent' - d['reason'] = reason - d['ctime'] = ctime + del d["data"] + d["status"] = "absent" + d["reason"] = reason + d["ctime"] = ctime return cls(**d) @classmethod def from_dict(cls, d): d2 = d.copy() - if d2.pop('data', None) is not None: + if d2.pop("data", None) is not None: raise ValueError('SkippedContent has no "data" attribute %r' % d) return super().from_dict(d2, use_subclass=False) diff --git a/swh/model/tests/fields/test_compound.py b/swh/model/tests/fields/test_compound.py index dffbb04..352bba9 100644 --- a/swh/model/tests/fields/test_compound.py +++ b/swh/model/tests/fields/test_compound.py @@ -1,228 +1,238 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import unittest from swh.model.exceptions import NON_FIELD_ERRORS, ValidationError from swh.model.fields import compound, simple class ValidateCompound(unittest.TestCase): def setUp(self): def validate_always(model): return True def validate_never(model): return False - self.test_model = 'test model' + self.test_model = "test model" self.test_schema = { - 'int': (True, simple.validate_int), - 'str': (True, simple.validate_str), - 'str2': (True, simple.validate_str), - 'datetime': (False, simple.validate_datetime), + "int": (True, simple.validate_int), + "str": (True, simple.validate_str), + "str2": (True, simple.validate_str), + "datetime": (False, simple.validate_datetime), NON_FIELD_ERRORS: validate_always, } self.test_schema_shortcut = self.test_schema.copy() self.test_schema_shortcut[NON_FIELD_ERRORS] = validate_never self.test_schema_field_failed = self.test_schema.copy() - self.test_schema_field_failed['int'] = (True, [simple.validate_int, - validate_never]) + self.test_schema_field_failed["int"] = ( + True, + [simple.validate_int, validate_never], + ) self.test_value = { - 'str': 'value1', - 'str2': 'value2', - 'int': 42, - 'datetime': datetime.datetime(1990, 1, 1, 12, 0, 0, - tzinfo=datetime.timezone.utc), + "str": "value1", + "str2": "value2", + "int": 42, + "datetime": datetime.datetime( + 1990, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc + ), } self.test_value_missing = { - 'str': 'value1', + "str": "value1", } self.test_value_str_error = { - 'str': 1984, - 'str2': 'value2', - 'int': 42, - 'datetime': datetime.datetime(1990, 1, 1, 12, 0, 0, - tzinfo=datetime.timezone.utc), + "str": 1984, + "str2": "value2", + "int": 42, + "datetime": datetime.datetime( + 1990, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc + ), } - self.test_value_missing_keys = {'int'} + self.test_value_missing_keys = {"int"} self.test_value_wrong_type = 42 self.present_keys = set(self.test_value) - self.missing_keys = {'missingkey1', 'missingkey2'} + self.missing_keys = {"missingkey1", "missingkey2"} def test_validate_any_key(self): - self.assertTrue( - compound.validate_any_key(self.test_value, self.present_keys)) + self.assertTrue(compound.validate_any_key(self.test_value, self.present_keys)) self.assertTrue( - compound.validate_any_key(self.test_value, - self.present_keys | self.missing_keys)) + compound.validate_any_key( + self.test_value, self.present_keys | self.missing_keys + ) + ) def test_validate_any_key_missing(self): with self.assertRaises(ValidationError) as cm: compound.validate_any_key(self.test_value, self.missing_keys) exc = cm.exception 
self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'missing-alternative-field') - self.assertEqual(exc.params['missing_fields'], - ', '.join(sorted(self.missing_keys))) + self.assertEqual(exc.code, "missing-alternative-field") + self.assertEqual( + exc.params["missing_fields"], ", ".join(sorted(self.missing_keys)) + ) def test_validate_all_keys(self): - self.assertTrue( - compound.validate_all_keys(self.test_value, self.present_keys)) + self.assertTrue(compound.validate_all_keys(self.test_value, self.present_keys)) def test_validate_all_keys_missing(self): with self.assertRaises(ValidationError) as cm: compound.validate_all_keys(self.test_value, self.missing_keys) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'missing-mandatory-field') - self.assertEqual(exc.params['missing_fields'], - ', '.join(sorted(self.missing_keys))) + self.assertEqual(exc.code, "missing-mandatory-field") + self.assertEqual( + exc.params["missing_fields"], ", ".join(sorted(self.missing_keys)) + ) with self.assertRaises(ValidationError) as cm: - compound.validate_all_keys(self.test_value, - self.present_keys | self.missing_keys) + compound.validate_all_keys( + self.test_value, self.present_keys | self.missing_keys + ) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'missing-mandatory-field') - self.assertEqual(exc.params['missing_fields'], - ', '.join(sorted(self.missing_keys))) + self.assertEqual(exc.code, "missing-mandatory-field") + self.assertEqual( + exc.params["missing_fields"], ", ".join(sorted(self.missing_keys)) + ) def test_validate_against_schema(self): self.assertTrue( - compound.validate_against_schema(self.test_model, self.test_schema, - self.test_value)) + compound.validate_against_schema( + self.test_model, self.test_schema, self.test_value + ) + ) def test_validate_against_schema_wrong_type(self): with self.assertRaises(ValidationError) as cm: - compound.validate_against_schema(self.test_model, self.test_schema, - self.test_value_wrong_type) + compound.validate_against_schema( + self.test_model, self.test_schema, self.test_value_wrong_type + ) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'model-unexpected-type') - self.assertEqual(exc.params['model'], self.test_model) - self.assertEqual(exc.params['type'], - self.test_value_wrong_type.__class__.__name__) + self.assertEqual(exc.code, "model-unexpected-type") + self.assertEqual(exc.params["model"], self.test_model) + self.assertEqual( + exc.params["type"], self.test_value_wrong_type.__class__.__name__ + ) def test_validate_against_schema_mandatory_keys(self): with self.assertRaises(ValidationError) as cm: - compound.validate_against_schema(self.test_model, self.test_schema, - self.test_value_missing) + compound.validate_against_schema( + self.test_model, self.test_schema, self.test_value_missing + ) # The exception should be of the form: # ValidationError({ # 'mandatory_key1': [ValidationError('model-field-mandatory')], # 'mandatory_key2': [ValidationError('model-field-mandatory')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) for key in self.test_value_missing_keys: nested_key = exc.error_dict[key] self.assertIsInstance(nested_key, list) self.assertEqual(len(nested_key), 1) nested = nested_key[0] self.assertIsInstance(nested, ValidationError) - self.assertEqual(nested.code, 'model-field-mandatory') - self.assertEqual(nested.params['field'], key) + self.assertEqual(nested.code, "model-field-mandatory") + 
self.assertEqual(nested.params["field"], key) def test_validate_whole_schema_shortcut_previous_error(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema( - self.test_model, - self.test_schema_shortcut, - self.test_value_missing, + self.test_model, self.test_schema_shortcut, self.test_value_missing, ) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertNotIn(NON_FIELD_ERRORS, exc.error_dict) def test_validate_whole_schema(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema( - self.test_model, - self.test_schema_shortcut, - self.test_value, + self.test_model, self.test_schema_shortcut, self.test_value, ) # The exception should be of the form: # ValidationError({ # NON_FIELD_ERRORS: [ValidationError('model-validation-failed')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(set(exc.error_dict.keys()), {NON_FIELD_ERRORS}) non_field_errors = exc.error_dict[NON_FIELD_ERRORS] self.assertIsInstance(non_field_errors, list) self.assertEqual(len(non_field_errors), 1) nested = non_field_errors[0] self.assertIsInstance(nested, ValidationError) - self.assertEqual(nested.code, 'model-validation-failed') - self.assertEqual(nested.params['model'], self.test_model) - self.assertEqual(nested.params['validator'], 'validate_never') + self.assertEqual(nested.code, "model-validation-failed") + self.assertEqual(nested.params["model"], self.test_model) + self.assertEqual(nested.params["validator"], "validate_never") def test_validate_against_schema_field_error(self): with self.assertRaises(ValidationError) as cm: - compound.validate_against_schema(self.test_model, self.test_schema, - self.test_value_str_error) + compound.validate_against_schema( + self.test_model, self.test_schema, self.test_value_str_error + ) # The exception should be of the form: # ValidationError({ # 'str': [ValidationError('unexpected-type')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(set(exc.error_dict.keys()), {'str'}) + self.assertEqual(set(exc.error_dict.keys()), {"str"}) - str_errors = exc.error_dict['str'] + str_errors = exc.error_dict["str"] self.assertIsInstance(str_errors, list) self.assertEqual(len(str_errors), 1) nested = str_errors[0] self.assertIsInstance(nested, ValidationError) - self.assertEqual(nested.code, 'unexpected-type') + self.assertEqual(nested.code, "unexpected-type") def test_validate_against_schema_field_failed(self): with self.assertRaises(ValidationError) as cm: - compound.validate_against_schema(self.test_model, - self.test_schema_field_failed, - self.test_value) + compound.validate_against_schema( + self.test_model, self.test_schema_field_failed, self.test_value + ) # The exception should be of the form: # ValidationError({ # 'int': [ValidationError('field-validation-failed')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(set(exc.error_dict.keys()), {'int'}) + self.assertEqual(set(exc.error_dict.keys()), {"int"}) - int_errors = exc.error_dict['int'] + int_errors = exc.error_dict["int"] self.assertIsInstance(int_errors, list) self.assertEqual(len(int_errors), 1) nested = int_errors[0] self.assertIsInstance(nested, ValidationError) - self.assertEqual(nested.code, 'field-validation-failed') - self.assertEqual(nested.params['validator'], 'validate_never') - self.assertEqual(nested.params['field'], 'int') + self.assertEqual(nested.code, "field-validation-failed") + self.assertEqual(nested.params["validator"], "validate_never") + 
self.assertEqual(nested.params["field"], "int") diff --git a/swh/model/tests/fields/test_hashes.py b/swh/model/tests/fields/test_hashes.py index 7ce0b78..15dbcc2 100644 --- a/swh/model/tests/fields/test_hashes.py +++ b/swh/model/tests/fields/test_hashes.py @@ -1,150 +1,146 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.model.exceptions import ValidationError from swh.model.fields import hashes class ValidateHashes(unittest.TestCase): def setUp(self): self.valid_byte_hashes = { - 'sha1': b'\xf1\xd2\xd2\xf9\x24\xe9\x86\xac\x86\xfd\xf7\xb3\x6c\x94' - b'\xbc\xdf\x32\xbe\xec\x15', - 'sha1_git': b'\x25\x7c\xc5\x64\x2c\xb1\xa0\x54\xf0\x8c\xc8\x3f\x2d' - b'\x94\x3e\x56\xfd\x3e\xbe\x99', - 'sha256': b'\xb5\xbb\x9d\x80\x14\xa0\xf9\xb1\xd6\x1e\x21\xe7\x96' - b'\xd7\x8d\xcc\xdf\x13\x52\xf2\x3c\xd3\x28\x12\xf4\x85' - b'\x0b\x87\x8a\xe4\x94\x4c', + "sha1": b"\xf1\xd2\xd2\xf9\x24\xe9\x86\xac\x86\xfd\xf7\xb3\x6c\x94" + b"\xbc\xdf\x32\xbe\xec\x15", + "sha1_git": b"\x25\x7c\xc5\x64\x2c\xb1\xa0\x54\xf0\x8c\xc8\x3f\x2d" + b"\x94\x3e\x56\xfd\x3e\xbe\x99", + "sha256": b"\xb5\xbb\x9d\x80\x14\xa0\xf9\xb1\xd6\x1e\x21\xe7\x96" + b"\xd7\x8d\xcc\xdf\x13\x52\xf2\x3c\xd3\x28\x12\xf4\x85" + b"\x0b\x87\x8a\xe4\x94\x4c", } self.valid_str_hashes = { - 'sha1': 'f1d2d2f924e986ac86fdf7b36c94bcdf32beec15', - 'sha1_git': '257cc5642cb1a054f08cc83f2d943e56fd3ebe99', - 'sha256': 'b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f485' - '0b878ae4944c', + "sha1": "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15", + "sha1_git": "257cc5642cb1a054f08cc83f2d943e56fd3ebe99", + "sha256": "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f485" + "0b878ae4944c", } self.bad_hash = object() def test_valid_bytes_hash(self): for hash_type, value in self.valid_byte_hashes.items(): self.assertTrue(hashes.validate_hash(value, hash_type)) def test_valid_str_hash(self): for hash_type, value in self.valid_str_hashes.items(): self.assertTrue(hashes.validate_hash(value, hash_type)) def test_invalid_hash_type(self): - hash_type = 'unknown_hash_type' + hash_type = "unknown_hash_type" with self.assertRaises(ValidationError) as cm: - hashes.validate_hash(self.valid_str_hashes['sha1'], hash_type) + hashes.validate_hash(self.valid_str_hashes["sha1"], hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-type') - self.assertEqual(exc.params['hash_type'], hash_type) + self.assertEqual(exc.code, "unexpected-hash-type") + self.assertEqual(exc.params["hash_type"], hash_type) - self.assertIn('Unexpected hash type', str(exc)) + self.assertIn("Unexpected hash type", str(exc)) self.assertIn(hash_type, str(exc)) def test_invalid_bytes_len(self): for hash_type, value in self.valid_byte_hashes.items(): - value = value + b'\x00\x01' + value = value + b"\x00\x01" with self.assertRaises(ValidationError) as cm: hashes.validate_hash(value, hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-length') - self.assertEqual(exc.params['hash_type'], hash_type) - self.assertEqual(exc.params['length'], len(value)) + self.assertEqual(exc.code, "unexpected-hash-length") + self.assertEqual(exc.params["hash_type"], hash_type) + self.assertEqual(exc.params["length"], len(value)) - self.assertIn('Unexpected length', str(exc)) + self.assertIn("Unexpected 
length", str(exc)) self.assertIn(str(len(value)), str(exc)) def test_invalid_str_len(self): for hash_type, value in self.valid_str_hashes.items(): - value = value + '0001' + value = value + "0001" with self.assertRaises(ValidationError) as cm: hashes.validate_hash(value, hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-length') - self.assertEqual(exc.params['hash_type'], hash_type) - self.assertEqual(exc.params['length'], len(value)) + self.assertEqual(exc.code, "unexpected-hash-length") + self.assertEqual(exc.params["hash_type"], hash_type) + self.assertEqual(exc.params["length"], len(value)) - self.assertIn('Unexpected length', str(exc)) + self.assertIn("Unexpected length", str(exc)) self.assertIn(str(len(value)), str(exc)) def test_invalid_str_contents(self): for hash_type, value in self.valid_str_hashes.items(): - value = '\xa2' + value[1:-1] + '\xc3' + value = "\xa2" + value[1:-1] + "\xc3" with self.assertRaises(ValidationError) as cm: hashes.validate_hash(value, hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-contents') - self.assertEqual(exc.params['hash_type'], hash_type) - self.assertEqual(exc.params['unexpected_chars'], '\xa2, \xc3') + self.assertEqual(exc.code, "unexpected-hash-contents") + self.assertEqual(exc.params["hash_type"], hash_type) + self.assertEqual(exc.params["unexpected_chars"], "\xa2, \xc3") - self.assertIn('Unexpected characters', str(exc)) - self.assertIn('\xc3', str(exc)) - self.assertIn('\xa2', str(exc)) + self.assertIn("Unexpected characters", str(exc)) + self.assertIn("\xc3", str(exc)) + self.assertIn("\xa2", str(exc)) def test_invalid_value_type(self): with self.assertRaises(ValidationError) as cm: - hashes.validate_hash(self.bad_hash, 'sha1') + hashes.validate_hash(self.bad_hash, "sha1") exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-value-type') - self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) + self.assertEqual(exc.code, "unexpected-hash-value-type") + self.assertEqual(exc.params["type"], self.bad_hash.__class__.__name__) - self.assertIn('Unexpected type', str(exc)) + self.assertIn("Unexpected type", str(exc)) self.assertIn(self.bad_hash.__class__.__name__, str(exc)) def test_validate_sha1(self): - self.assertTrue(hashes.validate_sha1(self.valid_byte_hashes['sha1'])) - self.assertTrue(hashes.validate_sha1(self.valid_str_hashes['sha1'])) + self.assertTrue(hashes.validate_sha1(self.valid_byte_hashes["sha1"])) + self.assertTrue(hashes.validate_sha1(self.valid_str_hashes["sha1"])) with self.assertRaises(ValidationError) as cm: hashes.validate_sha1(self.bad_hash) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-value-type') - self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) + self.assertEqual(exc.code, "unexpected-hash-value-type") + self.assertEqual(exc.params["type"], self.bad_hash.__class__.__name__) def test_validate_sha1_git(self): - self.assertTrue( - hashes.validate_sha1_git(self.valid_byte_hashes['sha1_git'])) - self.assertTrue( - hashes.validate_sha1_git(self.valid_str_hashes['sha1_git'])) + self.assertTrue(hashes.validate_sha1_git(self.valid_byte_hashes["sha1_git"])) + self.assertTrue(hashes.validate_sha1_git(self.valid_str_hashes["sha1_git"])) with self.assertRaises(ValidationError) as cm: hashes.validate_sha1_git(self.bad_hash) exc = cm.exception 
self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-value-type') - self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) + self.assertEqual(exc.code, "unexpected-hash-value-type") + self.assertEqual(exc.params["type"], self.bad_hash.__class__.__name__) def test_validate_sha256(self): - self.assertTrue( - hashes.validate_sha256(self.valid_byte_hashes['sha256'])) - self.assertTrue( - hashes.validate_sha256(self.valid_str_hashes['sha256'])) + self.assertTrue(hashes.validate_sha256(self.valid_byte_hashes["sha256"])) + self.assertTrue(hashes.validate_sha256(self.valid_str_hashes["sha256"])) with self.assertRaises(ValidationError) as cm: hashes.validate_sha256(self.bad_hash) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-hash-value-type') - self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) + self.assertEqual(exc.code, "unexpected-hash-value-type") + self.assertEqual(exc.params["type"], self.bad_hash.__class__.__name__) diff --git a/swh/model/tests/fields/test_simple.py b/swh/model/tests/fields/test_simple.py index ab5e262..25b1f1b 100644 --- a/swh/model/tests/fields/test_simple.py +++ b/swh/model/tests/fields/test_simple.py @@ -1,123 +1,125 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import unittest from swh.model.exceptions import ValidationError from swh.model.fields import simple class ValidateSimple(unittest.TestCase): def setUp(self): - self.valid_str = 'I am a valid string' + self.valid_str = "I am a valid string" - self.valid_bytes = b'I am a valid bytes object' + self.valid_bytes = b"I am a valid bytes object" - self.enum_values = {'an enum value', 'other', 'and another'} - self.invalid_enum_value = 'invalid enum value' + self.enum_values = {"an enum value", "other", "and another"} + self.invalid_enum_value = "invalid enum value" self.valid_int = 42 self.valid_real = 42.42 - self.valid_datetime = datetime.datetime(1999, 1, 1, 12, 0, 0, - tzinfo=datetime.timezone.utc) + self.valid_datetime = datetime.datetime( + 1999, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc + ) self.invalid_datetime_notz = datetime.datetime(1999, 1, 1, 12, 0, 0) def test_validate_int(self): self.assertTrue(simple.validate_int(self.valid_int)) def test_validate_int_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_int(self.valid_str) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-type') - self.assertEqual(exc.params['expected_type'], 'Integral') - self.assertEqual(exc.params['type'], 'str') + self.assertEqual(exc.code, "unexpected-type") + self.assertEqual(exc.params["expected_type"], "Integral") + self.assertEqual(exc.params["type"], "str") def test_validate_str(self): self.assertTrue(simple.validate_str(self.valid_str)) def test_validate_str_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_str(self.valid_int) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-type') - self.assertEqual(exc.params['expected_type'], 'str') - self.assertEqual(exc.params['type'], 'int') + self.assertEqual(exc.code, "unexpected-type") + self.assertEqual(exc.params["expected_type"], "str") + self.assertEqual(exc.params["type"], "int") with 
self.assertRaises(ValidationError) as cm: simple.validate_str(self.valid_bytes) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-type') - self.assertEqual(exc.params['expected_type'], 'str') - self.assertEqual(exc.params['type'], 'bytes') + self.assertEqual(exc.code, "unexpected-type") + self.assertEqual(exc.params["expected_type"], "str") + self.assertEqual(exc.params["type"], "bytes") def test_validate_bytes(self): self.assertTrue(simple.validate_bytes(self.valid_bytes)) def test_validate_bytes_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_bytes(self.valid_int) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-type') - self.assertEqual(exc.params['expected_type'], 'bytes') - self.assertEqual(exc.params['type'], 'int') + self.assertEqual(exc.code, "unexpected-type") + self.assertEqual(exc.params["expected_type"], "bytes") + self.assertEqual(exc.params["type"], "int") with self.assertRaises(ValidationError) as cm: simple.validate_bytes(self.valid_str) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-type') - self.assertEqual(exc.params['expected_type'], 'bytes') - self.assertEqual(exc.params['type'], 'str') + self.assertEqual(exc.code, "unexpected-type") + self.assertEqual(exc.params["expected_type"], "bytes") + self.assertEqual(exc.params["type"], "str") def test_validate_datetime(self): self.assertTrue(simple.validate_datetime(self.valid_datetime)) self.assertTrue(simple.validate_datetime(self.valid_int)) self.assertTrue(simple.validate_datetime(self.valid_real)) def test_validate_datetime_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_datetime(self.valid_str) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-type') - self.assertEqual(exc.params['expected_type'], 'one of datetime, Real') - self.assertEqual(exc.params['type'], 'str') + self.assertEqual(exc.code, "unexpected-type") + self.assertEqual(exc.params["expected_type"], "one of datetime, Real") + self.assertEqual(exc.params["type"], "str") def test_validate_datetime_invalid_tz(self): with self.assertRaises(ValidationError) as cm: simple.validate_datetime(self.invalid_datetime_notz) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'datetime-without-tzinfo') + self.assertEqual(exc.code, "datetime-without-tzinfo") def test_validate_enum(self): for value in self.enum_values: self.assertTrue(simple.validate_enum(value, self.enum_values)) def test_validate_enum_invalid_value(self): with self.assertRaises(ValidationError) as cm: simple.validate_enum(self.invalid_enum_value, self.enum_values) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(exc.code, 'unexpected-value') - self.assertEqual(exc.params['value'], self.invalid_enum_value) - self.assertEqual(exc.params['expected_values'], - ', '.join(sorted(self.enum_values))) + self.assertEqual(exc.code, "unexpected-value") + self.assertEqual(exc.params["value"], self.invalid_enum_value) + self.assertEqual( + exc.params["expected_values"], ", ".join(sorted(self.enum_values)) + ) diff --git a/swh/model/tests/generate_testdata.py b/swh/model/tests/generate_testdata.py index 5a274e0..0280a6a 100644 --- a/swh/model/tests/generate_testdata.py +++ b/swh/model/tests/generate_testdata.py @@ -1,66 +1,72 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file
at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import datetime from pytz import all_timezones, timezone from random import choice, randint, random, shuffle from typing import List, Dict from swh.model.hashutil import MultiHash -PROTOCOLS = ['git', 'http', 'https', 'deb', 'svn', 'mock'] -DOMAINS = ['example.com', 'some.long.host.name', 'xn--n28h.tld'] -PATHS = ['', '/', '/stuff', '/stuff/', - '/path/to/resource', - '/path/with/anchor#id=42', - '/path/with/qargs?q=1&b'] -CONTENT_STATUS = ['visible', 'hidden', 'absent'] +PROTOCOLS = ["git", "http", "https", "deb", "svn", "mock"] +DOMAINS = ["example.com", "some.long.host.name", "xn--n28h.tld"] +PATHS = [ + "", + "/", + "/stuff", + "/stuff/", + "/path/to/resource", + "/path/with/anchor#id=42", + "/path/with/qargs?q=1&b", +] +CONTENT_STATUS = ["visible", "hidden", "absent"] MAX_DATE = 3e9 # around 2065 def gen_all_origins(): for protocol in PROTOCOLS: for domain in DOMAINS: for urlpath in PATHS: - yield {'url': '%s://%s%s' % (protocol, domain, urlpath)} + yield {"url": "%s://%s%s" % (protocol, domain, urlpath)} ORIGINS = list(gen_all_origins()) def gen_origins(n: int = 100) -> List: """Returns a list of n randomly generated origins suitable for using as Storage.add_origin() argument. """ origins = ORIGINS[:] shuffle(origins) return origins[:n] def gen_content(): size = randint(1, 10 * 1024) data = bytes(randint(0, 255) for i in range(size)) status = choice(CONTENT_STATUS) h = MultiHash.from_data(data) - ctime = datetime.fromtimestamp( - random() * MAX_DATE, timezone(choice(all_timezones))) - content = {'data': data, - 'status': status, - 'length': size, - 'ctime': ctime, - **h.digest()} - if status == 'absent': - content['reason'] = 'why not' - content['data'] = None + ctime = datetime.fromtimestamp(random() * MAX_DATE, timezone(choice(all_timezones))) + content = { + "data": data, + "status": status, + "length": size, + "ctime": ctime, + **h.digest(), + } + if status == "absent": + content["reason"] = "why not" + content["data"] = None return content def gen_contents(n=20) -> List[Dict]: """Returns a list of n randomly generated content objects (as dict) suitable for using as Storage.content_add() argument. 
""" return [gen_content() for i in range(n)] diff --git a/swh/model/tests/generate_testdata_from_disk.py b/swh/model/tests/generate_testdata_from_disk.py index 35d4f48..063e390 100644 --- a/swh/model/tests/generate_testdata_from_disk.py +++ b/swh/model/tests/generate_testdata_from_disk.py @@ -1,92 +1,92 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from operator import itemgetter import os import sys from swh.model.from_disk import Directory, DentryPerms from swh.model.hashutil import ALGORITHMS, hash_to_hex def generate_from_directory(varname, directory, indent=0): """Generate test data from a given directory""" + def get_data(member, path): yield (path, member.get_data()) if isinstance(member, Directory): for name, child in member.items(): yield from get_data(child, os.path.join(path, name)) - data = dict(get_data(directory, b'')) + data = dict(get_data(directory, b"")) out = [] def format_hash(h, indent=0): - spindent = ' ' * indent + spindent = " " * indent if len(h) > 20: - cutoff = len(h)//2 + cutoff = len(h) // 2 parts = h[:cutoff], h[cutoff:] else: parts = [h] - out.append('hash_to_bytes(\n') + out.append("hash_to_bytes(\n") for part in parts: - out.append(spindent + ' %s\n' % repr(hash_to_hex(part))) - out.append(spindent + ')') + out.append(spindent + " %s\n" % repr(hash_to_hex(part))) + out.append(spindent + ")") def format_dict_items(d, indent=0): - spindent = ' ' * indent + spindent = " " * indent for key, value in sorted(d.items()): if isinstance(key, bytes): - out.append(spindent + repr(key) + ': {\n') + out.append(spindent + repr(key) + ": {\n") format_dict_items(value, indent=indent + 4) - out.append(spindent + '}') + out.append(spindent + "}") else: - out.append(spindent + repr(key) + ': ') - if key == 'entries': + out.append(spindent + repr(key) + ": ") + if key == "entries": if not value: - out.append('[]') + out.append("[]") else: - out.append('[') + out.append("[") last_index = len(value) - 1 for i, entry in enumerate( - sorted(value, key=itemgetter('name'))): + sorted(value, key=itemgetter("name")) + ): if i: - out.append(' ') - out.append('{\n') + out.append(" ") + out.append("{\n") format_dict_items(entry, indent=indent + 4) if i != last_index: - out.append(spindent + '},') - out.append(spindent + '}]') - elif key in ALGORITHMS | {'id', 'target'}: + out.append(spindent + "},") + out.append(spindent + "}]") + elif key in ALGORITHMS | {"id", "target"}: format_hash(value, indent=indent) elif isinstance(value, DentryPerms): out.append(str(value)) else: out.append(repr(value)) - out.append(',\n') + out.append(",\n") - spindent = ' ' * indent - out.append(spindent + '%s = {\n' % varname) + spindent = " " * indent + out.append(spindent + "%s = {\n" % varname) format_dict_items(data, indent=4 + indent) - out.append(spindent + '}') + out.append(spindent + "}") - return ''.join(out) + return "".join(out) -if __name__ == '__main__': +if __name__ == "__main__": if not sys.argv[1:]: print("Usage: %s dir1 dir2" % sys.argv[0], file=sys.stderr) exit(2) for dirname in sys.argv[1:]: basename = os.path.basename(dirname) - varname = 'expected_%s' % basename + varname = "expected_%s" % basename testdata = generate_from_directory( - varname, - Directory.from_disk(path=os.fsencode(dirname)), - indent=8 + varname, Directory.from_disk(path=os.fsencode(dirname)), indent=8 ) print(testdata) 
print() diff --git a/swh/model/tests/test_cli.py b/swh/model/tests/test_cli.py index 4d4ff01..fac1d89 100644 --- a/swh/model/tests/test_cli.py +++ b/swh/model/tests/test_cli.py @@ -1,156 +1,144 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import tarfile import tempfile import unittest from click.testing import CliRunner import pytest from swh.model import cli from swh.model.hashutil import hash_to_hex from swh.model.tests.test_from_disk import DataMixin @pytest.mark.fs class TestIdentify(DataMixin, unittest.TestCase): - def setUp(self): super().setUp() self.runner = CliRunner() def assertPidOK(self, result, pid): self.assertEqual(result.exit_code, 0) self.assertEqual(result.output.split()[0], pid) def test_content_id(self): """identify file content""" self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) - result = self.runner.invoke(cli.identify, - ['--type', 'content', path]) - self.assertPidOK(result, - 'swh:1:cnt:' + hash_to_hex(content['sha1_git'])) + result = self.runner.invoke(cli.identify, ["--type", "content", path]) + self.assertPidOK(result, "swh:1:cnt:" + hash_to_hex(content["sha1_git"])) def test_content_id_from_stdin(self): """identify file content""" self.make_contents(self.tmpdir_name) for _, content in self.contents.items(): - result = self.runner.invoke(cli.identify, - input=content['data']) - self.assertPidOK(result, - 'swh:1:cnt:' + hash_to_hex(content['sha1_git'])) + result = self.runner.invoke(cli.identify, input=content["data"]) + self.assertPidOK(result, "swh:1:cnt:" + hash_to_hex(content["sha1_git"])) def test_directory_id(self): """identify an entire directory""" self.make_from_tarball(self.tmpdir_name) - path = os.path.join(self.tmpdir_name, b'sample-folder') - result = self.runner.invoke(cli.identify, - ['--type', 'directory', path]) - self.assertPidOK(result, - 'swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759') + path = os.path.join(self.tmpdir_name, b"sample-folder") + result = self.runner.invoke(cli.identify, ["--type", "directory", path]) + self.assertPidOK(result, "swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759") def test_snapshot_id(self): """identify a snapshot""" - tarball = os.path.join(os.path.dirname(__file__), 'data', 'repos', - 'sample-repo.tgz') - with tempfile.TemporaryDirectory(prefix='swh.model.cli') as d: - with tarfile.open(tarball, 'r:gz') as t: + tarball = os.path.join( + os.path.dirname(__file__), "data", "repos", "sample-repo.tgz" + ) + with tempfile.TemporaryDirectory(prefix="swh.model.cli") as d: + with tarfile.open(tarball, "r:gz") as t: t.extractall(d) - repo_dir = os.path.join(d, 'sample-repo') - result = self.runner.invoke(cli.identify, - ['--type', 'snapshot', repo_dir]) + repo_dir = os.path.join(d, "sample-repo") + result = self.runner.invoke( + cli.identify, ["--type", "snapshot", repo_dir] + ) self.assertPidOK( - result, - 'swh:1:snp:abc888898124270905a0ef3c67e872ce08e7e0c1') + result, "swh:1:snp:abc888898124270905a0ef3c67e872ce08e7e0c1" + ) def test_origin_id(self): """identify an origin URL""" - url = 'https://github.com/torvalds/linux' - result = self.runner.invoke(cli.identify, ['--type', 'origin', url]) - self.assertPidOK(result, - 'swh:1:ori:b63a575fe3faab7692c9f38fb09d4bb45651bb0f') + url = 
"https://github.com/torvalds/linux" + result = self.runner.invoke(cli.identify, ["--type", "origin", url]) + self.assertPidOK(result, "swh:1:ori:b63a575fe3faab7692c9f38fb09d4bb45651bb0f") def test_symlink(self): """identify symlink --- both itself and target""" - regular = os.path.join(self.tmpdir_name, b'foo.txt') - link = os.path.join(self.tmpdir_name, b'bar.txt') - open(regular, 'w').write('foo\n') + regular = os.path.join(self.tmpdir_name, b"foo.txt") + link = os.path.join(self.tmpdir_name, b"bar.txt") + open(regular, "w").write("foo\n") os.symlink(os.path.basename(regular), link) result = self.runner.invoke(cli.identify, [link]) - self.assertPidOK(result, - 'swh:1:cnt:257cc5642cb1a054f08cc83f2d943e56fd3ebe99') + self.assertPidOK(result, "swh:1:cnt:257cc5642cb1a054f08cc83f2d943e56fd3ebe99") - result = self.runner.invoke(cli.identify, ['--no-dereference', link]) - self.assertPidOK(result, - 'swh:1:cnt:996f1789ff67c0e3f69ef5933a55d54c5d0e9954') + result = self.runner.invoke(cli.identify, ["--no-dereference", link]) + self.assertPidOK(result, "swh:1:cnt:996f1789ff67c0e3f69ef5933a55d54c5d0e9954") def test_show_filename(self): """filename is shown by default""" self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) - result = self.runner.invoke(cli.identify, - ['--type', 'content', path]) + result = self.runner.invoke(cli.identify, ["--type", "content", path]) self.assertEqual(result.exit_code, 0) - self.assertEqual(result.output.rstrip(), - 'swh:1:cnt:%s\t%s' % - (hash_to_hex(content['sha1_git']), path.decode())) + self.assertEqual( + result.output.rstrip(), + "swh:1:cnt:%s\t%s" % (hash_to_hex(content["sha1_git"]), path.decode()), + ) def test_hide_filename(self): """filename is hidden upon request""" self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) - result = self.runner.invoke(cli.identify, - ['--type', 'content', '--no-filename', - path]) - self.assertPidOK(result, - 'swh:1:cnt:' + hash_to_hex(content['sha1_git'])) + result = self.runner.invoke( + cli.identify, ["--type", "content", "--no-filename", path] + ) + self.assertPidOK(result, "swh:1:cnt:" + hash_to_hex(content["sha1_git"])) def test_auto_content(self): """automatic object type detection: content""" - with tempfile.NamedTemporaryFile(prefix='swh.model.cli') as f: + with tempfile.NamedTemporaryFile(prefix="swh.model.cli") as f: result = self.runner.invoke(cli.identify, [f.name]) self.assertEqual(result.exit_code, 0) - self.assertRegex(result.output, r'^swh:\d+:cnt:') + self.assertRegex(result.output, r"^swh:\d+:cnt:") def test_auto_directory(self): """automatic object type detection: directory""" - with tempfile.TemporaryDirectory(prefix='swh.model.cli') as dirname: + with tempfile.TemporaryDirectory(prefix="swh.model.cli") as dirname: result = self.runner.invoke(cli.identify, [dirname]) self.assertEqual(result.exit_code, 0) - self.assertRegex(result.output, r'^swh:\d+:dir:') + self.assertRegex(result.output, r"^swh:\d+:dir:") def test_auto_origin(self): """automatic object type detection: origin""" - result = self.runner.invoke(cli.identify, - ['https://github.com/torvalds/linux']) + result = self.runner.invoke(cli.identify, ["https://github.com/torvalds/linux"]) self.assertEqual(result.exit_code, 0) - self.assertRegex(result.output, r'^swh:\d+:ori:') + self.assertRegex(result.output, r"^swh:\d+:ori:") def test_verify_content(self): """identifier verification""" 
self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): - expected_id = 'swh:1:cnt:' + hash_to_hex(content['sha1_git']) + expected_id = "swh:1:cnt:" + hash_to_hex(content["sha1_git"]) # match path = os.path.join(self.tmpdir_name, filename) - result = self.runner.invoke(cli.identify, - ['--verify', expected_id, path]) + result = self.runner.invoke(cli.identify, ["--verify", expected_id, path]) self.assertEqual(result.exit_code, 0) # mismatch - with open(path, 'a') as f: - f.write('trailing garbage to make verification fail') - result = self.runner.invoke(cli.identify, - ['--verify', expected_id, path]) + with open(path, "a") as f: + f.write("trailing garbage to make verification fail") + result = self.runner.invoke(cli.identify, ["--verify", expected_id, path]) self.assertEqual(result.exit_code, 1) diff --git a/swh/model/tests/test_from_disk.py b/swh/model/tests/test_from_disk.py index d9881a1..0ebf30a 100644 --- a/swh/model/tests/test_from_disk.py +++ b/swh/model/tests/test_from_disk.py @@ -1,963 +1,939 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import pytest import tarfile import tempfile import unittest from typing import ClassVar, Optional from swh.model import from_disk -from swh.model.from_disk import ( - Content, DentryPerms, Directory, DiskBackedContent -) +from swh.model.from_disk import Content, DentryPerms, Directory, DiskBackedContent from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex from swh.model import model -TEST_DATA = os.path.join(os.path.dirname(__file__), 'data') +TEST_DATA = os.path.join(os.path.dirname(__file__), "data") class ModeToPerms(unittest.TestCase): def setUp(self): super().setUp() # Generate a full permissions map self.perms_map = {} # Symlinks for i in range(0o120000, 0o127777 + 1): self.perms_map[i] = DentryPerms.symlink # Directories for i in range(0o040000, 0o047777 + 1): self.perms_map[i] = DentryPerms.directory # Other file types: socket, regular file, block device, character # device, fifo all map to regular files for ft in [0o140000, 0o100000, 0o060000, 0o020000, 0o010000]: for i in range(ft, ft + 0o7777 + 1): if i & 0o111: # executable bits are set self.perms_map[i] = DentryPerms.executable_content else: self.perms_map[i] = DentryPerms.content def test_exhaustive_mode_to_perms(self): for fmode, perm in self.perms_map.items(): self.assertEqual(perm, from_disk.mode_to_perms(fmode)) class TestDiskBackedContent(unittest.TestCase): def test_with_data(self): expected_content = model.Content( - length=42, status='visible', data=b'foo bar', - sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux') - with tempfile.NamedTemporaryFile(mode='w+b') as fd: + length=42, + status="visible", + data=b"foo bar", + sha1=b"foo", + sha1_git=b"bar", + sha256=b"baz", + blake2s256=b"qux", + ) + with tempfile.NamedTemporaryFile(mode="w+b") as fd: content = DiskBackedContent( - length=42, status='visible', path=fd.name, - sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux') - fd.write(b'foo bar') + length=42, + status="visible", + path=fd.name, + sha1=b"foo", + sha1_git=b"bar", + sha256=b"baz", + blake2s256=b"qux", + ) + fd.write(b"foo bar") fd.seek(0) content_with_data = content.with_data() assert expected_content == content_with_data def test_lazy_data(self): - with 
tempfile.NamedTemporaryFile(mode='w+b') as fd: - fd.write(b'foo') + with tempfile.NamedTemporaryFile(mode="w+b") as fd: + fd.write(b"foo") fd.seek(0) content = DiskBackedContent( - length=42, status='visible', path=fd.name, - sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux') - fd.write(b'bar') + length=42, + status="visible", + path=fd.name, + sha1=b"foo", + sha1_git=b"bar", + sha256=b"baz", + blake2s256=b"qux", + ) + fd.write(b"bar") fd.seek(0) content_with_data = content.with_data() - fd.write(b'baz') + fd.write(b"baz") fd.seek(0) - assert content_with_data.data == b'bar' + assert content_with_data.data == b"bar" def test_with_data_cannot_read(self): - with tempfile.NamedTemporaryFile(mode='w+b') as fd: + with tempfile.NamedTemporaryFile(mode="w+b") as fd: content = DiskBackedContent( - length=42, status='visible', path=fd.name, - sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux') + length=42, + status="visible", + path=fd.name, + sha1=b"foo", + sha1_git=b"bar", + sha256=b"baz", + blake2s256=b"qux", + ) with pytest.raises(OSError): content.with_data() def test_missing_path(self): with pytest.raises(TypeError): DiskBackedContent( - length=42, status='visible', - sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux') + length=42, + status="visible", + sha1=b"foo", + sha1_git=b"bar", + sha256=b"baz", + blake2s256=b"qux", + ) with pytest.raises(TypeError): DiskBackedContent( - length=42, status='visible', path=None, - sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux') + length=42, + status="visible", + path=None, + sha1=b"foo", + sha1_git=b"bar", + sha256=b"baz", + blake2s256=b"qux", + ) class DataMixin: maxDiff = None # type: ClassVar[Optional[int]] def setUp(self): - self.tmpdir = tempfile.TemporaryDirectory( - prefix='swh.model.from_disk' - ) + self.tmpdir = tempfile.TemporaryDirectory(prefix="swh.model.from_disk") self.tmpdir_name = os.fsencode(self.tmpdir.name) self.contents = { - b'file': { - 'data': b'42\n', - 'sha1': hash_to_bytes( - '34973274ccef6ab4dfaaf86599792fa9c3fe4689' - ), - 'sha256': hash_to_bytes( - '084c799cd551dd1d8d5c5f9a5d593b2e' - '931f5e36122ee5c793c1d08a19839cc0' - ), - 'sha1_git': hash_to_bytes( - 'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'), - 'blake2s256': hash_to_bytes( - 'd5fe1939576527e42cfd76a9455a2432' - 'fe7f56669564577dd93c4280e76d661d' - ), - 'length': 3, - 'mode': 0o100644 + b"file": { + "data": b"42\n", + "sha1": hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4689"), + "sha256": hash_to_bytes( + "084c799cd551dd1d8d5c5f9a5d593b2e" + "931f5e36122ee5c793c1d08a19839cc0" + ), + "sha1_git": hash_to_bytes("d81cc0710eb6cf9efd5b920a8453e1e07157b6cd"), + "blake2s256": hash_to_bytes( + "d5fe1939576527e42cfd76a9455a2432" + "fe7f56669564577dd93c4280e76d661d" + ), + "length": 3, + "mode": 0o100644, }, } self.symlinks = { - b'symlink': { - 'data': b'target', - 'blake2s256': hash_to_bytes( - '595d221b30fdd8e10e2fdf18376e688e' - '9f18d56fd9b6d1eb6a822f8c146c6da6' - ), - 'sha1': hash_to_bytes( - '0e8a3ad980ec179856012b7eecf4327e99cd44cd' - ), - 'sha1_git': hash_to_bytes( - '1de565933b05f74c75ff9a6520af5f9f8a5a2f1d' - ), - 'sha256': hash_to_bytes( - '34a04005bcaf206eec990bd9637d9fdb' - '6725e0a0c0d4aebf003f17f4c956eb5c' - ), - 'length': 6, - 'perms': DentryPerms.symlink, + b"symlink": { + "data": b"target", + "blake2s256": hash_to_bytes( + "595d221b30fdd8e10e2fdf18376e688e" + "9f18d56fd9b6d1eb6a822f8c146c6da6" + ), + "sha1": hash_to_bytes("0e8a3ad980ec179856012b7eecf4327e99cd44cd"), + "sha1_git": 
hash_to_bytes("1de565933b05f74c75ff9a6520af5f9f8a5a2f1d"), + "sha256": hash_to_bytes( + "34a04005bcaf206eec990bd9637d9fdb" + "6725e0a0c0d4aebf003f17f4c956eb5c" + ), + "length": 6, + "perms": DentryPerms.symlink, } } self.specials = { - b'fifo': os.mkfifo, + b"fifo": os.mkfifo, } self.empty_content = { - 'data': b'', - 'length': 0, - 'blake2s256': hash_to_bytes( - '69217a3079908094e11121d042354a7c' - '1f55b6482ca1a51e1b250dfd1ed0eef9' + "data": b"", + "length": 0, + "blake2s256": hash_to_bytes( + "69217a3079908094e11121d042354a7c" "1f55b6482ca1a51e1b250dfd1ed0eef9" ), - 'sha1': hash_to_bytes( - 'da39a3ee5e6b4b0d3255bfef95601890afd80709' + "sha1": hash_to_bytes("da39a3ee5e6b4b0d3255bfef95601890afd80709"), + "sha1_git": hash_to_bytes("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"), + "sha256": hash_to_bytes( + "e3b0c44298fc1c149afbf4c8996fb924" "27ae41e4649b934ca495991b7852b855" ), - 'sha1_git': hash_to_bytes( - 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' - ), - 'sha256': hash_to_bytes( - 'e3b0c44298fc1c149afbf4c8996fb924' - '27ae41e4649b934ca495991b7852b855' - ), - 'perms': DentryPerms.content, + "perms": DentryPerms.content, } self.empty_directory = { - 'id': hash_to_bytes( - '4b825dc642cb6eb9a060e54bf8d69288fbee4904' - ), - 'entries': [], + "id": hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), + "entries": [], } # Generated with generate_testdata_from_disk self.tarball_contents = { - b'': { - 'entries': [{ - 'name': b'bar', - 'perms': DentryPerms.directory, - 'target': hash_to_bytes( - '3c1f578394f4623f74a0ba7fe761729f59fc6ec4' - ), - 'type': 'dir', - }, { - 'name': b'empty-folder', - 'perms': DentryPerms.directory, - 'target': hash_to_bytes( - '4b825dc642cb6eb9a060e54bf8d69288fbee4904' - ), - 'type': 'dir', - }, { - 'name': b'foo', - 'perms': DentryPerms.directory, - 'target': hash_to_bytes( - '2b41c40f0d1fbffcba12497db71fba83fcca96e5' - ), - 'type': 'dir', - }, { - 'name': b'link-to-another-quote', - 'perms': DentryPerms.symlink, - 'target': hash_to_bytes( - '7d5c08111e21c8a9f71540939998551683375fad' - ), - 'type': 'file', - }, { - 'name': b'link-to-binary', - 'perms': DentryPerms.symlink, - 'target': hash_to_bytes( - 'e86b45e538d9b6888c969c89fbd22a85aa0e0366' - ), - 'type': 'file', - }, { - 'name': b'link-to-foo', - 'perms': DentryPerms.symlink, - 'target': hash_to_bytes( - '19102815663d23f8b75a47e7a01965dcdc96468c' - ), - 'type': 'file', - }, { - 'name': b'some-binary', - 'perms': DentryPerms.executable_content, - 'target': hash_to_bytes( - '68769579c3eaadbe555379b9c3538e6628bae1eb' - ), - 'type': 'file', - }], - 'id': hash_to_bytes( - 'e8b0f1466af8608c8a3fb9879db172b887e80759' - ), + b"": { + "entries": [ + { + "name": b"bar", + "perms": DentryPerms.directory, + "target": hash_to_bytes( + "3c1f578394f4623f74a0ba7fe761729f59fc6ec4" + ), + "type": "dir", + }, + { + "name": b"empty-folder", + "perms": DentryPerms.directory, + "target": hash_to_bytes( + "4b825dc642cb6eb9a060e54bf8d69288fbee4904" + ), + "type": "dir", + }, + { + "name": b"foo", + "perms": DentryPerms.directory, + "target": hash_to_bytes( + "2b41c40f0d1fbffcba12497db71fba83fcca96e5" + ), + "type": "dir", + }, + { + "name": b"link-to-another-quote", + "perms": DentryPerms.symlink, + "target": hash_to_bytes( + "7d5c08111e21c8a9f71540939998551683375fad" + ), + "type": "file", + }, + { + "name": b"link-to-binary", + "perms": DentryPerms.symlink, + "target": hash_to_bytes( + "e86b45e538d9b6888c969c89fbd22a85aa0e0366" + ), + "type": "file", + }, + { + "name": b"link-to-foo", + "perms": DentryPerms.symlink, + "target": 
hash_to_bytes( + "19102815663d23f8b75a47e7a01965dcdc96468c" + ), + "type": "file", + }, + { + "name": b"some-binary", + "perms": DentryPerms.executable_content, + "target": hash_to_bytes( + "68769579c3eaadbe555379b9c3538e6628bae1eb" + ), + "type": "file", + }, + ], + "id": hash_to_bytes("e8b0f1466af8608c8a3fb9879db172b887e80759"), }, - b'bar': { - 'entries': [{ - 'name': b'barfoo', - 'perms': DentryPerms.directory, - 'target': hash_to_bytes( - 'c3020f6bf135a38c6df3afeb5fb38232c5e07087' - ), - 'type': 'dir', - }], - 'id': hash_to_bytes( - '3c1f578394f4623f74a0ba7fe761729f59fc6ec4' - ), + b"bar": { + "entries": [ + { + "name": b"barfoo", + "perms": DentryPerms.directory, + "target": hash_to_bytes( + "c3020f6bf135a38c6df3afeb5fb38232c5e07087" + ), + "type": "dir", + } + ], + "id": hash_to_bytes("3c1f578394f4623f74a0ba7fe761729f59fc6ec4"), }, - b'bar/barfoo': { - 'entries': [{ - 'name': b'another-quote.org', - 'perms': DentryPerms.content, - 'target': hash_to_bytes( - '133693b125bad2b4ac318535b84901ebb1f6b638' - ), - 'type': 'file', - }], - 'id': hash_to_bytes( - 'c3020f6bf135a38c6df3afeb5fb38232c5e07087' - ), + b"bar/barfoo": { + "entries": [ + { + "name": b"another-quote.org", + "perms": DentryPerms.content, + "target": hash_to_bytes( + "133693b125bad2b4ac318535b84901ebb1f6b638" + ), + "type": "file", + } + ], + "id": hash_to_bytes("c3020f6bf135a38c6df3afeb5fb38232c5e07087"), }, - b'bar/barfoo/another-quote.org': { - 'blake2s256': hash_to_bytes( - 'd26c1cad82d43df0bffa5e7be11a60e3' - '4adb85a218b433cbce5278b10b954fe8' - ), - 'length': 72, - 'perms': DentryPerms.content, - 'sha1': hash_to_bytes( - '90a6138ba59915261e179948386aa1cc2aa9220a' - ), - 'sha1_git': hash_to_bytes( - '133693b125bad2b4ac318535b84901ebb1f6b638' - ), - 'sha256': hash_to_bytes( - '3db5ae168055bcd93a4d08285dc99ffe' - 'e2883303b23fac5eab850273a8ea5546' + b"bar/barfoo/another-quote.org": { + "blake2s256": hash_to_bytes( + "d26c1cad82d43df0bffa5e7be11a60e3" + "4adb85a218b433cbce5278b10b954fe8" + ), + "length": 72, + "perms": DentryPerms.content, + "sha1": hash_to_bytes("90a6138ba59915261e179948386aa1cc2aa9220a"), + "sha1_git": hash_to_bytes("133693b125bad2b4ac318535b84901ebb1f6b638"), + "sha256": hash_to_bytes( + "3db5ae168055bcd93a4d08285dc99ffe" + "e2883303b23fac5eab850273a8ea5546" ), }, - b'empty-folder': { - 'entries': [], - 'id': hash_to_bytes( - '4b825dc642cb6eb9a060e54bf8d69288fbee4904' - ), + b"empty-folder": { + "entries": [], + "id": hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), }, - b'foo': { - 'entries': [{ - 'name': b'barfoo', - 'perms': DentryPerms.symlink, - 'target': hash_to_bytes( - '8185dfb2c0c2c597d16f75a8a0c37668567c3d7e' - ), - 'type': 'file', - }, { - 'name': b'quotes.md', - 'perms': DentryPerms.content, - 'target': hash_to_bytes( - '7c4c57ba9ff496ad179b8f65b1d286edbda34c9a' - ), - 'type': 'file', - }, { - 'name': b'rel-link-to-barfoo', - 'perms': DentryPerms.symlink, - 'target': hash_to_bytes( - 'acac326ddd63b0bc70840659d4ac43619484e69f' - ), - 'type': 'file', - }], - 'id': hash_to_bytes( - '2b41c40f0d1fbffcba12497db71fba83fcca96e5' - ), + b"foo": { + "entries": [ + { + "name": b"barfoo", + "perms": DentryPerms.symlink, + "target": hash_to_bytes( + "8185dfb2c0c2c597d16f75a8a0c37668567c3d7e" + ), + "type": "file", + }, + { + "name": b"quotes.md", + "perms": DentryPerms.content, + "target": hash_to_bytes( + "7c4c57ba9ff496ad179b8f65b1d286edbda34c9a" + ), + "type": "file", + }, + { + "name": b"rel-link-to-barfoo", + "perms": DentryPerms.symlink, + "target": hash_to_bytes( + 
"acac326ddd63b0bc70840659d4ac43619484e69f" + ), + "type": "file", + }, + ], + "id": hash_to_bytes("2b41c40f0d1fbffcba12497db71fba83fcca96e5"), }, - b'foo/barfoo': { - 'blake2s256': hash_to_bytes( - 'e1252f2caa4a72653c4efd9af871b62b' - 'f2abb7bb2f1b0e95969204bd8a70d4cd' - ), - 'data': b'bar/barfoo', - 'length': 10, - 'perms': DentryPerms.symlink, - 'sha1': hash_to_bytes( - '9057ee6d0162506e01c4d9d5459a7add1fedac37' - ), - 'sha1_git': hash_to_bytes( - '8185dfb2c0c2c597d16f75a8a0c37668567c3d7e' - ), - 'sha256': hash_to_bytes( - '29ad3f5725321b940332c78e403601af' - 'ff61daea85e9c80b4a7063b6887ead68' + b"foo/barfoo": { + "blake2s256": hash_to_bytes( + "e1252f2caa4a72653c4efd9af871b62b" + "f2abb7bb2f1b0e95969204bd8a70d4cd" + ), + "data": b"bar/barfoo", + "length": 10, + "perms": DentryPerms.symlink, + "sha1": hash_to_bytes("9057ee6d0162506e01c4d9d5459a7add1fedac37"), + "sha1_git": hash_to_bytes("8185dfb2c0c2c597d16f75a8a0c37668567c3d7e"), + "sha256": hash_to_bytes( + "29ad3f5725321b940332c78e403601af" + "ff61daea85e9c80b4a7063b6887ead68" ), }, - b'foo/quotes.md': { - 'blake2s256': hash_to_bytes( - 'bf7ce4fe304378651ee6348d3e9336ed' - '5ad603d33e83c83ba4e14b46f9b8a80b' - ), - 'length': 66, - 'perms': DentryPerms.content, - 'sha1': hash_to_bytes( - '1bf0bb721ac92c18a19b13c0eb3d741cbfadebfc' - ), - 'sha1_git': hash_to_bytes( - '7c4c57ba9ff496ad179b8f65b1d286edbda34c9a' - ), - 'sha256': hash_to_bytes( - 'caca942aeda7b308859eb56f909ec96d' - '07a499491690c453f73b9800a93b1659' + b"foo/quotes.md": { + "blake2s256": hash_to_bytes( + "bf7ce4fe304378651ee6348d3e9336ed" + "5ad603d33e83c83ba4e14b46f9b8a80b" + ), + "length": 66, + "perms": DentryPerms.content, + "sha1": hash_to_bytes("1bf0bb721ac92c18a19b13c0eb3d741cbfadebfc"), + "sha1_git": hash_to_bytes("7c4c57ba9ff496ad179b8f65b1d286edbda34c9a"), + "sha256": hash_to_bytes( + "caca942aeda7b308859eb56f909ec96d" + "07a499491690c453f73b9800a93b1659" ), }, - b'foo/rel-link-to-barfoo': { - 'blake2s256': hash_to_bytes( - 'd9c327421588a1cf61f316615005a2e9' - 'c13ac3a4e96d43a24138d718fa0e30db' - ), - 'data': b'../bar/barfoo', - 'length': 13, - 'perms': DentryPerms.symlink, - 'sha1': hash_to_bytes( - 'dc51221d308f3aeb2754db48391b85687c2869f4' - ), - 'sha1_git': hash_to_bytes( - 'acac326ddd63b0bc70840659d4ac43619484e69f' - ), - 'sha256': hash_to_bytes( - '8007d20db2af40435f42ddef4b8ad76b' - '80adbec26b249fdf0473353f8d99df08' + b"foo/rel-link-to-barfoo": { + "blake2s256": hash_to_bytes( + "d9c327421588a1cf61f316615005a2e9" + "c13ac3a4e96d43a24138d718fa0e30db" + ), + "data": b"../bar/barfoo", + "length": 13, + "perms": DentryPerms.symlink, + "sha1": hash_to_bytes("dc51221d308f3aeb2754db48391b85687c2869f4"), + "sha1_git": hash_to_bytes("acac326ddd63b0bc70840659d4ac43619484e69f"), + "sha256": hash_to_bytes( + "8007d20db2af40435f42ddef4b8ad76b" + "80adbec26b249fdf0473353f8d99df08" ), }, - b'link-to-another-quote': { - 'blake2s256': hash_to_bytes( - '2d0e73cea01ba949c1022dc10c8a43e6' - '6180639662e5dc2737b843382f7b1910' - ), - 'data': b'bar/barfoo/another-quote.org', - 'length': 28, - 'perms': DentryPerms.symlink, - 'sha1': hash_to_bytes( - 'cbeed15e79599c90de7383f420fed7acb48ea171' - ), - 'sha1_git': hash_to_bytes( - '7d5c08111e21c8a9f71540939998551683375fad' - ), - 'sha256': hash_to_bytes( - 'e6e17d0793aa750a0440eb9ad5b80b25' - '8076637ef0fb68f3ac2e59e4b9ac3ba6' + b"link-to-another-quote": { + "blake2s256": hash_to_bytes( + "2d0e73cea01ba949c1022dc10c8a43e6" + "6180639662e5dc2737b843382f7b1910" + ), + "data": b"bar/barfoo/another-quote.org", + "length": 28, + "perms": 
DentryPerms.symlink, + "sha1": hash_to_bytes("cbeed15e79599c90de7383f420fed7acb48ea171"), + "sha1_git": hash_to_bytes("7d5c08111e21c8a9f71540939998551683375fad"), + "sha256": hash_to_bytes( + "e6e17d0793aa750a0440eb9ad5b80b25" + "8076637ef0fb68f3ac2e59e4b9ac3ba6" ), }, - b'link-to-binary': { - 'blake2s256': hash_to_bytes( - '9ce18b1adecb33f891ca36664da676e1' - '2c772cc193778aac9a137b8dc5834b9b' - ), - 'data': b'some-binary', - 'length': 11, - 'perms': DentryPerms.symlink, - 'sha1': hash_to_bytes( - 'd0248714948b3a48a25438232a6f99f0318f59f1' - ), - 'sha1_git': hash_to_bytes( - 'e86b45e538d9b6888c969c89fbd22a85aa0e0366' - ), - 'sha256': hash_to_bytes( - '14126e97d83f7d261c5a6889cee73619' - '770ff09e40c5498685aba745be882eff' + b"link-to-binary": { + "blake2s256": hash_to_bytes( + "9ce18b1adecb33f891ca36664da676e1" + "2c772cc193778aac9a137b8dc5834b9b" + ), + "data": b"some-binary", + "length": 11, + "perms": DentryPerms.symlink, + "sha1": hash_to_bytes("d0248714948b3a48a25438232a6f99f0318f59f1"), + "sha1_git": hash_to_bytes("e86b45e538d9b6888c969c89fbd22a85aa0e0366"), + "sha256": hash_to_bytes( + "14126e97d83f7d261c5a6889cee73619" + "770ff09e40c5498685aba745be882eff" ), }, - b'link-to-foo': { - 'blake2s256': hash_to_bytes( - '08d6cad88075de8f192db097573d0e82' - '9411cd91eb6ec65e8fc16c017edfdb74' - ), - 'data': b'foo', - 'length': 3, - 'perms': DentryPerms.symlink, - 'sha1': hash_to_bytes( - '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33' - ), - 'sha1_git': hash_to_bytes( - '19102815663d23f8b75a47e7a01965dcdc96468c' - ), - 'sha256': hash_to_bytes( - '2c26b46b68ffc68ff99b453c1d304134' - '13422d706483bfa0f98a5e886266e7ae' + b"link-to-foo": { + "blake2s256": hash_to_bytes( + "08d6cad88075de8f192db097573d0e82" + "9411cd91eb6ec65e8fc16c017edfdb74" + ), + "data": b"foo", + "length": 3, + "perms": DentryPerms.symlink, + "sha1": hash_to_bytes("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), + "sha1_git": hash_to_bytes("19102815663d23f8b75a47e7a01965dcdc96468c"), + "sha256": hash_to_bytes( + "2c26b46b68ffc68ff99b453c1d304134" + "13422d706483bfa0f98a5e886266e7ae" ), }, - b'some-binary': { - 'blake2s256': hash_to_bytes( - '922e0f7015035212495b090c27577357' - 'a740ddd77b0b9e0cd23b5480c07a18c6' - ), - 'length': 5, - 'perms': DentryPerms.executable_content, - 'sha1': hash_to_bytes( - '0bbc12d7f4a2a15b143da84617d95cb223c9b23c' - ), - 'sha1_git': hash_to_bytes( - '68769579c3eaadbe555379b9c3538e6628bae1eb' - ), - 'sha256': hash_to_bytes( - 'bac650d34a7638bb0aeb5342646d24e3' - 'b9ad6b44c9b383621faa482b990a367d' + b"some-binary": { + "blake2s256": hash_to_bytes( + "922e0f7015035212495b090c27577357" + "a740ddd77b0b9e0cd23b5480c07a18c6" + ), + "length": 5, + "perms": DentryPerms.executable_content, + "sha1": hash_to_bytes("0bbc12d7f4a2a15b143da84617d95cb223c9b23c"), + "sha1_git": hash_to_bytes("68769579c3eaadbe555379b9c3538e6628bae1eb"), + "sha256": hash_to_bytes( + "bac650d34a7638bb0aeb5342646d24e3" + "b9ad6b44c9b383621faa482b990a367d" ), }, } def tearDown(self): self.tmpdir.cleanup() - def assertContentEqual(self, left, right, *, # noqa - check_path=False): + def assertContentEqual(self, left, right, *, check_path=False): # noqa if not isinstance(left, Content): - raise ValueError('%s is not a Content' % left) + raise ValueError("%s is not a Content" % left) if isinstance(right, Content): right = right.get_data() # Compare dictionaries keys = DEFAULT_ALGORITHMS | { - 'length', - 'perms', + "length", + "perms", } if check_path: - keys |= {'path'} + keys |= {"path"} failed = [] for key in keys: try: lvalue = left.data[key] 
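                    # The expected dicts in this module may carry a raw
                    # filesystem `mode` (e.g. 0o100644) rather than a `perms`
                    # enum value; the fallback below normalises it via
                    # from_disk.mode_to_perms, which for instance maps any
                    # regular-file mode with an executable bit set
                    # (mode & 0o111) to DentryPerms.executable_content.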
- if key == 'perms' and 'perms' not in right: - rvalue = from_disk.mode_to_perms(right['mode']) + if key == "perms" and "perms" not in right: + rvalue = from_disk.mode_to_perms(right["mode"]) else: rvalue = right[key] except KeyError: failed.append(key) continue if lvalue != rvalue: failed.append(key) if failed: raise self.failureException( - 'Content mismatched:\n' + - '\n'.join( - 'content[%s] = %r != %r' % ( - key, left.data.get(key), right.get(key)) + "Content mismatched:\n" + + "\n".join( + "content[%s] = %r != %r" % (key, left.data.get(key), right.get(key)) for key in failed ) ) def assertDirectoryEqual(self, left, right): # NoQA if not isinstance(left, Directory): - raise ValueError('%s is not a Directory' % left) + raise ValueError("%s is not a Directory" % left) if isinstance(right, Directory): right = right.get_data() - assert left.entries == right['entries'] - assert left.hash == right['id'] + assert left.entries == right["entries"] + assert left.hash == right["id"] assert left.to_model() == model.Directory.from_dict(right) def make_contents(self, directory): for filename, content in self.contents.items(): path = os.path.join(directory, filename) - with open(path, 'wb') as f: - f.write(content['data']) - os.chmod(path, content['mode']) + with open(path, "wb") as f: + f.write(content["data"]) + os.chmod(path, content["mode"]) def make_symlinks(self, directory): for filename, symlink in self.symlinks.items(): path = os.path.join(directory, filename) - os.symlink(symlink['data'], path) + os.symlink(symlink["data"], path) def make_specials(self, directory): for filename, fn in self.specials.items(): path = os.path.join(directory, filename) fn(path) def make_from_tarball(self, directory): - tarball = os.path.join(TEST_DATA, 'dir-folders', 'sample-folder.tgz') + tarball = os.path.join(TEST_DATA, "dir-folders", "sample-folder.tgz") - with tarfile.open(tarball, 'r:gz') as f: + with tarfile.open(tarball, "r:gz") as f: f.extractall(os.fsdecode(directory)) class TestContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() def test_data_to_content(self): for filename, content in self.contents.items(): - conv_content = Content.from_bytes(mode=content['mode'], - data=content['data']) + conv_content = Content.from_bytes( + mode=content["mode"], data=content["data"] + ) self.assertContentEqual(conv_content, content) self.assertIn(hash_to_hex(conv_content.hash), repr(conv_content)) class SymlinkToContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_symlinks(self.tmpdir_name) def test_symlink_to_content(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) perms = 0o120000 conv_content = Content.from_symlink(path=path, mode=perms) self.assertContentEqual(conv_content, symlink) def test_symlink_to_base_model(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) perms = 0o120000 - model_content = \ - Content.from_symlink(path=path, mode=perms).to_model() + model_content = Content.from_symlink(path=path, mode=perms).to_model() right = symlink.copy() - for key in ('perms', 'path', 'mode'): + for key in ("perms", "path", "mode"): right.pop(key, None) - right['status'] = 'visible' + right["status"] = "visible" assert model_content == model.Content.from_dict(right) class FileToContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_contents(self.tmpdir_name) self.make_symlinks(self.tmpdir_name) self.make_specials(self.tmpdir_name) def 
test_symlink_to_content(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, symlink) def test_file_to_content(self): for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, content) def test_special_to_content(self): for filename in self.specials: path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, self.empty_content) - for path in ['/dev/null', '/dev/zero']: + for path in ["/dev/null", "/dev/zero"]: path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path) self.assertContentEqual(conv_content, self.empty_content) def test_symlink_to_content_model(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) model_content = Content.from_file(path=path).to_model() right = symlink.copy() - for key in ('perms', 'path', 'mode'): + for key in ("perms", "path", "mode"): right.pop(key, None) - right['status'] = 'visible' + right["status"] = "visible" assert model_content == model.Content.from_dict(right) def test_file_to_content_model(self): for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) model_content = Content.from_file(path=path).to_model() right = content.copy() - for key in ('perms', 'mode'): + for key in ("perms", "mode"): right.pop(key, None) assert model_content.with_data() == model.Content.from_dict(right) - right['path'] = path - del right['data'] + right["path"] = path + del right["data"] assert model_content == DiskBackedContent.from_dict(right) def test_special_to_content_model(self): for filename in self.specials: path = os.path.join(self.tmpdir_name, filename) model_content = Content.from_file(path=path).to_model() right = self.empty_content.copy() - for key in ('perms', 'path', 'mode'): + for key in ("perms", "path", "mode"): right.pop(key, None) - right['status'] = 'visible' + right["status"] = "visible" assert model_content == model.Content.from_dict(right) - for path in ['/dev/null', '/dev/zero']: + for path in ["/dev/null", "/dev/zero"]: model_content = Content.from_file(path=path).to_model() right = self.empty_content.copy() - for key in ('perms', 'path', 'mode'): + for key in ("perms", "path", "mode"): right.pop(key, None) - right['status'] = 'visible' + right["status"] = "visible" assert model_content == model.Content.from_dict(right) def test_symlink_max_length(self): for max_content_length in [4, 10]: for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) content = Content.from_file(path=path) - if content.data['length'] > max_content_length: - with pytest.raises(Exception, match='too large'): + if content.data["length"] > max_content_length: + with pytest.raises(Exception, match="too large"): Content.from_file( - path=path, - max_content_length=max_content_length) + path=path, max_content_length=max_content_length + ) else: limited_content = Content.from_file( - path=path, - max_content_length=max_content_length) + path=path, max_content_length=max_content_length + ) assert content == limited_content def test_file_max_length(self): for max_content_length in [2, 4]: for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) content = 
Content.from_file(path=path) limited_content = Content.from_file( - path=path, - max_content_length=max_content_length) - assert content.data['length'] == limited_content.data['length'] - assert content.data['status'] == 'visible' - if content.data['length'] > max_content_length: - assert limited_content.data['status'] == 'absent' - assert limited_content.data['reason'] \ - == 'Content too large' + path=path, max_content_length=max_content_length + ) + assert content.data["length"] == limited_content.data["length"] + assert content.data["status"] == "visible" + if content.data["length"] > max_content_length: + assert limited_content.data["status"] == "absent" + assert limited_content.data["reason"] == "Content too large" else: - assert limited_content.data['status'] == 'visible' + assert limited_content.data["status"] == "visible" def test_special_file_max_length(self): for max_content_length in [None, 0, 1]: for filename in self.specials: path = os.path.join(self.tmpdir_name, filename) content = Content.from_file(path=path) limited_content = Content.from_file( - path=path, - max_content_length=max_content_length) + path=path, max_content_length=max_content_length + ) assert limited_content == content def test_file_to_content_with_path(self): for filename, content in self.contents.items(): content_w_path = content.copy() path = os.path.join(self.tmpdir_name, filename) - content_w_path['path'] = path + content_w_path["path"] = path conv_content = Content.from_file(path=path) - self.assertContentEqual(conv_content, content_w_path, - check_path=True) + self.assertContentEqual(conv_content, content_w_path, check_path=True) @pytest.mark.fs class DirectoryToObjects(DataMixin, unittest.TestCase): def setUp(self): super().setUp() - contents = os.path.join(self.tmpdir_name, b'contents') + contents = os.path.join(self.tmpdir_name, b"contents") os.mkdir(contents) self.make_contents(contents) - symlinks = os.path.join(self.tmpdir_name, b'symlinks') + symlinks = os.path.join(self.tmpdir_name, b"symlinks") os.mkdir(symlinks) self.make_symlinks(symlinks) - specials = os.path.join(self.tmpdir_name, b'specials') + specials = os.path.join(self.tmpdir_name, b"specials") os.mkdir(specials) self.make_specials(specials) - empties = os.path.join(self.tmpdir_name, b'empty1', b'empty2') + empties = os.path.join(self.tmpdir_name, b"empty1", b"empty2") os.makedirs(empties) def test_directory_to_objects(self): directory = Directory.from_disk(path=self.tmpdir_name) for name, value in self.contents.items(): - self.assertContentEqual(directory[b'contents/' + name], value) + self.assertContentEqual(directory[b"contents/" + name], value) for name, value in self.symlinks.items(): - self.assertContentEqual(directory[b'symlinks/' + name], value) + self.assertContentEqual(directory[b"symlinks/" + name], value) for name in self.specials: self.assertContentEqual( - directory[b'specials/' + name], - self.empty_content, + directory[b"specials/" + name], self.empty_content, ) self.assertEqual( - directory[b'empty1/empty2'].get_data(), - self.empty_directory, + directory[b"empty1/empty2"].get_data(), self.empty_directory, ) # Raise on non existent file with self.assertRaisesRegex(KeyError, "b'nonexistent'"): - directory[b'empty1/nonexistent'] + directory[b"empty1/nonexistent"] # Raise on non existent directory with self.assertRaisesRegex(KeyError, "b'nonexistentdir'"): - directory[b'nonexistentdir/file'] + directory[b"nonexistentdir/file"] objs = directory.collect() - self.assertCountEqual(['content', 'directory'], objs) + 
self.assertCountEqual(["content", "directory"], objs) - self.assertEqual(len(objs['directory']), 6) - self.assertEqual(len(objs['content']), - len(self.contents) - + len(self.symlinks) - + 1) + self.assertEqual(len(objs["directory"]), 6) + self.assertEqual( + len(objs["content"]), len(self.contents) + len(self.symlinks) + 1 + ) def test_directory_to_objects_ignore_empty(self): directory = Directory.from_disk( - path=self.tmpdir_name, - dir_filter=from_disk.ignore_empty_directories + path=self.tmpdir_name, dir_filter=from_disk.ignore_empty_directories ) for name, value in self.contents.items(): - self.assertContentEqual(directory[b'contents/' + name], value) + self.assertContentEqual(directory[b"contents/" + name], value) for name, value in self.symlinks.items(): - self.assertContentEqual(directory[b'symlinks/' + name], value) + self.assertContentEqual(directory[b"symlinks/" + name], value) for name in self.specials: self.assertContentEqual( - directory[b'specials/' + name], - self.empty_content, + directory[b"specials/" + name], self.empty_content, ) # empty directories have been ignored recursively with self.assertRaisesRegex(KeyError, "b'empty1'"): - directory[b'empty1'] + directory[b"empty1"] with self.assertRaisesRegex(KeyError, "b'empty1'"): - directory[b'empty1/empty2'] + directory[b"empty1/empty2"] objs = directory.collect() - self.assertCountEqual(['content', 'directory'], objs) + self.assertCountEqual(["content", "directory"], objs) - self.assertEqual(len(objs['directory']), 4) - self.assertEqual(len(objs['content']), - len(self.contents) - + len(self.symlinks) - + 1) + self.assertEqual(len(objs["directory"]), 4) + self.assertEqual( + len(objs["content"]), len(self.contents) + len(self.symlinks) + 1 + ) def test_directory_to_objects_ignore_name(self): directory = Directory.from_disk( path=self.tmpdir_name, - dir_filter=from_disk.ignore_named_directories([b'symlinks']) + dir_filter=from_disk.ignore_named_directories([b"symlinks"]), ) for name, value in self.contents.items(): - self.assertContentEqual(directory[b'contents/' + name], value) + self.assertContentEqual(directory[b"contents/" + name], value) for name in self.specials: self.assertContentEqual( - directory[b'specials/' + name], - self.empty_content, + directory[b"specials/" + name], self.empty_content, ) self.assertEqual( - directory[b'empty1/empty2'].get_data(), - self.empty_directory, + directory[b"empty1/empty2"].get_data(), self.empty_directory, ) with self.assertRaisesRegex(KeyError, "b'symlinks'"): - directory[b'symlinks'] + directory[b"symlinks"] objs = directory.collect() - self.assertCountEqual(['content', 'directory'], objs) + self.assertCountEqual(["content", "directory"], objs) - self.assertEqual(len(objs['directory']), 5) - self.assertEqual(len(objs['content']), - len(self.contents) - + 1) + self.assertEqual(len(objs["directory"]), 5) + self.assertEqual(len(objs["content"]), len(self.contents) + 1) def test_directory_to_objects_ignore_name_case(self): directory = Directory.from_disk( path=self.tmpdir_name, - dir_filter=from_disk.ignore_named_directories([b'symLiNks'], - case_sensitive=False) + dir_filter=from_disk.ignore_named_directories( + [b"symLiNks"], case_sensitive=False + ), ) for name, value in self.contents.items(): - self.assertContentEqual(directory[b'contents/' + name], value) + self.assertContentEqual(directory[b"contents/" + name], value) for name in self.specials: self.assertContentEqual( - directory[b'specials/' + name], - self.empty_content, + directory[b"specials/" + name], 
self.empty_content, ) self.assertEqual( - directory[b'empty1/empty2'].get_data(), - self.empty_directory, + directory[b"empty1/empty2"].get_data(), self.empty_directory, ) with self.assertRaisesRegex(KeyError, "b'symlinks'"): - directory[b'symlinks'] + directory[b"symlinks"] objs = directory.collect() - self.assertCountEqual(['content', 'directory'], objs) + self.assertCountEqual(["content", "directory"], objs) - self.assertEqual(len(objs['directory']), 5) - self.assertEqual(len(objs['content']), - len(self.contents) - + 1) + self.assertEqual(len(objs["directory"]), 5) + self.assertEqual(len(objs["content"]), len(self.contents) + 1) def test_directory_entry_order(self): with tempfile.TemporaryDirectory() as dirname: dirname = os.fsencode(dirname) - open(os.path.join(dirname, b'foo.'), 'a') - open(os.path.join(dirname, b'foo0'), 'a') - os.mkdir(os.path.join(dirname, b'foo')) + open(os.path.join(dirname, b"foo."), "a") + open(os.path.join(dirname, b"foo0"), "a") + os.mkdir(os.path.join(dirname, b"foo")) directory = Directory.from_disk(path=dirname) - assert [entry['name'] for entry in directory.entries] \ - == [b'foo.', b'foo', b'foo0'] + assert [entry["name"] for entry in directory.entries] == [ + b"foo.", + b"foo", + b"foo0", + ] @pytest.mark.fs class TarballTest(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_from_tarball(self.tmpdir_name) def test_contents_match(self): directory = Directory.from_disk( - path=os.path.join(self.tmpdir_name, b'sample-folder') + path=os.path.join(self.tmpdir_name, b"sample-folder") ) for name, expected in self.tarball_contents.items(): obj = directory[name] if isinstance(obj, Content): self.assertContentEqual(obj, expected) elif isinstance(obj, Directory): self.assertDirectoryEqual(obj, expected) else: - raise self.failureException('Unknown type for %s' % obj) + raise self.failureException("Unknown type for %s" % obj) class DirectoryManipulation(DataMixin, unittest.TestCase): def test_directory_access_nested(self): d = Directory() - d[b'a'] = Directory() - d[b'a/b'] = Directory() + d[b"a"] = Directory() + d[b"a/b"] = Directory() - self.assertEqual(d[b'a/b'].get_data(), self.empty_directory) + self.assertEqual(d[b"a/b"].get_data(), self.empty_directory) def test_directory_del_nested(self): d = Directory() - d[b'a'] = Directory() - d[b'a/b'] = Directory() + d[b"a"] = Directory() + d[b"a/b"] = Directory() with self.assertRaisesRegex(KeyError, "b'c'"): - del d[b'a/b/c'] + del d[b"a/b/c"] with self.assertRaisesRegex(KeyError, "b'level2'"): - del d[b'a/level2/c'] + del d[b"a/level2/c"] - del d[b'a/b'] + del d[b"a/b"] - self.assertEqual(d[b'a'].get_data(), self.empty_directory) + self.assertEqual(d[b"a"].get_data(), self.empty_directory) def test_directory_access_self(self): d = Directory() - self.assertIs(d, d[b'']) - self.assertIs(d, d[b'/']) - self.assertIs(d, d[b'//']) + self.assertIs(d, d[b""]) + self.assertIs(d, d[b"/"]) + self.assertIs(d, d[b"//"]) def test_directory_access_wrong_type(self): d = Directory() - with self.assertRaisesRegex(ValueError, 'bytes from Directory'): - d['foo'] - with self.assertRaisesRegex(ValueError, 'bytes from Directory'): + with self.assertRaisesRegex(ValueError, "bytes from Directory"): + d["foo"] + with self.assertRaisesRegex(ValueError, "bytes from Directory"): d[42] def test_directory_repr(self): - entries = [b'a', b'b', b'c'] + entries = [b"a", b"b", b"c"] d = Directory() for entry in entries: d[entry] = Directory() r = repr(d) self.assertIn(hash_to_hex(d.hash), r) for entry in entries: 
self.assertIn(str(entry), r) def test_directory_set_wrong_type_name(self): d = Directory() - with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): - d['foo'] = Directory() - with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): + with self.assertRaisesRegex(ValueError, "bytes Directory entry"): + d["foo"] = Directory() + with self.assertRaisesRegex(ValueError, "bytes Directory entry"): d[42] = Directory() def test_directory_set_nul_in_name(self): d = Directory() - with self.assertRaisesRegex(ValueError, 'nul bytes'): - d[b'\x00\x01'] = Directory() + with self.assertRaisesRegex(ValueError, "nul bytes"): + d[b"\x00\x01"] = Directory() def test_directory_set_empty_name(self): d = Directory() - with self.assertRaisesRegex(ValueError, 'must have a name'): - d[b''] = Directory() - with self.assertRaisesRegex(ValueError, 'must have a name'): - d[b'/'] = Directory() + with self.assertRaisesRegex(ValueError, "must have a name"): + d[b""] = Directory() + with self.assertRaisesRegex(ValueError, "must have a name"): + d[b"/"] = Directory() def test_directory_set_wrong_type(self): d = Directory() - with self.assertRaisesRegex(ValueError, 'Content or Directory'): - d[b'entry'] = object() + with self.assertRaisesRegex(ValueError, "Content or Directory"): + d[b"entry"] = object() def test_directory_del_wrong_type(self): d = Directory() - with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): - del d['foo'] - with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): + with self.assertRaisesRegex(ValueError, "bytes Directory entry"): + del d["foo"] + with self.assertRaisesRegex(ValueError, "bytes Directory entry"): del d[42] diff --git a/swh/model/tests/test_generate_testdata.py b/swh/model/tests/test_generate_testdata.py index 56fff65..aa9c8af 100644 --- a/swh/model/tests/test_generate_testdata.py +++ b/swh/model/tests/test_generate_testdata.py @@ -1,54 +1,54 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from .generate_testdata import gen_contents, gen_origins, ORIGINS from swh.model.model import Origin, BaseContent def test_gen_origins_empty(): origins = gen_origins(0) assert not origins def test_gen_origins_one(): origins = gen_origins(1) assert len(origins) == 1 assert [Origin.from_dict(d) for d in origins] def test_gen_origins_default(): origins = gen_origins() assert len(origins) == 100 models = [Origin.from_dict(d).url for d in origins] assert len(origins) == len(set(models)) def test_gen_origins_max(): nmax = len(ORIGINS) - origins = gen_origins(nmax+1) + origins = gen_origins(nmax + 1) assert len(origins) == nmax models = {Origin.from_dict(d).url for d in origins} # ensure we did not generate the same origin twice assert len(origins) == len(models) def test_gen_contents_empty(): contents = gen_contents(0) assert not contents def test_gen_contents_one(): contents = gen_contents(1) assert len(contents) == 1 assert [BaseContent.from_dict(d) for d in contents] def test_gen_contents_default(): contents = gen_contents() assert len(contents) == 20 models = {BaseContent.from_dict(d) for d in contents} # ensure we did not generate the same content twice assert len(contents) == len(models) diff --git a/swh/model/tests/test_hashutil.py b/swh/model/tests/test_hashutil.py index abdff97..ff99cf2 100644 --- a/swh/model/tests/test_hashutil.py +++ 
b/swh/model/tests/test_hashutil.py @@ -1,334 +1,338 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import hashlib import io import os import tempfile import unittest from unittest.mock import patch from swh.model import hashutil from swh.model.hashutil import MultiHash class BaseHashutil(unittest.TestCase): def setUp(self): # Reset function cache hashutil._blake2_hash_cache = {} - self.data = b'1984\n' + self.data = b"1984\n" self.hex_checksums = { - 'sha1': '62be35bf00ff0c624f4a621e2ea5595a049e0731', - 'sha1_git': '568aaf43d83b2c3df8067f3bedbb97d83260be6d', - 'sha256': '26602113b4b9afd9d55466b08580d3c2' - '4a9b50ee5b5866c0d91fab0e65907311', - 'blake2s256': '63cfb259e1fdb485bc5c55749697a6b21ef31fb7445f6c78a' - 'c9422f9f2dc8906', + "sha1": "62be35bf00ff0c624f4a621e2ea5595a049e0731", + "sha1_git": "568aaf43d83b2c3df8067f3bedbb97d83260be6d", + "sha256": "26602113b4b9afd9d55466b08580d3c2" + "4a9b50ee5b5866c0d91fab0e65907311", + "blake2s256": "63cfb259e1fdb485bc5c55749697a6b21ef31fb7445f6c78a" + "c9422f9f2dc8906", } self.checksums = { - type: bytes.fromhex(cksum) - for type, cksum in self.hex_checksums.items() + type: bytes.fromhex(cksum) for type, cksum in self.hex_checksums.items() } self.bytehex_checksums = { type: hashutil.hash_to_bytehex(cksum) for type, cksum in self.checksums.items() } self.git_hex_checksums = { - 'blob': self.hex_checksums['sha1_git'], - 'tree': '5b2e883aa33d2efab98442693ea4dd5f1b8871b0', - 'commit': '79e4093542e72f0fcb7cbd75cb7d270f9254aa8f', - 'tag': 'd6bf62466f287b4d986c545890716ce058bddf67', + "blob": self.hex_checksums["sha1_git"], + "tree": "5b2e883aa33d2efab98442693ea4dd5f1b8871b0", + "commit": "79e4093542e72f0fcb7cbd75cb7d270f9254aa8f", + "tag": "d6bf62466f287b4d986c545890716ce058bddf67", } self.git_checksums = { - type: bytes.fromhex(cksum) - for type, cksum in self.git_hex_checksums.items() + type: bytes.fromhex(cksum) for type, cksum in self.git_hex_checksums.items() } class MultiHashTest(BaseHashutil): def test_multi_hash_data(self): checksums = MultiHash.from_data(self.data).digest() self.assertEqual(checksums, self.checksums) - self.assertFalse('length' in checksums) + self.assertFalse("length" in checksums) def test_multi_hash_data_with_length(self): expected_checksums = self.checksums.copy() - expected_checksums['length'] = len(self.data) + expected_checksums["length"] = len(self.data) - algos = set(['length']).union(hashutil.DEFAULT_ALGORITHMS) + algos = set(["length"]).union(hashutil.DEFAULT_ALGORITHMS) checksums = MultiHash.from_data(self.data, hash_names=algos).digest() self.assertEqual(checksums, expected_checksums) - self.assertTrue('length' in checksums) + self.assertTrue("length" in checksums) def test_multi_hash_data_unknown_hash(self): with self.assertRaises(ValueError) as cm: - MultiHash.from_data(self.data, ['unknown-hash']) + MultiHash.from_data(self.data, ["unknown-hash"]) - self.assertIn('Unexpected hashing algorithm', cm.exception.args[0]) - self.assertIn('unknown-hash', cm.exception.args[0]) + self.assertIn("Unexpected hashing algorithm", cm.exception.args[0]) + self.assertIn("unknown-hash", cm.exception.args[0]) def test_multi_hash_file(self): fobj = io.BytesIO(self.data) checksums = MultiHash.from_file(fobj, length=len(self.data)).digest() self.assertEqual(checksums, self.checksums) def test_multi_hash_file_hexdigest(self): fobj = 
io.BytesIO(self.data) length = len(self.data) checksums = MultiHash.from_file(fobj, length=length).hexdigest() self.assertEqual(checksums, self.hex_checksums) def test_multi_hash_file_bytehexdigest(self): fobj = io.BytesIO(self.data) length = len(self.data) checksums = MultiHash.from_file(fobj, length=length).bytehexdigest() self.assertEqual(checksums, self.bytehex_checksums) def test_multi_hash_file_missing_length(self): fobj = io.BytesIO(self.data) with self.assertRaises(ValueError) as cm: - MultiHash.from_file(fobj, hash_names=['sha1_git']) + MultiHash.from_file(fobj, hash_names=["sha1_git"]) - self.assertIn('Missing length', cm.exception.args[0]) + self.assertIn("Missing length", cm.exception.args[0]) def test_multi_hash_path(self): with tempfile.NamedTemporaryFile(delete=False) as f: f.write(self.data) hashes = MultiHash.from_path(f.name).digest() os.remove(f.name) self.assertEqual(self.checksums, hashes) class Hashutil(BaseHashutil): - def test_hash_git_data(self): checksums = { git_type: hashutil.hash_git_data(self.data, git_type) for git_type in self.git_checksums } self.assertEqual(checksums, self.git_checksums) def test_hash_git_data_unknown_git_type(self): with self.assertRaises(ValueError) as cm: - hashutil.hash_git_data(self.data, 'unknown-git-type') + hashutil.hash_git_data(self.data, "unknown-git-type") - self.assertIn('Unexpected git object type', cm.exception.args[0]) - self.assertIn('unknown-git-type', cm.exception.args[0]) + self.assertIn("Unexpected git object type", cm.exception.args[0]) + self.assertIn("unknown-git-type", cm.exception.args[0]) def test_hash_to_hex(self): for type in self.checksums: hex = self.hex_checksums[type] hash = self.checksums[type] self.assertEqual(hashutil.hash_to_hex(hex), hex) self.assertEqual(hashutil.hash_to_hex(hash), hex) def test_hash_to_bytes(self): for type in self.checksums: hex = self.hex_checksums[type] hash = self.checksums[type] self.assertEqual(hashutil.hash_to_bytes(hex), hash) self.assertEqual(hashutil.hash_to_bytes(hash), hash) def test_hash_to_bytehex(self): for algo in self.checksums: - self.assertEqual(self.hex_checksums[algo].encode('ascii'), - hashutil.hash_to_bytehex(self.checksums[algo])) + self.assertEqual( + self.hex_checksums[algo].encode("ascii"), + hashutil.hash_to_bytehex(self.checksums[algo]), + ) def test_bytehex_to_hash(self): for algo in self.checksums: - self.assertEqual(self.checksums[algo], - hashutil.bytehex_to_hash( - self.hex_checksums[algo].encode())) + self.assertEqual( + self.checksums[algo], + hashutil.bytehex_to_hash(self.hex_checksums[algo].encode()), + ) def test_new_hash_unsupported_hashing_algorithm(self): try: - hashutil._new_hash('blake2:10') + hashutil._new_hash("blake2:10") except ValueError as e: - self.assertEqual(str(e), - 'Unexpected hashing algorithm blake2:10, ' - 'expected one of blake2b512, blake2s256, ' - 'sha1, sha1_git, sha256') - - @patch('hashlib.new') + self.assertEqual( + str(e), + "Unexpected hashing algorithm blake2:10, " + "expected one of blake2b512, blake2s256, " + "sha1, sha1_git, sha256", + ) + + @patch("hashlib.new") def test_new_hash_blake2b_blake2b512_builtin(self, mock_hashlib_new): - if 'blake2b512' not in hashlib.algorithms_available: - self.skipTest('blake2b512 not built-in') + if "blake2b512" not in hashlib.algorithms_available: + self.skipTest("blake2b512 not built-in") mock_hashlib_new.return_value = sentinel = object() - h = hashutil._new_hash('blake2b512') + h = hashutil._new_hash("blake2b512") self.assertIs(h, sentinel) - 
mock_hashlib_new.assert_called_with('blake2b512') + mock_hashlib_new.assert_called_with("blake2b512") - @patch('hashlib.new') + @patch("hashlib.new") def test_new_hash_blake2s_blake2s256_builtin(self, mock_hashlib_new): - if 'blake2s256' not in hashlib.algorithms_available: - self.skipTest('blake2s256 not built-in') + if "blake2s256" not in hashlib.algorithms_available: + self.skipTest("blake2s256 not built-in") mock_hashlib_new.return_value = sentinel = object() - h = hashutil._new_hash('blake2s256') + h = hashutil._new_hash("blake2s256") self.assertIs(h, sentinel) - mock_hashlib_new.assert_called_with('blake2s256') + mock_hashlib_new.assert_called_with("blake2s256") def test_new_hash_blake2b_builtin(self): removed_hash = False try: - if 'blake2b512' in hashlib.algorithms_available: + if "blake2b512" in hashlib.algorithms_available: removed_hash = True - hashlib.algorithms_available.remove('blake2b512') - if 'blake2b' not in hashlib.algorithms_available: - self.skipTest('blake2b not built in') + hashlib.algorithms_available.remove("blake2b512") + if "blake2b" not in hashlib.algorithms_available: + self.skipTest("blake2b not built in") - with patch('hashlib.blake2b') as mock_blake2b: + with patch("hashlib.blake2b") as mock_blake2b: mock_blake2b.return_value = sentinel = object() - h = hashutil._new_hash('blake2b512') + h = hashutil._new_hash("blake2b512") self.assertIs(h, sentinel) - mock_blake2b.assert_called_with(digest_size=512//8) + mock_blake2b.assert_called_with(digest_size=512 // 8) finally: if removed_hash: - hashlib.algorithms_available.add('blake2b512') + hashlib.algorithms_available.add("blake2b512") def test_new_hash_blake2s_builtin(self): removed_hash = False try: - if 'blake2s256' in hashlib.algorithms_available: + if "blake2s256" in hashlib.algorithms_available: removed_hash = True - hashlib.algorithms_available.remove('blake2s256') - if 'blake2s' not in hashlib.algorithms_available: - self.skipTest('blake2s not built in') + hashlib.algorithms_available.remove("blake2s256") + if "blake2s" not in hashlib.algorithms_available: + self.skipTest("blake2s not built in") - with patch('hashlib.blake2s') as mock_blake2s: + with patch("hashlib.blake2s") as mock_blake2s: mock_blake2s.return_value = sentinel = object() - h = hashutil._new_hash('blake2s256') + h = hashutil._new_hash("blake2s256") self.assertIs(h, sentinel) - mock_blake2s.assert_called_with(digest_size=256//8) + mock_blake2s.assert_called_with(digest_size=256 // 8) finally: if removed_hash: - hashlib.algorithms_available.add('blake2s256') + hashlib.algorithms_available.add("blake2s256") def test_new_hash_blake2b_pyblake2(self): - if 'blake2b512' in hashlib.algorithms_available: - self.skipTest('blake2b512 built in') - if 'blake2b' in hashlib.algorithms_available: - self.skipTest('blake2b built in') + if "blake2b512" in hashlib.algorithms_available: + self.skipTest("blake2b512 built in") + if "blake2b" in hashlib.algorithms_available: + self.skipTest("blake2b built in") - with patch('pyblake2.blake2b') as mock_blake2b: + with patch("pyblake2.blake2b") as mock_blake2b: mock_blake2b.return_value = sentinel = object() - h = hashutil._new_hash('blake2b512') + h = hashutil._new_hash("blake2b512") self.assertIs(h, sentinel) - mock_blake2b.assert_called_with(digest_size=512//8) + mock_blake2b.assert_called_with(digest_size=512 // 8) def test_new_hash_blake2s_pyblake2(self): - if 'blake2s256' in hashlib.algorithms_available: - self.skipTest('blake2s256 built in') - if 'blake2s' in hashlib.algorithms_available: - 
self.skipTest('blake2s built in') + if "blake2s256" in hashlib.algorithms_available: + self.skipTest("blake2s256 built in") + if "blake2s" in hashlib.algorithms_available: + self.skipTest("blake2s built in") - with patch('pyblake2.blake2s') as mock_blake2s: + with patch("pyblake2.blake2s") as mock_blake2s: mock_blake2s.return_value = sentinel = object() - h = hashutil._new_hash('blake2s256') + h = hashutil._new_hash("blake2s256") self.assertIs(h, sentinel) - mock_blake2s.assert_called_with(digest_size=256//8) + mock_blake2s.assert_called_with(digest_size=256 // 8) class HashlibGit(unittest.TestCase): - def setUp(self): - self.blob_data = b'42\n' - - self.tree_data = b''.join([b'40000 barfoo\0', - bytes.fromhex('c3020f6bf135a38c6df' - '3afeb5fb38232c5e07087'), - b'100644 blah\0', - bytes.fromhex('63756ef0df5e4f10b6efa' - '33cfe5c758749615f20'), - b'100644 hello\0', - bytes.fromhex('907b308167f0880fb2a' - '5c0e1614bb0c7620f9dc3')]) - - self.commit_data = """tree 1c61f7259dcb770f46b194d941df4f08ff0a3970 + self.blob_data = b"42\n" + + self.tree_data = b"".join( + [ + b"40000 barfoo\0", + bytes.fromhex("c3020f6bf135a38c6df" "3afeb5fb38232c5e07087"), + b"100644 blah\0", + bytes.fromhex("63756ef0df5e4f10b6efa" "33cfe5c758749615f20"), + b"100644 hello\0", + bytes.fromhex("907b308167f0880fb2a" "5c0e1614bb0c7620f9dc3"), + ] + ) + + self.commit_data = b"""\ +tree 1c61f7259dcb770f46b194d941df4f08ff0a3970 author Antoine R. Dumont (@ardumont) 1444054085 +0200 committer Antoine R. Dumont (@ardumont) 1444054085 +0200 initial -""".encode('utf-8') # NOQA +""" # noqa self.tag_data = """object 24d012aaec0bc5a4d2f62c56399053d6cc72a241 type commit tag 0.0.1 tagger Antoine R. Dumont (@ardumont) 1444225145 +0200 blah -""".encode('utf-8') # NOQA +""".encode( + "utf-8" + ) # NOQA self.checksums = { - 'blob_sha1_git': bytes.fromhex('d81cc0710eb6cf9efd5b920a8453e1' - 'e07157b6cd'), - 'tree_sha1_git': bytes.fromhex('ac212302c45eada382b27bfda795db' - '121dacdb1c'), - 'commit_sha1_git': bytes.fromhex('e960570b2e6e2798fa4cfb9af2c399' - 'd629189653'), - 'tag_sha1_git': bytes.fromhex('bc2b99ba469987bcf1272c189ed534' - 'e9e959f120'), + "blob_sha1_git": bytes.fromhex( + "d81cc0710eb6cf9efd5b920a8453e1" "e07157b6cd" + ), + "tree_sha1_git": bytes.fromhex( + "ac212302c45eada382b27bfda795db" "121dacdb1c" + ), + "commit_sha1_git": bytes.fromhex( + "e960570b2e6e2798fa4cfb9af2c399" "d629189653" + ), + "tag_sha1_git": bytes.fromhex( + "bc2b99ba469987bcf1272c189ed534" "e9e959f120" + ), } def test_unknown_header_type(self): with self.assertRaises(ValueError) as cm: - hashutil.hash_git_data(b'any-data', 'some-unknown-type') + hashutil.hash_git_data(b"any-data", "some-unknown-type") - self.assertIn('Unexpected git object type', cm.exception.args[0]) + self.assertIn("Unexpected git object type", cm.exception.args[0]) def test_hashdata_content(self): # when - actual_hash = hashutil.hash_git_data(self.blob_data, git_type='blob') + actual_hash = hashutil.hash_git_data(self.blob_data, git_type="blob") # then - self.assertEqual(actual_hash, - self.checksums['blob_sha1_git']) + self.assertEqual(actual_hash, self.checksums["blob_sha1_git"]) def test_hashdata_tree(self): # when - actual_hash = hashutil.hash_git_data(self.tree_data, git_type='tree') + actual_hash = hashutil.hash_git_data(self.tree_data, git_type="tree") # then - self.assertEqual(actual_hash, - self.checksums['tree_sha1_git']) + self.assertEqual(actual_hash, self.checksums["tree_sha1_git"]) def test_hashdata_revision(self): # when - actual_hash = 
hashutil.hash_git_data(self.commit_data, - git_type='commit') + actual_hash = hashutil.hash_git_data(self.commit_data, git_type="commit") # then - self.assertEqual(actual_hash, - self.checksums['commit_sha1_git']) + self.assertEqual(actual_hash, self.checksums["commit_sha1_git"]) def test_hashdata_tag(self): # when - actual_hash = hashutil.hash_git_data(self.tag_data, git_type='tag') + actual_hash = hashutil.hash_git_data(self.tag_data, git_type="tag") # then - self.assertEqual(actual_hash, - self.checksums['tag_sha1_git']) + self.assertEqual(actual_hash, self.checksums["tag_sha1_git"]) diff --git a/swh/model/tests/test_hypothesis_strategies.py b/swh/model/tests/test_hypothesis_strategies.py index 7601184..b790f9a 100644 --- a/swh/model/tests/test_hypothesis_strategies.py +++ b/swh/model/tests/test_hypothesis_strategies.py @@ -1,124 +1,122 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import attr from hypothesis import given, settings from swh.model.hashutil import DEFAULT_ALGORITHMS -from swh.model.hypothesis_strategies import ( - objects, object_dicts, snapshots -) +from swh.model.hypothesis_strategies import objects, object_dicts, snapshots from swh.model.model import TargetType -target_types = ( - 'content', 'directory', 'revision', 'release', 'snapshot', 'alias') +target_types = ("content", "directory", "revision", "release", "snapshot", "alias") @given(objects()) def test_generation(obj_type_and_obj): (obj_type, object_) = obj_type_and_obj attr.validate(object_) def assert_nested_dict(obj): """Tests the object is a nested dict and contains no more class from swh.model.model.""" if isinstance(obj, dict): for (key, value) in obj.items(): assert isinstance(key, (str, bytes)), key assert_nested_dict(value) elif isinstance(obj, list): for value in obj: assert_nested_dict(value) - elif isinstance(obj, (int, float, str, bytes, bool, type(None), - datetime.datetime)): + elif isinstance(obj, (int, float, str, bytes, bool, type(None), datetime.datetime)): pass else: assert False, obj @given(object_dicts()) def test_dicts_generation(obj_type_and_obj): (obj_type, object_) = obj_type_and_obj assert_nested_dict(object_) - if obj_type == 'content': - COMMON_KEYS = set(DEFAULT_ALGORITHMS) | {'length', 'status', 'ctime'} - if object_['status'] == 'visible': - assert set(object_) <= COMMON_KEYS | {'data'} - elif object_['status'] == 'absent': - assert set(object_) == COMMON_KEYS | {'reason'} - elif object_['status'] == 'hidden': - assert set(object_) <= COMMON_KEYS | {'data'} + if obj_type == "content": + COMMON_KEYS = set(DEFAULT_ALGORITHMS) | {"length", "status", "ctime"} + if object_["status"] == "visible": + assert set(object_) <= COMMON_KEYS | {"data"} + elif object_["status"] == "absent": + assert set(object_) == COMMON_KEYS | {"reason"} + elif object_["status"] == "hidden": + assert set(object_) <= COMMON_KEYS | {"data"} else: assert False, object_ - elif obj_type == 'release': - assert object_['target_type'] in target_types - elif obj_type == 'snapshot': - for branch in object_['branches'].values(): - assert branch is None or branch['target_type'] in target_types + elif obj_type == "release": + assert object_["target_type"] in target_types + elif obj_type == "snapshot": + for branch in object_["branches"].values(): + assert branch is None or branch["target_type"] in target_types 
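# A minimal sketch of exercising these strategies by hand, outside the
# @given decorator, assuming hypothesis's stock `.example()` helper:
#
#   from swh.model.hypothesis_strategies import object_dicts
#
#   obj_type, obj = object_dicts().example()
#   assert_nested_dict(obj)  # only str/bytes keys and plain leaf values
#   if obj_type == "release":
#       assert obj["target_type"] in target_types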
@given(objects()) def test_model_to_dicts(obj_type_and_obj): (obj_type, object_) = obj_type_and_obj obj_dict = object_.to_dict() assert_nested_dict(obj_dict) - if obj_type == 'content': - COMMON_KEYS = set(DEFAULT_ALGORITHMS) | {'length', 'status', 'ctime'} - if obj_dict['status'] == 'visible': - assert set(obj_dict) == COMMON_KEYS | {'data'} - elif obj_dict['status'] == 'absent': - assert set(obj_dict) == COMMON_KEYS | {'reason'} - elif obj_dict['status'] == 'hidden': - assert set(obj_dict) == COMMON_KEYS | {'data'} + if obj_type == "content": + COMMON_KEYS = set(DEFAULT_ALGORITHMS) | {"length", "status", "ctime"} + if obj_dict["status"] == "visible": + assert set(obj_dict) == COMMON_KEYS | {"data"} + elif obj_dict["status"] == "absent": + assert set(obj_dict) == COMMON_KEYS | {"reason"} + elif obj_dict["status"] == "hidden": + assert set(obj_dict) == COMMON_KEYS | {"data"} else: assert False, obj_dict - elif obj_type == 'release': - assert obj_dict['target_type'] in target_types - elif obj_type == 'snapshot': - for branch in obj_dict['branches'].values(): - assert branch is None or branch['target_type'] in target_types + elif obj_type == "release": + assert obj_dict["target_type"] in target_types + elif obj_type == "snapshot": + for branch in obj_dict["branches"].values(): + assert branch is None or branch["target_type"] in target_types _min_snp_size = 10 _max_snp_size = 100 @given(snapshots(min_size=_min_snp_size, max_size=_max_snp_size)) @settings(max_examples=1) def test_snapshots_strategy(snapshot): branches = snapshot.branches assert len(branches) >= _min_snp_size assert len(branches) <= _max_snp_size aliases = [] # check snapshot integrity for name, branch in branches.items(): assert branch is None or branch.target_type.value in target_types if branch is not None and branch.target_type == TargetType.ALIAS: aliases.append(name) assert branch.target in branches # check no cycles between aliases for alias in aliases: processed_alias = set() current_alias = alias - while (branches[current_alias] is not None - and branches[current_alias].target_type == TargetType.ALIAS): + while ( + branches[current_alias] is not None + and branches[current_alias].target_type == TargetType.ALIAS + ): assert branches[current_alias].target not in processed_alias processed_alias.add(current_alias) current_alias = branches[current_alias].target @given(snapshots(min_size=_min_snp_size, max_size=_min_snp_size)) @settings(max_examples=1) def test_snapshots_strategy_fixed_size(snapshot): assert len(snapshot.branches) == _min_snp_size diff --git a/swh/model/tests/test_identifiers.py b/swh/model/tests/test_identifiers.py index 8c6c70d..da0e2aa 100644 --- a/swh/model/tests/test_identifiers.py +++ b/swh/model/tests/test_identifiers.py @@ -1,964 +1,965 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime import unittest from swh.model import hashutil, identifiers from swh.model.exceptions import ValidationError from swh.model.hashutil import hash_to_bytes as _x -from swh.model.identifiers import (CONTENT, DIRECTORY, - RELEASE, REVISION, - SNAPSHOT, PersistentId) +from swh.model.identifiers import ( + CONTENT, + DIRECTORY, + RELEASE, + REVISION, + SNAPSHOT, + PersistentId, +) class UtilityFunctionsIdentifier(unittest.TestCase): def setUp(self): - self.str_id = 
'c2e41aae41ac17bd4a650770d6ee77f62e52235b' + self.str_id = "c2e41aae41ac17bd4a650770d6ee77f62e52235b" self.bytes_id = binascii.unhexlify(self.str_id) self.bad_type_id = object() def test_identifier_to_bytes(self): for id in [self.str_id, self.bytes_id]: - self.assertEqual(identifiers.identifier_to_bytes(id), - self.bytes_id) + self.assertEqual(identifiers.identifier_to_bytes(id), self.bytes_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(id[:-2]) - self.assertIn('length', str(cm.exception)) + self.assertIn("length", str(cm.exception)) with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(self.bad_type_id) - self.assertIn('type', str(cm.exception)) + self.assertIn("type", str(cm.exception)) def test_identifier_to_str(self): for id in [self.str_id, self.bytes_id]: - self.assertEqual(identifiers.identifier_to_str(id), - self.str_id) + self.assertEqual(identifiers.identifier_to_str(id), self.str_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_str(id[:-2]) - self.assertIn('length', str(cm.exception)) + self.assertIn("length", str(cm.exception)) with self.assertRaises(ValueError) as cm: identifiers.identifier_to_str(self.bad_type_id) - self.assertIn('type', str(cm.exception)) + self.assertIn("type", str(cm.exception)) class UtilityFunctionsDateOffset(unittest.TestCase): def setUp(self): self.dates = { - b'1448210036': { - 'seconds': 1448210036, - 'microseconds': 0, - }, - b'1448210036.002342': { - 'seconds': 1448210036, - 'microseconds': 2342, - }, - b'1448210036.12': { - 'seconds': 1448210036, - 'microseconds': 120000, - } + b"1448210036": {"seconds": 1448210036, "microseconds": 0,}, + b"1448210036.002342": {"seconds": 1448210036, "microseconds": 2342,}, + b"1448210036.12": {"seconds": 1448210036, "microseconds": 120000,}, } self.broken_dates = [ 1448210036.12, ] self.offsets = { - 0: b'+0000', - -630: b'-1030', - 800: b'+1320', + 0: b"+0000", + -630: b"-1030", + 800: b"+1320", } def test_format_date(self): for date_repr, date in self.dates.items(): self.assertEqual(identifiers.format_date(date), date_repr) def test_format_date_fail(self): for date in self.broken_dates: with self.assertRaises(ValueError): identifiers.format_date(date) def test_format_offset(self): for offset, res in self.offsets.items(): self.assertEqual(identifiers.format_offset(offset), res) class ContentIdentifier(unittest.TestCase): def setUp(self): self.content = { - 'status': 'visible', - 'length': 5, - 'data': b'1984\n', - 'ctime': datetime.datetime(2015, 11, 22, 16, 33, 56, - tzinfo=datetime.timezone.utc), + "status": "visible", + "length": 5, + "data": b"1984\n", + "ctime": datetime.datetime( + 2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc + ), } - self.content_id = hashutil.MultiHash.from_data( - self.content['data']).digest() + self.content_id = hashutil.MultiHash.from_data(self.content["data"]).digest() def test_content_identifier(self): - self.assertEqual(identifiers.content_identifier(self.content), - self.content_id) + self.assertEqual(identifiers.content_identifier(self.content), self.content_id) directory_example = { - 'id': 'd7ed3d2c31d608823be58b1cbe57605310615231', - 'entries': [ + "id": "d7ed3d2c31d608823be58b1cbe57605310615231", + "entries": [ { - 'type': 'file', - 'perms': 33188, - 'name': b'README', - 'target': _x('37ec8ea2110c0b7a32fbb0e872f6e7debbf95e21') + "type": "file", + "perms": 33188, + "name": b"README", + "target": _x("37ec8ea2110c0b7a32fbb0e872f6e7debbf95e21"), }, { - 'type': 
'file', - 'perms': 33188, - 'name': b'Rakefile', - 'target': _x('3bb0e8592a41ae3185ee32266c860714980dbed7') + "type": "file", + "perms": 33188, + "name": b"Rakefile", + "target": _x("3bb0e8592a41ae3185ee32266c860714980dbed7"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'app', - 'target': _x('61e6e867f5d7ba3b40540869bc050b0c4fed9e95') + "type": "dir", + "perms": 16384, + "name": b"app", + "target": _x("61e6e867f5d7ba3b40540869bc050b0c4fed9e95"), }, { - 'type': 'file', - 'perms': 33188, - 'name': b'1.megabyte', - 'target': _x('7c2b2fbdd57d6765cdc9d84c2d7d333f11be7fb3') + "type": "file", + "perms": 33188, + "name": b"1.megabyte", + "target": _x("7c2b2fbdd57d6765cdc9d84c2d7d333f11be7fb3"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'config', - 'target': _x('591dfe784a2e9ccc63aaba1cb68a765734310d98') + "type": "dir", + "perms": 16384, + "name": b"config", + "target": _x("591dfe784a2e9ccc63aaba1cb68a765734310d98"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'public', - 'target': _x('9588bf4522c2b4648bfd1c61d175d1f88c1ad4a5') + "type": "dir", + "perms": 16384, + "name": b"public", + "target": _x("9588bf4522c2b4648bfd1c61d175d1f88c1ad4a5"), }, { - 'type': 'file', - 'perms': 33188, - 'name': b'development.sqlite3', - 'target': _x('e69de29bb2d1d6434b8b29ae775ad8c2e48c5391') + "type": "file", + "perms": 33188, + "name": b"development.sqlite3", + "target": _x("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'doc', - 'target': _x('154705c6aa1c8ead8c99c7915373e3c44012057f') + "type": "dir", + "perms": 16384, + "name": b"doc", + "target": _x("154705c6aa1c8ead8c99c7915373e3c44012057f"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'db', - 'target': _x('85f157bdc39356b7bc7de9d0099b4ced8b3b382c') + "type": "dir", + "perms": 16384, + "name": b"db", + "target": _x("85f157bdc39356b7bc7de9d0099b4ced8b3b382c"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'log', - 'target': _x('5e3d3941c51cce73352dff89c805a304ba96fffe') + "type": "dir", + "perms": 16384, + "name": b"log", + "target": _x("5e3d3941c51cce73352dff89c805a304ba96fffe"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'script', - 'target': _x('1b278423caf176da3f3533592012502aa10f566c') + "type": "dir", + "perms": 16384, + "name": b"script", + "target": _x("1b278423caf176da3f3533592012502aa10f566c"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'test', - 'target': _x('035f0437c080bfd8711670b3e8677e686c69c763') + "type": "dir", + "perms": 16384, + "name": b"test", + "target": _x("035f0437c080bfd8711670b3e8677e686c69c763"), }, { - 'type': 'dir', - 'perms': 16384, - 'name': b'vendor', - 'target': _x('7c0dc9ad978c1af3f9a4ce061e50f5918bd27138') + "type": "dir", + "perms": 16384, + "name": b"vendor", + "target": _x("7c0dc9ad978c1af3f9a4ce061e50f5918bd27138"), }, { - 'type': 'rev', - 'perms': 57344, - 'name': b'will_paginate', - 'target': _x('3d531e169db92a16a9a8974f0ae6edf52e52659e') + "type": "rev", + "perms": 57344, + "name": b"will_paginate", + "target": _x("3d531e169db92a16a9a8974f0ae6edf52e52659e"), }, - # in git order, the dir named "order" should be between the files # named "order." 
and "order0" { - 'type': 'dir', - 'perms': 16384, - 'name': b'order', - 'target': _x('62cdb7020ff920e5aa642c3d4066950dd1f01f4d') + "type": "dir", + "perms": 16384, + "name": b"order", + "target": _x("62cdb7020ff920e5aa642c3d4066950dd1f01f4d"), }, { - 'type': 'file', - 'perms': 16384, - 'name': b'order.', - 'target': _x('0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33') + "type": "file", + "perms": 16384, + "name": b"order.", + "target": _x("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), }, { - 'type': 'file', - 'perms': 16384, - 'name': b'order0', - 'target': _x('bbe960a25ea311d21d40669e93df2003ba9b90a2') + "type": "file", + "perms": 16384, + "name": b"order0", + "target": _x("bbe960a25ea311d21d40669e93df2003ba9b90a2"), }, ], } class DirectoryIdentifier(unittest.TestCase): def setUp(self): self.directory = directory_example self.empty_directory = { - 'id': '4b825dc642cb6eb9a060e54bf8d69288fbee4904', - 'entries': [], + "id": "4b825dc642cb6eb9a060e54bf8d69288fbee4904", + "entries": [], } def test_dir_identifier(self): self.assertEqual( - identifiers.directory_identifier(self.directory), - self.directory['id']) + identifiers.directory_identifier(self.directory), self.directory["id"] + ) def test_dir_identifier_entry_order(self): # Reverse order of entries, check the id is still the same. - directory = {'entries': reversed(self.directory['entries'])} + directory = {"entries": reversed(self.directory["entries"])} self.assertEqual( - identifiers.directory_identifier(directory), - self.directory['id']) + identifiers.directory_identifier(directory), self.directory["id"] + ) def test_dir_identifier_empty_directory(self): self.assertEqual( identifiers.directory_identifier(self.empty_directory), - self.empty_directory['id']) + self.empty_directory["id"], + ) linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) revision_example = { - 'id': 'bc0195aad0daa2ad5b0d76cce22b167bc3435590', - 'directory': _x('85a74718d377195e1efd0843ba4f3260bad4fe07'), - 'parents': [_x('01e2d0627a9a6edb24c37db45db5ecb31e9de808')], - 'author': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@linux-foundation.org', - 'fullname': b'Linus Torvalds ' + "id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590", + "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), + "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], + "author": { + "name": b"Linus Torvalds", + "email": b"torvalds@linux-foundation.org", + "fullname": b"Linus Torvalds ", }, - 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'committer': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@linux-foundation.org', - 'fullname': b'Linus Torvalds ' + "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), + "committer": { + "name": b"Linus Torvalds", + "email": b"torvalds@linux-foundation.org", + "fullname": b"Linus Torvalds ", }, - 'committer_date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'message': b'Linux 4.2-rc2\n', - 'type': 'git', - 'synthetic': False + "committer_date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), + "message": b"Linux 4.2-rc2\n", + "type": "git", + "synthetic": False, } class RevisionIdentifier(unittest.TestCase): def setUp(self): - gpgsig = b'''\ + gpgsig = b"""\ -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.13 (Darwin) iQIcBAABAgAGBQJVJcYsAAoJEBiY3kIkQRNJVAUQAJ8/XQIfMqqC5oYeEFfHOPYZ L7qy46bXHVBa9Qd8zAJ2Dou3IbI2ZoF6/Et89K/UggOycMlt5FKV/9toWyuZv4Po L682wonoxX99qvVTHo6+wtnmYO7+G0f82h+qHMErxjP+I6gzRNBvRr+SfY7VlGdK 
wikMKOMWC5smrScSHITnOq1Ews5pe3N7qDYMzK0XVZmgDoaem4RSWMJs4My/qVLN e0CqYWq2A22GX7sXl6pjneJYQvcAXUX+CAzp24QnPSb+Q22Guj91TcxLFcHCTDdn qgqMsEyMiisoglwrCbO+D+1xq9mjN9tNFWP66SQ48mrrHYTBV5sz9eJyDfroJaLP CWgbDTgq6GzRMehHT3hXfYS5NNatjnhkNISXR7pnVP/obIi/vpWh5ll6Gd8q26z+ a/O41UzOaLTeNI365MWT4/cnXohVLRG7iVJbAbCxoQmEgsYMRc/pBAzWJtLfcB2G jdTswYL6+MUdL8sB9pZ82D+BP/YAdHe69CyTu1lk9RT2pYtI/kkfjHubXBCYEJSG +VGllBbYG6idQJpyrOYNRJyrDi9yvDJ2W+S0iQrlZrxzGBVGTB/y65S8C+2WTBcE lf1Qb5GDsQrZWgD+jtWTywOYHtCBwyCKSAXxSARMbNPeak9WPlcW/Jmu+fUcMe2x dg1KdHOa34shrKDaOVzW =od6m ------END PGP SIGNATURE-----''' +-----END PGP SIGNATURE-----""" self.revision = revision_example self.revision_none_metadata = { - 'id': 'bc0195aad0daa2ad5b0d76cce22b167bc3435590', - 'directory': _x('85a74718d377195e1efd0843ba4f3260bad4fe07'), - 'parents': [_x('01e2d0627a9a6edb24c37db45db5ecb31e9de808')], - 'author': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@linux-foundation.org', - }, - 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'committer': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@linux-foundation.org', - }, - 'committer_date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'message': b'Linux 4.2-rc2\n', - 'metadata': None, + "id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590", + "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), + "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], + "author": { + "name": b"Linus Torvalds", + "email": b"torvalds@linux-foundation.org", + }, + "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), + "committer": { + "name": b"Linus Torvalds", + "email": b"torvalds@linux-foundation.org", + }, + "committer_date": datetime.datetime( + 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz + ), + "message": b"Linux 4.2-rc2\n", + "metadata": None, } self.synthetic_revision = { - 'id': b'\xb2\xa7\xe1&\x04\x92\xe3D\xfa\xb3\xcb\xf9\x1b\xc1<\x91' - b'\xe0T&\xfd', - 'author': { - 'name': b'Software Heritage', - 'email': b'robot@softwareheritage.org', - }, - 'date': { - 'timestamp': {'seconds': 1437047495}, - 'offset': 0, - 'negative_utc': False, - }, - 'type': 'tar', - 'committer': { - 'name': b'Software Heritage', - 'email': b'robot@softwareheritage.org', + "id": b"\xb2\xa7\xe1&\x04\x92\xe3D\xfa\xb3\xcb\xf9\x1b\xc1<\x91" + b"\xe0T&\xfd", + "author": { + "name": b"Software Heritage", + "email": b"robot@softwareheritage.org", + }, + "date": { + "timestamp": {"seconds": 1437047495}, + "offset": 0, + "negative_utc": False, + }, + "type": "tar", + "committer": { + "name": b"Software Heritage", + "email": b"robot@softwareheritage.org", + }, + "committer_date": 1437047495, + "synthetic": True, + "parents": [None], + "message": b"synthetic revision message\n", + "directory": b"\xd1\x1f\x00\xa6\xa0\xfe\xa6\x05SA\xd2U\x84\xb5\xa9" + b"e\x16\xc0\xd2\xb8", + "metadata": { + "original_artifact": [ + { + "archive_type": "tar", + "name": "gcc-5.2.0.tar.bz2", + "sha1_git": "39d281aff934d44b439730057e55b055e206a586", + "sha1": "fe3f5390949d47054b613edc36c557eb1d51c18e", + "sha256": "5f835b04b5f7dd4f4d2dc96190ec1621b8d89f" + "2dc6f638f9f8bc1b1014ba8cad", + } + ] }, - 'committer_date': 1437047495, - 'synthetic': True, - 'parents': [None], - 'message': b'synthetic revision message\n', - 'directory': b'\xd1\x1f\x00\xa6\xa0\xfe\xa6\x05SA\xd2U\x84\xb5\xa9' - b'e\x16\xc0\xd2\xb8', - 'metadata': {'original_artifact': [ - {'archive_type': 'tar', - 'name': 'gcc-5.2.0.tar.bz2', - 'sha1_git': '39d281aff934d44b439730057e55b055e206a586', - 'sha1': 
'fe3f5390949d47054b613edc36c557eb1d51c18e', - 'sha256': '5f835b04b5f7dd4f4d2dc96190ec1621b8d89f' - '2dc6f638f9f8bc1b1014ba8cad'}]}, - } # cat commit.txt | git hash-object -t commit --stdin self.revision_with_extra_headers = { - 'id': '010d34f384fa99d047cdd5e2f41e56e5c2feee45', - 'directory': _x('85a74718d377195e1efd0843ba4f3260bad4fe07'), - 'parents': [_x('01e2d0627a9a6edb24c37db45db5ecb31e9de808')], - 'author': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@linux-foundation.org', - 'fullname': b'Linus Torvalds ', - }, - 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'committer': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@linux-foundation.org', - 'fullname': b'Linus Torvalds ', - }, - 'committer_date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'message': b'Linux 4.2-rc2\n', - 'metadata': { - 'extra_headers': [ - ['svn-repo-uuid', '046f1af7-66c2-d61b-5410-ce57b7db7bff'], - ['svn-revision', 10], + "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45", + "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), + "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], + "author": { + "name": b"Linus Torvalds", + "email": b"torvalds@linux-foundation.org", + "fullname": b"Linus Torvalds ", + }, + "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), + "committer": { + "name": b"Linus Torvalds", + "email": b"torvalds@linux-foundation.org", + "fullname": b"Linus Torvalds ", + }, + "committer_date": datetime.datetime( + 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz + ), + "message": b"Linux 4.2-rc2\n", + "metadata": { + "extra_headers": [ + ["svn-repo-uuid", "046f1af7-66c2-d61b-5410-ce57b7db7bff"], + ["svn-revision", 10], ] - } + }, } self.revision_with_gpgsig = { - 'id': '44cc742a8ca17b9c279be4cc195a93a6ef7a320e', - 'directory': _x('b134f9b7dc434f593c0bab696345548b37de0558'), - 'parents': [_x('689664ae944b4692724f13b709a4e4de28b54e57'), - _x('c888305e1efbaa252d01b4e5e6b778f865a97514')], - 'author': { - 'name': b'Jiang Xin', - 'email': b'worldhello.net@gmail.com', - 'fullname': b'Jiang Xin ', - }, - 'date': { - 'timestamp': 1428538899, - 'offset': 480, - }, - 'committer': { - 'name': b'Jiang Xin', - 'email': b'worldhello.net@gmail.com', - }, - 'committer_date': { - 'timestamp': 1428538899, - 'offset': 480, - }, - 'metadata': { - 'extra_headers': [ - ['gpgsig', gpgsig], - ], - }, - 'message': b'''Merge branch 'master' of git://github.com/alexhenrie/git-po + "id": "44cc742a8ca17b9c279be4cc195a93a6ef7a320e", + "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), + "parents": [ + _x("689664ae944b4692724f13b709a4e4de28b54e57"), + _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), + ], + "author": { + "name": b"Jiang Xin", + "email": b"worldhello.net@gmail.com", + "fullname": b"Jiang Xin ", + }, + "date": {"timestamp": 1428538899, "offset": 480,}, + "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, + "committer_date": {"timestamp": 1428538899, "offset": 480,}, + "metadata": {"extra_headers": [["gpgsig", gpgsig],],}, + "message": b"""Merge branch 'master' of git://github.com/alexhenrie/git-po * 'master' of git://github.com/alexhenrie/git-po: l10n: ca.po: update translation -''' +""", } self.revision_no_message = { - 'id': '4cfc623c9238fa92c832beed000ce2d003fd8333', - 'directory': _x('b134f9b7dc434f593c0bab696345548b37de0558'), - 'parents': [_x('689664ae944b4692724f13b709a4e4de28b54e57'), - _x('c888305e1efbaa252d01b4e5e6b778f865a97514')], - 'author': { - 'name': b'Jiang Xin', - 'email': 
b'worldhello.net@gmail.com', - 'fullname': b'Jiang Xin ', - }, - 'date': { - 'timestamp': 1428538899, - 'offset': 480, - }, - 'committer': { - 'name': b'Jiang Xin', - 'email': b'worldhello.net@gmail.com', - }, - 'committer_date': { - 'timestamp': 1428538899, - 'offset': 480, - }, - 'message': None, + "id": "4cfc623c9238fa92c832beed000ce2d003fd8333", + "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), + "parents": [ + _x("689664ae944b4692724f13b709a4e4de28b54e57"), + _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), + ], + "author": { + "name": b"Jiang Xin", + "email": b"worldhello.net@gmail.com", + "fullname": b"Jiang Xin ", + }, + "date": {"timestamp": 1428538899, "offset": 480,}, + "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, + "committer_date": {"timestamp": 1428538899, "offset": 480,}, + "message": None, } self.revision_empty_message = { - 'id': '7442cd78bd3b4966921d6a7f7447417b7acb15eb', - 'directory': _x('b134f9b7dc434f593c0bab696345548b37de0558'), - 'parents': [_x('689664ae944b4692724f13b709a4e4de28b54e57'), - _x('c888305e1efbaa252d01b4e5e6b778f865a97514')], - 'author': { - 'name': b'Jiang Xin', - 'email': b'worldhello.net@gmail.com', - 'fullname': b'Jiang Xin ', - }, - 'date': { - 'timestamp': 1428538899, - 'offset': 480, - }, - 'committer': { - 'name': b'Jiang Xin', - 'email': b'worldhello.net@gmail.com', - }, - 'committer_date': { - 'timestamp': 1428538899, - 'offset': 480, - }, - 'message': b'', + "id": "7442cd78bd3b4966921d6a7f7447417b7acb15eb", + "directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"), + "parents": [ + _x("689664ae944b4692724f13b709a4e4de28b54e57"), + _x("c888305e1efbaa252d01b4e5e6b778f865a97514"), + ], + "author": { + "name": b"Jiang Xin", + "email": b"worldhello.net@gmail.com", + "fullname": b"Jiang Xin ", + }, + "date": {"timestamp": 1428538899, "offset": 480,}, + "committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",}, + "committer_date": {"timestamp": 1428538899, "offset": 480,}, + "message": b"", } self.revision_only_fullname = { - 'id': '010d34f384fa99d047cdd5e2f41e56e5c2feee45', - 'directory': _x('85a74718d377195e1efd0843ba4f3260bad4fe07'), - 'parents': [_x('01e2d0627a9a6edb24c37db45db5ecb31e9de808')], - 'author': { - 'fullname': b'Linus Torvalds ', - }, - 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'committer': { - 'fullname': b'Linus Torvalds ', - }, - 'committer_date': datetime.datetime(2015, 7, 12, 15, 10, 30, - tzinfo=linus_tz), - 'message': b'Linux 4.2-rc2\n', - 'metadata': { - 'extra_headers': [ - ['svn-repo-uuid', '046f1af7-66c2-d61b-5410-ce57b7db7bff'], - ['svn-revision', 10], + "id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45", + "directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"), + "parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")], + "author": {"fullname": b"Linus Torvalds ",}, + "date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), + "committer": { + "fullname": b"Linus Torvalds ", + }, + "committer_date": datetime.datetime( + 2015, 7, 12, 15, 10, 30, tzinfo=linus_tz + ), + "message": b"Linux 4.2-rc2\n", + "metadata": { + "extra_headers": [ + ["svn-repo-uuid", "046f1af7-66c2-d61b-5410-ce57b7db7bff"], + ["svn-revision", 10], ] - } + }, } def test_revision_identifier(self): self.assertEqual( identifiers.revision_identifier(self.revision), - identifiers.identifier_to_str(self.revision['id']), + identifiers.identifier_to_str(self.revision["id"]), ) def test_revision_identifier_none_metadata(self): self.assertEqual( 
identifiers.revision_identifier(self.revision_none_metadata), - identifiers.identifier_to_str(self.revision_none_metadata['id']), + identifiers.identifier_to_str(self.revision_none_metadata["id"]), ) def test_revision_identifier_synthetic(self): self.assertEqual( identifiers.revision_identifier(self.synthetic_revision), - identifiers.identifier_to_str(self.synthetic_revision['id']), + identifiers.identifier_to_str(self.synthetic_revision["id"]), ) def test_revision_identifier_with_extra_headers(self): self.assertEqual( - identifiers.revision_identifier( - self.revision_with_extra_headers), - identifiers.identifier_to_str( - self.revision_with_extra_headers['id']), + identifiers.revision_identifier(self.revision_with_extra_headers), + identifiers.identifier_to_str(self.revision_with_extra_headers["id"]), ) def test_revision_identifier_with_gpgsig(self): self.assertEqual( - identifiers.revision_identifier( - self.revision_with_gpgsig), - identifiers.identifier_to_str( - self.revision_with_gpgsig['id']), + identifiers.revision_identifier(self.revision_with_gpgsig), + identifiers.identifier_to_str(self.revision_with_gpgsig["id"]), ) def test_revision_identifier_no_message(self): self.assertEqual( - identifiers.revision_identifier( - self.revision_no_message), - identifiers.identifier_to_str( - self.revision_no_message['id']), + identifiers.revision_identifier(self.revision_no_message), + identifiers.identifier_to_str(self.revision_no_message["id"]), ) def test_revision_identifier_empty_message(self): self.assertEqual( - identifiers.revision_identifier( - self.revision_empty_message), - identifiers.identifier_to_str( - self.revision_empty_message['id']), + identifiers.revision_identifier(self.revision_empty_message), + identifiers.identifier_to_str(self.revision_empty_message["id"]), ) def test_revision_identifier_only_fullname(self): self.assertEqual( - identifiers.revision_identifier( - self.revision_only_fullname), - identifiers.identifier_to_str( - self.revision_only_fullname['id']), + identifiers.revision_identifier(self.revision_only_fullname), + identifiers.identifier_to_str(self.revision_only_fullname["id"]), ) release_example = { - 'id': '2b10839e32c4c476e9d94492756bb1a3e1ec4aa8', - 'target': b't\x1b"R\xa5\xe1Ml`\xa9\x13\xc7z`\x99\xab\xe7:\x85J', - 'target_type': 'revision', - 'name': b'v2.6.14', - 'author': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@g5.osdl.org', - 'fullname': b'Linus Torvalds ' + "id": "2b10839e32c4c476e9d94492756bb1a3e1ec4aa8", + "target": b't\x1b"R\xa5\xe1Ml`\xa9\x13\xc7z`\x99\xab\xe7:\x85J', + "target_type": "revision", + "name": b"v2.6.14", + "author": { + "name": b"Linus Torvalds", + "email": b"torvalds@g5.osdl.org", + "fullname": b"Linus Torvalds ", }, - 'date': datetime.datetime(2005, 10, 27, 17, 2, 33, - tzinfo=linus_tz), - 'message': b'''\ + "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), + "message": b"""\ Linux 2.6.14 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.1 (GNU/Linux) iD8DBQBDYWq6F3YsRnbiHLsRAmaeAJ9RCez0y8rOBbhSv344h86l/VVcugCeIhO1 wdLOnvj91G4wxYqrvThthbE= =7VeT -----END PGP SIGNATURE----- -''', - 'synthetic': False, +""", + "synthetic": False, } class ReleaseIdentifier(unittest.TestCase): def setUp(self): linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) self.release = release_example self.release_no_author = { - 'id': b'&y\x1a\x8b\xcf\x0em3\xf4:\xefv\x82\xbd\xb5U#mV\xde', - 'target': '9ee1c939d1cb936b1f98e8d81aeffab57bae46ab', - 'target_type': 'revision', - 'name': b'v2.6.12', - 'message': 
b'''\ + "id": b"&y\x1a\x8b\xcf\x0em3\xf4:\xefv\x82\xbd\xb5U#mV\xde", + "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", + "target_type": "revision", + "name": b"v2.6.12", + "message": b"""\ This is the final 2.6.12 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.2.4 (GNU/Linux) iD8DBQBCsykyF3YsRnbiHLsRAvPNAJ482tCZwuxp/bJRz7Q98MHlN83TpACdHr37 o6X/3T+vm8K3bf3driRr34c= =sBHn -----END PGP SIGNATURE----- -''', - 'synthetic': False, +""", + "synthetic": False, } self.release_no_message = { - 'id': 'b6f4f446715f7d9543ef54e41b62982f0db40045', - 'target': '9ee1c939d1cb936b1f98e8d81aeffab57bae46ab', - 'target_type': 'revision', - 'name': b'v2.6.12', - 'author': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@g5.osdl.org', - }, - 'date': datetime.datetime(2005, 10, 27, 17, 2, 33, - tzinfo=linus_tz), - 'message': None, + "id": "b6f4f446715f7d9543ef54e41b62982f0db40045", + "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", + "target_type": "revision", + "name": b"v2.6.12", + "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, + "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), + "message": None, } self.release_empty_message = { - 'id': '71a0aea72444d396575dc25ac37fec87ee3c6492', - 'target': '9ee1c939d1cb936b1f98e8d81aeffab57bae46ab', - 'target_type': 'revision', - 'name': b'v2.6.12', - 'author': { - 'name': b'Linus Torvalds', - 'email': b'torvalds@g5.osdl.org', - }, - 'date': datetime.datetime(2005, 10, 27, 17, 2, 33, - tzinfo=linus_tz), - 'message': b'', + "id": "71a0aea72444d396575dc25ac37fec87ee3c6492", + "target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab", + "target_type": "revision", + "name": b"v2.6.12", + "author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",}, + "date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), + "message": b"", } self.release_negative_utc = { - 'id': '97c8d2573a001f88e72d75f596cf86b12b82fd01', - 'name': b'20081029', - 'target': '54e9abca4c77421e2921f5f156c9fe4a9f7441c7', - 'target_type': 'revision', - 'date': { - 'timestamp': {'seconds': 1225281976}, - 'offset': 0, - 'negative_utc': True, - }, - 'author': { - 'name': b'Otavio Salvador', - 'email': b'otavio@debian.org', - 'id': 17640, - }, - 'synthetic': False, - 'message': b'tagging version 20081029\n\nr56558\n', + "id": "97c8d2573a001f88e72d75f596cf86b12b82fd01", + "name": b"20081029", + "target": "54e9abca4c77421e2921f5f156c9fe4a9f7441c7", + "target_type": "revision", + "date": { + "timestamp": {"seconds": 1225281976}, + "offset": 0, + "negative_utc": True, + }, + "author": { + "name": b"Otavio Salvador", + "email": b"otavio@debian.org", + "id": 17640, + }, + "synthetic": False, + "message": b"tagging version 20081029\n\nr56558\n", } self.release_newline_in_author = { - 'author': { - 'email': b'esycat@gmail.com', - 'fullname': b'Eugene Janusov\n', - 'name': b'Eugene Janusov\n', - }, - 'date': { - 'negative_utc': None, - 'offset': 600, - 'timestamp': { - 'microseconds': 0, - 'seconds': 1377480558, - }, - }, - 'id': b'\\\x98\xf5Y\xd04\x16-\xe2->\xbe\xb9T3\xe6\xf8\x88R1', - 'message': b'Release of v0.3.2.', - 'name': b'0.3.2', - 'synthetic': False, - 'target': (b'\xc0j\xa3\xd9;x\xa2\x86\\I5\x17' - b'\x000\xf8\xc2\xd79o\xd3'), - 'target_type': 'revision', + "author": { + "email": b"esycat@gmail.com", + "fullname": b"Eugene Janusov\n", + "name": b"Eugene Janusov\n", + }, + "date": { + "negative_utc": None, + "offset": 600, + "timestamp": {"microseconds": 0, "seconds": 1377480558,}, + }, + "id": 
b"\\\x98\xf5Y\xd04\x16-\xe2->\xbe\xb9T3\xe6\xf8\x88R1", + "message": b"Release of v0.3.2.", + "name": b"0.3.2", + "synthetic": False, + "target": (b"\xc0j\xa3\xd9;x\xa2\x86\\I5\x17" b"\x000\xf8\xc2\xd79o\xd3"), + "target_type": "revision", } self.release_snapshot_target = dict(self.release) - self.release_snapshot_target['target_type'] = 'snapshot' - self.release_snapshot_target['id'] = ( - 'c29c3ddcc6769a04e54dd69d63a6fdcbc566f850') + self.release_snapshot_target["target_type"] = "snapshot" + self.release_snapshot_target["id"] = "c29c3ddcc6769a04e54dd69d63a6fdcbc566f850" def test_release_identifier(self): self.assertEqual( identifiers.release_identifier(self.release), - identifiers.identifier_to_str(self.release['id']) + identifiers.identifier_to_str(self.release["id"]), ) def test_release_identifier_no_author(self): self.assertEqual( identifiers.release_identifier(self.release_no_author), - identifiers.identifier_to_str(self.release_no_author['id']) + identifiers.identifier_to_str(self.release_no_author["id"]), ) def test_release_identifier_no_message(self): self.assertEqual( identifiers.release_identifier(self.release_no_message), - identifiers.identifier_to_str(self.release_no_message['id']) + identifiers.identifier_to_str(self.release_no_message["id"]), ) def test_release_identifier_empty_message(self): self.assertEqual( identifiers.release_identifier(self.release_empty_message), - identifiers.identifier_to_str(self.release_empty_message['id']) + identifiers.identifier_to_str(self.release_empty_message["id"]), ) def test_release_identifier_negative_utc(self): self.assertEqual( identifiers.release_identifier(self.release_negative_utc), - identifiers.identifier_to_str(self.release_negative_utc['id']) + identifiers.identifier_to_str(self.release_negative_utc["id"]), ) def test_release_identifier_newline_in_author(self): self.assertEqual( identifiers.release_identifier(self.release_newline_in_author), - identifiers.identifier_to_str(self.release_newline_in_author['id']) + identifiers.identifier_to_str(self.release_newline_in_author["id"]), ) def test_release_identifier_snapshot_target(self): self.assertEqual( identifiers.release_identifier(self.release_snapshot_target), - identifiers.identifier_to_str(self.release_snapshot_target['id']) + identifiers.identifier_to_str(self.release_snapshot_target["id"]), ) snapshot_example = { - 'id': _x('6e65b86363953b780d92b0a928f3e8fcdd10db36'), - 'branches': { - b'directory': { - 'target': _x('1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8'), - 'target_type': 'directory', + "id": _x("6e65b86363953b780d92b0a928f3e8fcdd10db36"), + "branches": { + b"directory": { + "target": _x("1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8"), + "target_type": "directory", }, - b'content': { - 'target': _x('fe95a46679d128ff167b7c55df5d02356c5a1ae1'), - 'target_type': 'content', + b"content": { + "target": _x("fe95a46679d128ff167b7c55df5d02356c5a1ae1"), + "target_type": "content", }, - b'alias': { - 'target': b'revision', - 'target_type': 'alias', + b"alias": {"target": b"revision", "target_type": "alias",}, + b"revision": { + "target": _x("aafb16d69fd30ff58afdd69036a26047f3aebdc6"), + "target_type": "revision", }, - b'revision': { - 'target': _x('aafb16d69fd30ff58afdd69036a26047f3aebdc6'), - 'target_type': 'revision', + b"release": { + "target": _x("7045404f3d1c54e6473c71bbb716529fbad4be24"), + "target_type": "release", }, - b'release': { - 'target': _x('7045404f3d1c54e6473c71bbb716529fbad4be24'), - 'target_type': 'release', + b"snapshot": { + "target": 
_x("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), + "target_type": "snapshot", }, - b'snapshot': { - 'target': _x('1a8893e6a86f444e8be8e7bda6cb34fb1735a00e'), - 'target_type': 'snapshot', - }, - b'dangling': None, - } + b"dangling": None, + }, } class SnapshotIdentifier(unittest.TestCase): def setUp(self): super().setUp() self.empty = { - 'id': '1a8893e6a86f444e8be8e7bda6cb34fb1735a00e', - 'branches': {}, + "id": "1a8893e6a86f444e8be8e7bda6cb34fb1735a00e", + "branches": {}, } self.dangling_branch = { - 'id': 'c84502e821eb21ed84e9fd3ec40973abc8b32353', - 'branches': { - b'HEAD': None, - }, + "id": "c84502e821eb21ed84e9fd3ec40973abc8b32353", + "branches": {b"HEAD": None,}, } self.unresolved = { - 'id': '84b4548ea486e4b0a7933fa541ff1503a0afe1e0', - 'branches': { - b'foo': { - 'target': b'bar', - 'target_type': 'alias', - }, - }, + "id": "84b4548ea486e4b0a7933fa541ff1503a0afe1e0", + "branches": {b"foo": {"target": b"bar", "target_type": "alias",},}, } self.all_types = snapshot_example def test_empty_snapshot(self): self.assertEqual( identifiers.snapshot_identifier(self.empty), - identifiers.identifier_to_str(self.empty['id']), + identifiers.identifier_to_str(self.empty["id"]), ) def test_dangling_branch(self): self.assertEqual( identifiers.snapshot_identifier(self.dangling_branch), - identifiers.identifier_to_str(self.dangling_branch['id']), + identifiers.identifier_to_str(self.dangling_branch["id"]), ) def test_unresolved(self): with self.assertRaisesRegex(ValueError, "b'foo' -> b'bar'"): identifiers.snapshot_identifier(self.unresolved) def test_unresolved_force(self): self.assertEqual( - identifiers.snapshot_identifier( - self.unresolved, - ignore_unresolved=True, - ), - identifiers.identifier_to_str(self.unresolved['id']), + identifiers.snapshot_identifier(self.unresolved, ignore_unresolved=True,), + identifiers.identifier_to_str(self.unresolved["id"]), ) def test_all_types(self): self.assertEqual( identifiers.snapshot_identifier(self.all_types), - identifiers.identifier_to_str(self.all_types['id']), + identifiers.identifier_to_str(self.all_types["id"]), ) def test_persistent_identifier(self): - _snapshot_id = _x('c7c108084bc0bf3d81436bf980b46e98bd338453') - _release_id = '22ece559cc7cc2364edc5e5593d63ae8bd229f9f' - _revision_id = '309cf2674ee7a0749978cf8265ab91a60aea0f7d' - _directory_id = 'd198bc9d7a6bcf6db04f476d29314f157507d505' - _content_id = '94a9ed024d3859793618152ea559a168bbcbb5e2' - _snapshot = {'id': _snapshot_id} - _release = {'id': _release_id} - _revision = {'id': _revision_id} - _directory = {'id': _directory_id} - _content = {'sha1_git': _content_id} + _snapshot_id = _x("c7c108084bc0bf3d81436bf980b46e98bd338453") + _release_id = "22ece559cc7cc2364edc5e5593d63ae8bd229f9f" + _revision_id = "309cf2674ee7a0749978cf8265ab91a60aea0f7d" + _directory_id = "d198bc9d7a6bcf6db04f476d29314f157507d505" + _content_id = "94a9ed024d3859793618152ea559a168bbcbb5e2" + _snapshot = {"id": _snapshot_id} + _release = {"id": _release_id} + _revision = {"id": _revision_id} + _directory = {"id": _directory_id} + _content = {"sha1_git": _content_id} for full_type, _hash, expected_persistent_id, version, _meta in [ - (SNAPSHOT, _snapshot_id, - 'swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453', - None, {}), - (RELEASE, _release_id, - 'swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f', - 1, {}), - (REVISION, _revision_id, - 'swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d', - None, {}), - (DIRECTORY, _directory_id, - 'swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505', - None, {}), - (CONTENT, 
_content_id, - 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2', - 1, {}), - (SNAPSHOT, _snapshot, - 'swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453', - None, {}), - (RELEASE, _release, - 'swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f', - 1, {}), - (REVISION, _revision, - 'swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d', - None, {}), - (DIRECTORY, _directory, - 'swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505', - None, {}), - (CONTENT, _content, - 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2', - 1, {}), - (CONTENT, _content, - 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2;origin=1', - 1, {'origin': '1'}), + ( + SNAPSHOT, + _snapshot_id, + "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", + None, + {}, + ), + ( + RELEASE, + _release_id, + "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", + 1, + {}, + ), + ( + REVISION, + _revision_id, + "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", + None, + {}, + ), + ( + DIRECTORY, + _directory_id, + "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", + None, + {}, + ), + ( + CONTENT, + _content_id, + "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", + 1, + {}, + ), + ( + SNAPSHOT, + _snapshot, + "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", + None, + {}, + ), + ( + RELEASE, + _release, + "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", + 1, + {}, + ), + ( + REVISION, + _revision, + "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", + None, + {}, + ), + ( + DIRECTORY, + _directory, + "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", + None, + {}, + ), + ( + CONTENT, + _content, + "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", + 1, + {}, + ), + ( + CONTENT, + _content, + "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2;origin=1", + 1, + {"origin": "1"}, + ), ]: if version: actual_value = identifiers.persistent_identifier( - full_type, _hash, version, metadata=_meta) + full_type, _hash, version, metadata=_meta + ) else: actual_value = identifiers.persistent_identifier( - full_type, _hash, metadata=_meta) + full_type, _hash, metadata=_meta + ) self.assertEqual(actual_value, expected_persistent_id) def test_persistent_identifier_wrong_input(self): - _snapshot_id = 'notahash4bc0bf3d81436bf980b46e98bd338453' - _snapshot = {'id': _snapshot_id} + _snapshot_id = "notahash4bc0bf3d81436bf980b46e98bd338453" + _snapshot = {"id": _snapshot_id} for _type, _hash in [ - (SNAPSHOT, _snapshot_id), - (SNAPSHOT, _snapshot), - ('foo', ''), + (SNAPSHOT, _snapshot_id), + (SNAPSHOT, _snapshot), + ("foo", ""), ]: with self.assertRaises(ValidationError): identifiers.persistent_identifier(_type, _hash) def test_parse_persistent_identifier(self): for pid, _type, _version, _hash in [ - ('swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2', - CONTENT, 1, '94a9ed024d3859793618152ea559a168bbcbb5e2'), - ('swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505', - DIRECTORY, 1, 'd198bc9d7a6bcf6db04f476d29314f157507d505'), - ('swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d', - REVISION, 1, '309cf2674ee7a0749978cf8265ab91a60aea0f7d'), - ('swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f', - RELEASE, 1, '22ece559cc7cc2364edc5e5593d63ae8bd229f9f'), - ('swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453', - SNAPSHOT, 1, 'c7c108084bc0bf3d81436bf980b46e98bd338453'), + ( + "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", + CONTENT, + 1, + "94a9ed024d3859793618152ea559a168bbcbb5e2", + ), + ( + "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505", + DIRECTORY, + 1, + 
"d198bc9d7a6bcf6db04f476d29314f157507d505", + ), + ( + "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d", + REVISION, + 1, + "309cf2674ee7a0749978cf8265ab91a60aea0f7d", + ), + ( + "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f", + RELEASE, + 1, + "22ece559cc7cc2364edc5e5593d63ae8bd229f9f", + ), + ( + "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453", + SNAPSHOT, + 1, + "c7c108084bc0bf3d81436bf980b46e98bd338453", + ), ]: expected_result = PersistentId( - namespace='swh', + namespace="swh", scheme_version=_version, object_type=_type, object_id=_hash, - metadata={} + metadata={}, ) actual_result = identifiers.parse_persistent_identifier(pid) self.assertEqual(actual_result, expected_result) for pid, _type, _version, _hash, _metadata in [ - ('swh:1:cnt:9c95815d9e9d91b8dae8e05d8bbc696fe19f796b;lines=1-18;origin=https://github.com/python/cpython', # noqa - CONTENT, 1, '9c95815d9e9d91b8dae8e05d8bbc696fe19f796b', - { - 'lines': '1-18', - 'origin': 'https://github.com/python/cpython' - }), - ('swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=deb://Debian/packages/linuxdoc-tools', # noqa - DIRECTORY, 1, '0b6959356d30f1a4e9b7f6bca59b9a336464c03d', - { - 'origin': 'deb://Debian/packages/linuxdoc-tools' - }) + ( + "swh:1:cnt:9c95815d9e9d91b8dae8e05d8bbc696fe19f796b;lines=1-18;origin=https://github.com/python/cpython", # noqa + CONTENT, + 1, + "9c95815d9e9d91b8dae8e05d8bbc696fe19f796b", + {"lines": "1-18", "origin": "https://github.com/python/cpython"}, + ), + ( + "swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=deb://Debian/packages/linuxdoc-tools", # noqa + DIRECTORY, + 1, + "0b6959356d30f1a4e9b7f6bca59b9a336464c03d", + {"origin": "deb://Debian/packages/linuxdoc-tools"}, + ), ]: expected_result = PersistentId( - namespace='swh', + namespace="swh", scheme_version=_version, object_type=_type, object_id=_hash, - metadata=_metadata + metadata=_metadata, ) actual_result = identifiers.parse_persistent_identifier(pid) self.assertEqual(actual_result, expected_result) def test_parse_persistent_identifier_parsing_error(self): for pid in [ - ('swh:1:cnt'), - ('swh:1:'), - ('swh:'), - ('swh:1:cnt:'), - ('foo:1:cnt:abc8bc9d7a6bcf6db04f476d29314f157507d505'), - ('swh:2:dir:def8bc9d7a6bcf6db04f476d29314f157507d505'), - ('swh:1:foo:fed8bc9d7a6bcf6db04f476d29314f157507d505'), - ('swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;invalid;' - 'malformed'), - ('swh:1:snp:gh6959356d30f1a4e9b7f6bca59b9a336464c03d'), - ('swh:1:snp:foo'), + ("swh:1:cnt"), + ("swh:1:"), + ("swh:"), + ("swh:1:cnt:"), + ("foo:1:cnt:abc8bc9d7a6bcf6db04f476d29314f157507d505"), + ("swh:2:dir:def8bc9d7a6bcf6db04f476d29314f157507d505"), + ("swh:1:foo:fed8bc9d7a6bcf6db04f476d29314f157507d505"), + ("swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;invalid;" "malformed"), + ("swh:1:snp:gh6959356d30f1a4e9b7f6bca59b9a336464c03d"), + ("swh:1:snp:foo"), ]: with self.assertRaises(ValidationError): identifiers.parse_persistent_identifier(pid) def test_persistentid_class_validation_error(self): for _ns, _version, _type, _id in [ - ('foo', 1, CONTENT, 'abc8bc9d7a6bcf6db04f476d29314f157507d505'), - ('swh', 2, DIRECTORY, 'def8bc9d7a6bcf6db04f476d29314f157507d505'), - ('swh', 1, 'foo', 'fed8bc9d7a6bcf6db04f476d29314f157507d505'), - ('swh', 1, SNAPSHOT, 'gh6959356d30f1a4e9b7f6bca59b9a336464c03d'), + ("foo", 1, CONTENT, "abc8bc9d7a6bcf6db04f476d29314f157507d505"), + ("swh", 2, DIRECTORY, "def8bc9d7a6bcf6db04f476d29314f157507d505"), + ("swh", 1, "foo", "fed8bc9d7a6bcf6db04f476d29314f157507d505"), + ("swh", 1, SNAPSHOT, 
"gh6959356d30f1a4e9b7f6bca59b9a336464c03d"), ]: with self.assertRaises(ValidationError): PersistentId( namespace=_ns, scheme_version=_version, object_type=_type, - object_id=_id + object_id=_id, ) class OriginIdentifier(unittest.TestCase): def setUp(self): self.origin = { - 'url': 'https://github.com/torvalds/linux', + "url": "https://github.com/torvalds/linux", } def test_content_identifier(self): - self.assertEqual(identifiers.origin_identifier(self.origin), - 'b63a575fe3faab7692c9f38fb09d4bb45651bb0f') + self.assertEqual( + identifiers.origin_identifier(self.origin), + "b63a575fe3faab7692c9f38fb09d4bb45651bb0f", + ) diff --git a/swh/model/tests/test_merkle.py b/swh/model/tests/test_merkle.py index 734f7c0..7e3538b 100644 --- a/swh/model/tests/test_merkle.py +++ b/swh/model/tests/test_merkle.py @@ -1,255 +1,243 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.model import merkle class MerkleTestNode(merkle.MerkleNode): - type = 'tested_merkle_node_type' + type = "tested_merkle_node_type" def __init__(self, data): super().__init__(data) self.compute_hash_called = 0 def compute_hash(self): self.compute_hash_called += 1 - child_data = [ - child + b'=' + self[child].hash - for child in sorted(self) - ] - - return ( - b'hash(' - + b', '.join([self.data['value']] + child_data) - + b')' - ) + child_data = [child + b"=" + self[child].hash for child in sorted(self)] + + return b"hash(" + b", ".join([self.data["value"]] + child_data) + b")" class MerkleTestLeaf(merkle.MerkleLeaf): - type = 'tested_merkle_leaf_type' + type = "tested_merkle_leaf_type" def __init__(self, data): super().__init__(data) self.compute_hash_called = 0 def compute_hash(self): self.compute_hash_called += 1 - return b'hash(' + self.data['value'] + b')' + return b"hash(" + self.data["value"] + b")" class TestMerkleLeaf(unittest.TestCase): def setUp(self): - self.data = {'value': b'value'} + self.data = {"value": b"value"} self.instance = MerkleTestLeaf(self.data) def test_equality(self): leaf1 = MerkleTestLeaf(self.data) leaf2 = MerkleTestLeaf(self.data) leaf3 = MerkleTestLeaf({}) self.assertEqual(leaf1, leaf2) self.assertNotEqual(leaf1, leaf3) def test_hash(self): self.assertEqual(self.instance.compute_hash_called, 0) instance_hash = self.instance.hash self.assertEqual(self.instance.compute_hash_called, 1) instance_hash2 = self.instance.hash self.assertEqual(self.instance.compute_hash_called, 1) self.assertEqual(instance_hash, instance_hash2) def test_data(self): self.assertEqual(self.instance.get_data(), self.data) def test_collect(self): collected = self.instance.collect() self.assertEqual( - collected, { - self.instance.type: { - self.instance.hash: self.instance.get_data(), - }, - }, + collected, + {self.instance.type: {self.instance.hash: self.instance.get_data(),},}, ) collected2 = self.instance.collect() self.assertEqual(collected2, {}) self.instance.reset_collect() collected3 = self.instance.collect() self.assertEqual(collected, collected3) def test_leaf(self): - with self.assertRaisesRegex(ValueError, 'is a leaf'): - self.instance[b'key1'] = 'Test' + with self.assertRaisesRegex(ValueError, "is a leaf"): + self.instance[b"key1"] = "Test" - with self.assertRaisesRegex(ValueError, 'is a leaf'): - del self.instance[b'key1'] + with self.assertRaisesRegex(ValueError, "is a leaf"): + del 
self.instance[b"key1"] - with self.assertRaisesRegex(ValueError, 'is a leaf'): - self.instance[b'key1'] + with self.assertRaisesRegex(ValueError, "is a leaf"): + self.instance[b"key1"] - with self.assertRaisesRegex(ValueError, 'is a leaf'): + with self.assertRaisesRegex(ValueError, "is a leaf"): self.instance.update(self.data) class TestMerkleNode(unittest.TestCase): maxDiff = None def setUp(self): - self.root = MerkleTestNode({'value': b'root'}) - self.nodes = {b'root': self.root} - for i in (b'a', b'b', b'c'): - value = b'root/' + i - node = MerkleTestNode({ - 'value': value, - }) + self.root = MerkleTestNode({"value": b"root"}) + self.nodes = {b"root": self.root} + for i in (b"a", b"b", b"c"): + value = b"root/" + i + node = MerkleTestNode({"value": value,}) self.root[i] = node self.nodes[value] = node - for j in (b'a', b'b', b'c'): - value2 = value + b'/' + j - node2 = MerkleTestNode({ - 'value': value2, - }) + for j in (b"a", b"b", b"c"): + value2 = value + b"/" + j + node2 = MerkleTestNode({"value": value2,}) node[j] = node2 self.nodes[value2] = node2 - for k in (b'a', b'b', b'c'): - value3 = value2 + b'/' + j - node3 = MerkleTestNode({ - 'value': value3, - }) + for k in (b"a", b"b", b"c"): + value3 = value2 + b"/" + j + node3 = MerkleTestNode({"value": value3,}) node2[j] = node3 self.nodes[value3] = node3 def test_equality(self): - node1 = merkle.MerkleNode({'foo': b'bar'}) - node2 = merkle.MerkleNode({'foo': b'bar'}) + node1 = merkle.MerkleNode({"foo": b"bar"}) + node2 = merkle.MerkleNode({"foo": b"bar"}) node3 = merkle.MerkleNode({}) self.assertEqual(node1, node2) self.assertNotEqual(node1, node3, node1 == node3) - node1['foo'] = node3 + node1["foo"] = node3 self.assertNotEqual(node1, node2) - node2['foo'] = node3 + node2["foo"] = node3 self.assertEqual(node1, node2) def test_hash(self): for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 0) # Root hash will compute hash for all the nodes hash = self.root.hash for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 1) - self.assertIn(node.data['value'], hash) + self.assertIn(node.data["value"], hash) # Should use the cached value hash2 = self.root.hash self.assertEqual(hash, hash2) for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 1) # Should still use the cached value hash3 = self.root.update_hash(force=False) self.assertEqual(hash, hash3) for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 1) # Force update of the cached value for a deeply nested node - self.root[b'a'][b'b'].update_hash(force=True) + self.root[b"a"][b"b"].update_hash(force=True) for key, node in self.nodes.items(): # update_hash rehashes all children - if key.startswith(b'root/a/b'): + if key.startswith(b"root/a/b"): self.assertEqual(node.compute_hash_called, 2) else: self.assertEqual(node.compute_hash_called, 1) hash4 = self.root.hash self.assertEqual(hash, hash4) for key, node in self.nodes.items(): # update_hash also invalidates all parents - if key in (b'root', b'root/a') or key.startswith(b'root/a/b'): + if key in (b"root", b"root/a") or key.startswith(b"root/a/b"): self.assertEqual(node.compute_hash_called, 2) else: self.assertEqual(node.compute_hash_called, 1) def test_collect(self): collected = self.root.collect() self.assertEqual(len(collected[self.root.type]), len(self.nodes)) for node in self.nodes.values(): self.assertTrue(node.collected) collected2 = self.root.collect() self.assertEqual(collected2, {}) def test_iter_tree(self): nodes = 
list(self.root.iter_tree()) self.assertCountEqual(nodes, self.nodes.values()) def test_get(self): - for key in (b'a', b'b', b'c'): - self.assertEqual(self.root[key], self.nodes[b'root/' + key]) + for key in (b"a", b"b", b"c"): + self.assertEqual(self.root[key], self.nodes[b"root/" + key]) with self.assertRaisesRegex(KeyError, "b'nonexistent'"): - self.root[b'nonexistent'] + self.root[b"nonexistent"] def test_del(self): hash_root = self.root.hash - hash_a = self.nodes[b'root/a'].hash - del self.root[b'a'][b'c'] + hash_a = self.nodes[b"root/a"].hash + del self.root[b"a"][b"c"] hash_root2 = self.root.hash - hash_a2 = self.nodes[b'root/a'].hash + hash_a2 = self.nodes[b"root/a"].hash self.assertNotEqual(hash_root, hash_root2) self.assertNotEqual(hash_a, hash_a2) - self.assertEqual(self.nodes[b'root/a/c'].parents, []) + self.assertEqual(self.nodes[b"root/a/c"].parents, []) with self.assertRaisesRegex(KeyError, "b'nonexistent'"): - del self.root[b'nonexistent'] + del self.root[b"nonexistent"] def test_update(self): hash_root = self.root.hash - hash_b = self.root[b'b'].hash + hash_b = self.root[b"b"].hash new_children = { - b'c': MerkleTestNode({'value': b'root/b/new_c'}), - b'd': MerkleTestNode({'value': b'root/b/d'}), + b"c": MerkleTestNode({"value": b"root/b/new_c"}), + b"d": MerkleTestNode({"value": b"root/b/d"}), } # collect all nodes self.root.collect() - self.root[b'b'].update(new_children) + self.root[b"b"].update(new_children) # Ensure everyone got reparented - self.assertEqual(new_children[b'c'].parents, [self.root[b'b']]) - self.assertEqual(new_children[b'd'].parents, [self.root[b'b']]) - self.assertEqual(self.nodes[b'root/b/c'].parents, []) + self.assertEqual(new_children[b"c"].parents, [self.root[b"b"]]) + self.assertEqual(new_children[b"d"].parents, [self.root[b"b"]]) + self.assertEqual(self.nodes[b"root/b/c"].parents, []) hash_root2 = self.root.hash self.assertNotEqual(hash_root, hash_root2) - self.assertIn(b'root/b/new_c', hash_root2) - self.assertIn(b'root/b/d', hash_root2) + self.assertIn(b"root/b/new_c", hash_root2) + self.assertIn(b"root/b/d", hash_root2) - hash_b2 = self.root[b'b'].hash + hash_b2 = self.root[b"b"].hash self.assertNotEqual(hash_b, hash_b2) for key, node in self.nodes.items(): - if key in (b'root', b'root/b'): + if key in (b"root", b"root/b"): self.assertEqual(node.compute_hash_called, 2) else: self.assertEqual(node.compute_hash_called, 1) # Ensure we collected root, root/b, and both new children collected_after_update = self.root.collect() self.assertCountEqual( collected_after_update[MerkleTestNode.type], - [self.nodes[b'root'].hash, self.nodes[b'root/b'].hash, - new_children[b'c'].hash, new_children[b'd'].hash], + [ + self.nodes[b"root"].hash, + self.nodes[b"root/b"].hash, + new_children[b"c"].hash, + new_children[b"d"].hash, + ], ) # test that noop updates doesn't invalidate anything - self.root[b'a'][b'b'].update({}) + self.root[b"a"][b"b"].update({}) self.assertEqual(self.root.collect(), {}) diff --git a/swh/model/tests/test_model.py b/swh/model/tests/test_model.py index f6164ef..b65f75d 100644 --- a/swh/model/tests/test_model.py +++ b/swh/model/tests/test_model.py @@ -1,480 +1,439 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import attr from attrs_strict import AttributeTypeError from hypothesis import given from 
hypothesis.strategies import binary import pytest from swh.model.model import ( - Content, SkippedContent, Directory, Revision, Release, Snapshot, - Origin, Timestamp, TimestampWithTimezone, - MissingData, Person + Content, + SkippedContent, + Directory, + Revision, + Release, + Snapshot, + Origin, + Timestamp, + TimestampWithTimezone, + MissingData, + Person, ) from swh.model.hashutil import hash_to_bytes, MultiHash import swh.model.hypothesis_strategies as strategies from swh.model.identifiers import ( - directory_identifier, revision_identifier, release_identifier, - snapshot_identifier + directory_identifier, + revision_identifier, + release_identifier, + snapshot_identifier, ) from swh.model.tests.test_identifiers import ( - directory_example, revision_example, release_example, snapshot_example + directory_example, + revision_example, + release_example, + snapshot_example, ) @given(strategies.objects()) def test_todict_inverse_fromdict(objtype_and_obj): (obj_type, obj) = objtype_and_obj - if obj_type in ('origin', 'origin_visit'): + if obj_type in ("origin", "origin_visit"): return obj_as_dict = obj.to_dict() obj_as_dict_copy = copy.deepcopy(obj_as_dict) # Check the composition of to_dict and from_dict is the identity assert obj == type(obj).from_dict(obj_as_dict) # Check from_dict() does not change the input dict assert obj_as_dict == obj_as_dict_copy # Check the composition of from_dict and to_dict is the identity assert obj_as_dict == type(obj).from_dict(obj_as_dict).to_dict() @given(strategies.origins()) def test_todict_origins(origin): obj = origin.to_dict() - assert 'type' not in obj + assert "type" not in obj assert type(origin)(url=origin.url) == type(origin).from_dict(obj) @given(strategies.origin_visits()) def test_todict_origin_visits(origin_visit): obj = origin_visit.to_dict() assert origin_visit == type(origin_visit).from_dict(obj) @given(strategies.origin_visit_updates()) def test_todict_origin_visit_updates(origin_visit_update): obj = origin_visit_update.to_dict() assert origin_visit_update == type(origin_visit_update).from_dict(obj) # Timestamp + @given(strategies.timestamps()) def test_timestamps_strategy(timestamp): attr.validate(timestamp) def test_timestamp_seconds(): attr.validate(Timestamp(seconds=0, microseconds=0)) with pytest.raises(AttributeTypeError): - Timestamp(seconds='0', microseconds=0) + Timestamp(seconds="0", microseconds=0) - attr.validate(Timestamp(seconds=2**63-1, microseconds=0)) + attr.validate(Timestamp(seconds=2 ** 63 - 1, microseconds=0)) with pytest.raises(ValueError): - Timestamp(seconds=2**63, microseconds=0) + Timestamp(seconds=2 ** 63, microseconds=0) - attr.validate(Timestamp(seconds=-2**63, microseconds=0)) + attr.validate(Timestamp(seconds=-(2 ** 63), microseconds=0)) with pytest.raises(ValueError): - Timestamp(seconds=-2**63-1, microseconds=0) + Timestamp(seconds=-(2 ** 63) - 1, microseconds=0) def test_timestamp_microseconds(): attr.validate(Timestamp(seconds=0, microseconds=0)) with pytest.raises(AttributeTypeError): - Timestamp(seconds=0, microseconds='0') + Timestamp(seconds=0, microseconds="0") - attr.validate(Timestamp(seconds=0, microseconds=10**6-1)) + attr.validate(Timestamp(seconds=0, microseconds=10 ** 6 - 1)) with pytest.raises(ValueError): - Timestamp(seconds=0, microseconds=10**6) + Timestamp(seconds=0, microseconds=10 ** 6) with pytest.raises(ValueError): Timestamp(seconds=0, microseconds=-1) def test_timestamp_from_dict(): - assert Timestamp.from_dict({'seconds': 10, 'microseconds': 5}) + assert 
Timestamp.from_dict({"seconds": 10, "microseconds": 5}) with pytest.raises(AttributeTypeError): - Timestamp.from_dict({'seconds': '10', 'microseconds': 5}) + Timestamp.from_dict({"seconds": "10", "microseconds": 5}) with pytest.raises(AttributeTypeError): - Timestamp.from_dict({'seconds': 10, 'microseconds': '5'}) + Timestamp.from_dict({"seconds": 10, "microseconds": "5"}) with pytest.raises(ValueError): - Timestamp.from_dict({'seconds': 0, 'microseconds': -1}) + Timestamp.from_dict({"seconds": 0, "microseconds": -1}) - Timestamp.from_dict({'seconds': 0, 'microseconds': 10**6 - 1}) + Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6 - 1}) with pytest.raises(ValueError): - Timestamp.from_dict({'seconds': 0, 'microseconds': 10**6}) + Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6}) # TimestampWithTimezone + def test_timestampwithtimezone(): ts = Timestamp(seconds=0, microseconds=0) - tstz = TimestampWithTimezone( - timestamp=ts, - offset=0, - negative_utc=False) + tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=False) attr.validate(tstz) assert tstz.negative_utc is False - attr.validate(TimestampWithTimezone( - timestamp=ts, - offset=10, - negative_utc=False)) + attr.validate(TimestampWithTimezone(timestamp=ts, offset=10, negative_utc=False)) - attr.validate(TimestampWithTimezone( - timestamp=ts, - offset=-10, - negative_utc=False)) + attr.validate(TimestampWithTimezone(timestamp=ts, offset=-10, negative_utc=False)) - tstz = TimestampWithTimezone( - timestamp=ts, - offset=0, - negative_utc=True) + tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=True) attr.validate(tstz) assert tstz.negative_utc is True with pytest.raises(AttributeTypeError): TimestampWithTimezone( - timestamp=datetime.datetime.now(), - offset=0, - negative_utc=False) + timestamp=datetime.datetime.now(), offset=0, negative_utc=False + ) with pytest.raises(AttributeTypeError): - TimestampWithTimezone( - timestamp=ts, - offset='0', - negative_utc=False) + TimestampWithTimezone(timestamp=ts, offset="0", negative_utc=False) with pytest.raises(AttributeTypeError): - TimestampWithTimezone( - timestamp=ts, - offset=1.0, - negative_utc=False) + TimestampWithTimezone(timestamp=ts, offset=1.0, negative_utc=False) with pytest.raises(AttributeTypeError): - TimestampWithTimezone( - timestamp=ts, - offset=1, - negative_utc=0) + TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=0) with pytest.raises(ValueError): - TimestampWithTimezone( - timestamp=ts, - offset=1, - negative_utc=True) + TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=True) with pytest.raises(ValueError): - TimestampWithTimezone( - timestamp=ts, - offset=-1, - negative_utc=True) + TimestampWithTimezone(timestamp=ts, offset=-1, negative_utc=True) def test_timestampwithtimezone_from_datetime(): tz = datetime.timezone(datetime.timedelta(minutes=+60)) - date = datetime.datetime( - 2020, 2, 27, 14, 39, 19, tzinfo=tz) + date = datetime.datetime(2020, 2, 27, 14, 39, 19, tzinfo=tz) tstz = TimestampWithTimezone.from_datetime(date) assert tstz == TimestampWithTimezone( - timestamp=Timestamp( - seconds=1582810759, - microseconds=0, - ), + timestamp=Timestamp(seconds=1582810759, microseconds=0,), offset=60, negative_utc=False, ) def test_timestampwithtimezone_from_iso8601(): - date = '2020-02-27 14:39:19.123456+0100' + date = "2020-02-27 14:39:19.123456+0100" tstz = TimestampWithTimezone.from_iso8601(date) assert tstz == TimestampWithTimezone( - timestamp=Timestamp( - seconds=1582810759, - microseconds=123456, 
-        ),
+        timestamp=Timestamp(seconds=1582810759, microseconds=123456,),
         offset=60,
         negative_utc=False,
     )


 def test_timestampwithtimezone_from_iso8601_negative_utc():
-    date = '2020-02-27 13:39:19-0000'
+    date = "2020-02-27 13:39:19-0000"

     tstz = TimestampWithTimezone.from_iso8601(date)

     assert tstz == TimestampWithTimezone(
-        timestamp=Timestamp(
-            seconds=1582810759,
-            microseconds=0,
-        ),
+        timestamp=Timestamp(seconds=1582810759, microseconds=0,),
         offset=0,
         negative_utc=True,
     )


 def test_person_from_fullname():
     """The author should have name, email and fullname filled.

     """
-    actual_person = Person.from_fullname(b'tony <ynot@dagobah>')
+    actual_person = Person.from_fullname(b"tony <ynot@dagobah>")
     assert actual_person == Person(
-        fullname=b'tony <ynot@dagobah>',
-        name=b'tony',
-        email=b'ynot@dagobah',
+        fullname=b"tony <ynot@dagobah>", name=b"tony", email=b"ynot@dagobah",
     )


 def test_person_from_fullname_no_email():
     """The author and fullname should be the same as the input (author).

     """
-    actual_person = Person.from_fullname(b'tony')
-    assert actual_person == Person(
-        fullname=b'tony',
-        name=b'tony',
-        email=None,
-    )
+    actual_person = Person.from_fullname(b"tony")
+    assert actual_person == Person(fullname=b"tony", name=b"tony", email=None,)


 def test_person_from_fullname_empty_person():
     """Empty person has only its fullname filled with the empty byte-string.

     """
-    actual_person = Person.from_fullname(b'')
-    assert actual_person == Person(
-        fullname=b'',
-        name=None,
-        email=None,
-    )
+    actual_person = Person.from_fullname(b"")
+    assert actual_person == Person(fullname=b"", name=None, email=None,)


 def test_git_author_line_to_author():
     # edge case out of the way
     with pytest.raises(TypeError):
         Person.from_fullname(None)

     tests = {
-        b'a <b@c.com>': Person(
-            name=b'a',
-            email=b'b@c.com',
-            fullname=b'a <b@c.com>',
+        b"a <b@c.com>": Person(name=b"a", email=b"b@c.com", fullname=b"a <b@c.com>",),
+        b"<foo@bar.com>": Person(
+            name=None, email=b"foo@bar.com", fullname=b"<foo@bar.com>",
         ),
-        b'<foo@bar.com>': Person(
-            name=None,
-            email=b'foo@bar.com',
-            fullname=b'<foo@bar.com>',
-        ),
         b'malformed <"<br"@ci.com>': Person(
-            name=b'malformed',
+            name=b"malformed",
             email=b'"<br"@ci.com',
             fullname=b'malformed <"<br"@ci.com>',
         ),
-        b'trailing <sp@c.e> ': Person(
-            name=b'trailing',
-            email=b'sp@c.e',
-            fullname=b'trailing <sp@c.e> ',
-        ),
-        b'no<sp@c.e>': Person(
-            name=b'no',
-            email=b'sp@c.e',
-            fullname=b'no<sp@c.e>',
-        ),
-        b' more <sp@c.es>': Person(
-            name=b'more',
-            email=b'sp@c.es',
-            fullname=b' more <sp@c.es>',
+        b"trailing <sp@c.e> ": Person(
+            name=b"trailing", email=b"sp@c.e", fullname=b"trailing <sp@c.e> ",
         ),
-        b' <>': Person(
-            name=None,
-            email=None,
-            fullname=b' <>',
+        b"no<sp@c.e>": Person(name=b"no", email=b"sp@c.e", fullname=b"no<sp@c.e>",),
+        b" more <sp@c.es>": Person(
+            name=b"more", email=b"sp@c.es", fullname=b" more <sp@c.es>",
         ),
+        b" <>": Person(name=None, email=None, fullname=b" <>",),
     }

     for person in sorted(tests):
         expected_person = tests[person]
         assert expected_person == Person.from_fullname(person)


 # Content

+
 def test_content_get_hash():
-    hashes = dict(
-        sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux')
-    c = Content(length=42, status='visible', **hashes)
+    hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
+    c = Content(length=42, status="visible", **hashes)
     for (hash_name, hash_) in hashes.items():
         assert c.get_hash(hash_name) == hash_


 def test_content_hashes():
-    hashes = dict(
-        sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux')
-    c = Content(length=42, status='visible', **hashes)
+    hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
+    c = Content(length=42, status="visible", **hashes)
     assert c.hashes() == hashes


 def test_content_data():
     c = Content(
-        length=42, status='visible', data=b'foo',
-        sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux')
+        length=42,
+        status="visible",
+        data=b"foo",
+        sha1=b"foo",
+        sha1_git=b"bar",
+        sha256=b"baz",
+        blake2s256=b"qux",
+    )

     assert c.with_data() == c


 def test_content_data_missing():
     c = Content(
-        length=42, status='visible',
-        sha1=b'foo', sha1_git=b'bar', sha256=b'baz', blake2s256=b'qux')
+        length=42,
+        status="visible",
+        sha1=b"foo",
+        sha1_git=b"bar",
+        sha256=b"baz",
+        blake2s256=b"qux",
+    )

     with pytest.raises(MissingData):
         c.with_data()


 @given(strategies.present_contents_d())
 def test_content_from_dict(content_d):
     c = Content.from_data(**content_d)
     assert c
-    assert c.ctime == content_d['ctime']
+    assert c.ctime == content_d["ctime"]

     content_d2 = c.to_dict()
     c2 = Content.from_dict(content_d2)
     assert c2.ctime == c.ctime


 def test_content_from_dict_str_ctime():
     # test with ctime as a string
     n = datetime.datetime(2020, 5, 6, 12, 34)
     content_d = {
-        'ctime': n.isoformat(),
-        'data': b'',
-        'length': 0,
-        'sha1': b'\x00',
-        'sha256': b'\x00',
-        'sha1_git': b'\x00',
-        'blake2s256': b'\x00',
-    }
+        "ctime": n.isoformat(),
+        "data": b"",
+        "length": 0,
+        "sha1": b"\x00",
+        "sha256": b"\x00",
+        "sha1_git": b"\x00",
+        "blake2s256": b"\x00",
+    }
     c = Content.from_dict(content_d)
     assert c.ctime == n


 @given(binary(max_size=4096))
 def test_content_from_data(data):
     c = Content.from_data(data)
     assert c.data == data
     assert c.length == len(data)
-    assert c.status == 'visible'
+    assert c.status == "visible"
     for key, value in MultiHash.from_data(data).digest().items():
         assert getattr(c, key) == value


 @given(binary(max_size=4096))
 def test_hidden_content_from_data(data):
-    c = Content.from_data(data, status='hidden')
+    c = Content.from_data(data, status="hidden")
     assert c.data == data
     assert c.length == len(data)
-    assert c.status == 'hidden'
+    assert c.status == "hidden"
     for key, value in MultiHash.from_data(data).digest().items():
         assert getattr(c, key) == value


 # SkippedContent

+
 @given(binary(max_size=4096))
 def test_skipped_content_from_data(data):
-    c = SkippedContent.from_data(data, reason='reason')
-    assert c.reason == 'reason'
+    c = SkippedContent.from_data(data, reason="reason")
+    assert c.reason == "reason"
     assert c.length == len(data)
-    assert c.status == 'absent'
+    assert c.status == "absent"
     for key, value in MultiHash.from_data(data).digest().items():
         assert getattr(c, key) == value


 @given(strategies.skipped_contents_d())
 def test_skipped_content_origin_is_str(skipped_content_d):
     assert SkippedContent.from_dict(skipped_content_d)

-    skipped_content_d['origin'] = 'http://path/to/origin'
+    skipped_content_d["origin"] = "http://path/to/origin"
     assert SkippedContent.from_dict(skipped_content_d)

-    skipped_content_d['origin'] = Origin(url='http://path/to/origin')
-    with pytest.raises(ValueError, match='origin'):
+    skipped_content_d["origin"] = Origin(url="http://path/to/origin")
+    with pytest.raises(ValueError, match="origin"):
         SkippedContent.from_dict(skipped_content_d)


 # ID computation

+
 def test_directory_model_id_computation():
     dir_dict = directory_example.copy()
-    del dir_dict['id']
+    del dir_dict["id"]

     dir_id = hash_to_bytes(directory_identifier(dir_dict))
     dir_model = Directory.from_dict(dir_dict)
     assert dir_model.id == dir_id


 def test_revision_model_id_computation():
     rev_dict = revision_example.copy()
-    del rev_dict['id']
+    del rev_dict["id"]

     rev_id = hash_to_bytes(revision_identifier(rev_dict))
     rev_model = Revision.from_dict(rev_dict)
     assert rev_model.id == rev_id


 def test_revision_model_id_computation_with_no_date():
     """We can have revision with date to None

     """
     rev_dict = revision_example.copy()
-    rev_dict['date'] = None
-    rev_dict['committer_date'] = None
-    del rev_dict['id']
+    rev_dict["date"] = None
+    rev_dict["committer_date"] = None
+    del rev_dict["id"]

     rev_id = hash_to_bytes(revision_identifier(rev_dict))
     rev_model = Revision.from_dict(rev_dict)
     assert rev_model.date is None
     assert rev_model.committer_date is None
     assert rev_model.id == rev_id


 def test_release_model_id_computation():
     rel_dict = release_example.copy()
-    del rel_dict['id']
+    del rel_dict["id"]

     rel_id = hash_to_bytes(release_identifier(rel_dict))
     rel_model = Release.from_dict(rel_dict)
     assert isinstance(rel_model.date, TimestampWithTimezone)
     assert rel_model.id == hash_to_bytes(rel_id)


 def test_snapshot_model_id_computation():
     snp_dict = snapshot_example.copy()
-    del snp_dict['id']
+    del snp_dict["id"]

     snp_id = hash_to_bytes(snapshot_identifier(snp_dict))
     snp_model = Snapshot.from_dict(snp_dict)
     assert snp_model.id == snp_id
diff --git a/swh/model/tests/test_toposort.py b/swh/model/tests/test_toposort.py
index 174368f..d1f841d 100644
--- a/swh/model/tests/test_toposort.py
+++ b/swh/model/tests/test_toposort.py
@@ -1,100 +1,83 @@
 # Copyright (C) 2017-2018 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 import unittest

 from swh.model.toposort import toposort


 def is_toposorted_slow(revision_log):
     """Check (inefficiently) that the given revision log is in any
     topological order.

     Complexity: O(n^2).

     (Note: It's totally possible to write a O(n) is_toposorted function, but
     it requires computing the transitive closure of the input DAG, which
     requires computing a topological ordering of that DAG, which kind of
     defeats the purpose of writing unit tests for toposort().)

     Args:
         revision_log: Revision log as returned by
             swh.storage.Storage.revision_log().

     Returns:
         True if the revision log is topologically sorted.
""" - rev_by_id = {r['id']: r for r in revision_log} + rev_by_id = {r["id"]: r for r in revision_log} def all_parents(revision): - for parent in revision['parents']: + for parent in revision["parents"]: yield parent yield from all_parents(rev_by_id[parent]) visited = set() for rev in revision_log: - visited.add(rev['id']) + visited.add(rev["id"]) if not all(parent in visited for parent in all_parents(rev)): return False return True class TestToposort(unittest.TestCase): def generate_log(self, graph): for node_id, parents in graph.items(): - yield {'id': node_id, 'parents': tuple(parents)} + yield {"id": node_id, "parents": tuple(parents)} def unordered_log(self, log): - return {(d['id'], tuple(d['parents'])) for d in log} + return {(d["id"], tuple(d["parents"])) for d in log} def check(self, graph): log = list(self.generate_log(graph)) topolog = list(toposort(log)) self.assertEqual(len(topolog), len(graph)) self.assertEqual(self.unordered_log(topolog), self.unordered_log(log)) self.assertTrue(is_toposorted_slow(toposort(log))) def test_linked_list(self): - self.check({3: [2], - 2: [1], - 1: []}) + self.check({3: [2], 2: [1], 1: []}) def test_fork(self): - self.check({7: [6], - 6: [4], - 5: [3], - 4: [2], - 3: [2], - 2: [1], - 1: []}) + self.check({7: [6], 6: [4], 5: [3], 4: [2], 3: [2], 2: [1], 1: []}) def test_fork_merge(self): - self.check({8: [7, 5], - 7: [6], - 6: [4], - 5: [3], - 4: [2], - 3: [2], - 2: [1], - 1: []}) + self.check({8: [7, 5], 7: [6], 6: [4], 5: [3], 4: [2], 3: [2], 2: [1], 1: []}) def test_two_origins(self): - self.check({9: [8], - 8: [7, 5], - 7: [6], - 6: [4], - 5: [3], - 4: [], - 3: []}) + self.check({9: [8], 8: [7, 5], 7: [6], 6: [4], 5: [3], 4: [], 3: []}) def test_three_way(self): - self.check({9: [8, 4, 2], - 8: [7, 5], - 7: [6], - 6: [4], - 5: [3], - 4: [2], - 3: [2], - 2: [1], - 1: []}) + self.check( + { + 9: [8, 4, 2], + 8: [7, 5], + 7: [6], + 6: [4], + 5: [3], + 4: [2], + 3: [2], + 2: [1], + 1: [], + } + ) diff --git a/swh/model/tests/test_validators.py b/swh/model/tests/test_validators.py index 691c579..784e6fe 100644 --- a/swh/model/tests/test_validators.py +++ b/swh/model/tests/test_validators.py @@ -1,75 +1,78 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import unittest from swh.model import exceptions, hashutil, validators def hash_data(raw_content): return hashutil.MultiHash.from_data(raw_content).digest() class TestValidators(unittest.TestCase): def setUp(self): self.valid_visible_content = { - 'status': 'visible', - 'length': 5, - 'data': b'1984\n', - 'ctime': datetime.datetime(2015, 11, 22, 16, 33, 56, - tzinfo=datetime.timezone.utc), + "status": "visible", + "length": 5, + "data": b"1984\n", + "ctime": datetime.datetime( + 2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc + ), } - self.valid_visible_content.update( - hash_data(self.valid_visible_content['data'])) + self.valid_visible_content.update(hash_data(self.valid_visible_content["data"])) self.valid_absent_content = { - 'status': 'absent', - 'length': 5, - 'ctime': datetime.datetime(2015, 11, 22, 16, 33, 56, - tzinfo=datetime.timezone.utc), - 'reason': 'Content too large', - 'sha1_git': self.valid_visible_content['sha1_git'], - 'origin': 42, + "status": "absent", + "length": 5, + "ctime": datetime.datetime( + 2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc + ), + 
"reason": "Content too large", + "sha1_git": self.valid_visible_content["sha1_git"], + "origin": 42, } self.invalid_content_hash_mismatch = self.valid_visible_content.copy() self.invalid_content_hash_mismatch.update( - hash_data(b"this is not the data you're looking for")) + hash_data(b"this is not the data you're looking for") + ) def test_validate_content(self): - self.assertTrue( - validators.validate_content(self.valid_visible_content)) + self.assertTrue(validators.validate_content(self.valid_visible_content)) - self.assertTrue( - validators.validate_content(self.valid_absent_content)) + self.assertTrue(validators.validate_content(self.valid_absent_content)) def test_validate_content_hash_mismatch(self): with self.assertRaises(exceptions.ValidationError) as cm: validators.validate_content(self.invalid_content_hash_mismatch) # All the hashes are wrong. The exception should be of the form: # ValidationError({ # NON_FIELD_ERRORS: [ # ValidationError('content-hash-mismatch', 'sha1'), # ValidationError('content-hash-mismatch', 'sha1_git'), # ValidationError('content-hash-mismatch', 'sha256'), # ] # }) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEqual(set(exc.error_dict.keys()), - {exceptions.NON_FIELD_ERRORS}) + self.assertEqual(set(exc.error_dict.keys()), {exceptions.NON_FIELD_ERRORS}) hash_mismatches = exc.error_dict[exceptions.NON_FIELD_ERRORS] self.assertIsInstance(hash_mismatches, list) self.assertEqual(len(hash_mismatches), 4) - self.assertTrue(all(mismatch.code == 'content-hash-mismatch' - for mismatch in hash_mismatches)) - self.assertEqual(set(mismatch.params['hash'] - for mismatch in hash_mismatches), - {'sha1', 'sha1_git', 'sha256', 'blake2s256'}) + self.assertTrue( + all( + mismatch.code == "content-hash-mismatch" for mismatch in hash_mismatches + ) + ) + self.assertEqual( + set(mismatch.params["hash"] for mismatch in hash_mismatches), + {"sha1", "sha1_git", "sha256", "blake2s256"}, + ) diff --git a/swh/model/toposort.py b/swh/model/toposort.py index b0a7231..6e4cba7 100644 --- a/swh/model/toposort.py +++ b/swh/model/toposort.py @@ -1,43 +1,43 @@ # Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import collections def toposort(revision_log): """Perform a topological sort on a revision log graph. Complexity: O(N) (linear in the length of the revision log) Args: revision_log: Revision log as returned by swh.storage.Storage.revision_log(). Yields: The revision log sorted by a topological order """ in_degree = {} # rev_id -> numbers of parents left to compute children = collections.defaultdict(list) # rev_id -> children # Compute the in_degrees and the parents of all the revisions. # Add the roots to the processing queue. queue = collections.deque() for rev in revision_log: - parents = rev['parents'] - in_degree[rev['id']] = len(parents) + parents = rev["parents"] + in_degree[rev["id"]] = len(parents) if not parents: queue.append(rev) for parent in parents: children[parent].append(rev) # Topological sort: yield the 'ready' nodes, decrease the in degree of # their children and add the 'ready' ones to the queue. 
     while queue:
         rev = queue.popleft()
         yield rev
-        for child in children[rev['id']]:
-            in_degree[child['id']] -= 1
-            if in_degree[child['id']] == 0:
+        for child in children[rev["id"]]:
+            in_degree[child["id"]] -= 1
+            if in_degree[child["id"]] == 0:
                 queue.append(child)
diff --git a/swh/model/validators.py b/swh/model/validators.py
index 6d2c370..6cd7fc1 100644
--- a/swh/model/validators.py
+++ b/swh/model/validators.py
@@ -1,77 +1,78 @@
 # Copyright (C) 2015-2018 The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information

 from .exceptions import ValidationError, NON_FIELD_ERRORS
 from . import fields
 from .hashutil import MultiHash, hash_to_bytes


 def validate_content(content):
     """Validate that a content has the correct schema.

     Args: a content (dictionary) to validate."""

     def validate_content_status(status):
-        return fields.validate_enum(status, {'absent', 'visible', 'hidden'})
+        return fields.validate_enum(status, {"absent", "visible", "hidden"})

     def validate_keys(content):
-        hashes = {'sha1', 'sha1_git', 'sha256'}
+        hashes = {"sha1", "sha1_git", "sha256"}
         errors = []

         out = True
-        if content['status'] == 'absent':
+        if content["status"] == "absent":
             try:
-                out = out and fields.validate_all_keys(content, {'reason',
-                                                                 'origin'})
+                out = out and fields.validate_all_keys(content, {"reason", "origin"})
             except ValidationError as e:
                 errors.append(e)

             try:
                 out = out and fields.validate_any_key(content, hashes)
             except ValidationError as e:
                 errors.append(e)
         else:
             try:
                 out = out and fields.validate_all_keys(content, hashes)
             except ValidationError as e:
                 errors.append(e)

         if errors:
             raise ValidationError(errors)

         return out

     def validate_hashes(content):
         errors = []
-        if 'data' in content:
-            hashes = MultiHash.from_data(content['data']).digest()
+        if "data" in content:
+            hashes = MultiHash.from_data(content["data"]).digest()
             for hash_type, computed_hash in hashes.items():
                 if hash_type not in content:
                     continue
                 content_hash = hash_to_bytes(content[hash_type])
                 if content_hash != computed_hash:
-                    errors.append(ValidationError(
-                        'hash mismatch in content for hash %(hash)s',
-                        params={'hash': hash_type},
-                        code='content-hash-mismatch',
-                    ))
+                    errors.append(
+                        ValidationError(
+                            "hash mismatch in content for hash %(hash)s",
+                            params={"hash": hash_type},
+                            code="content-hash-mismatch",
+                        )
+                    )
             if errors:
                 raise ValidationError(errors)

         return True

     content_schema = {
-        'sha1': (False, fields.validate_sha1),
-        'sha1_git': (False, fields.validate_sha1_git),
-        'sha256': (False, fields.validate_sha256),
-        'status': (True, validate_content_status),
-        'length': (True, fields.validate_int),
-        'ctime': (True, fields.validate_datetime),
-        'reason': (False, fields.validate_str),
-        'origin': (False, fields.validate_int),
-        'data': (False, fields.validate_bytes),
+        "sha1": (False, fields.validate_sha1),
+        "sha1_git": (False, fields.validate_sha1_git),
+        "sha256": (False, fields.validate_sha256),
+        "status": (True, validate_content_status),
+        "length": (True, fields.validate_int),
+        "ctime": (True, fields.validate_datetime),
+        "reason": (False, fields.validate_str),
+        "origin": (False, fields.validate_int),
+        "data": (False, fields.validate_bytes),
         NON_FIELD_ERRORS: [validate_keys, validate_hashes],
     }

-    return fields.validate_against_schema('content', content_schema, content)
+    return fields.validate_against_schema("content", content_schema, content)
diff --git a/tox.ini b/tox.ini
index ec89c85..f0adbaf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,27 +1,34 @@
 [tox]
-envlist=flake8,mypy,py3
+envlist=black,flake8,mypy,py3

 [testenv]
 extras =
   testing
 deps =
   pytest-cov
 commands =
   pytest --cov={envsitepackagesdir}/swh/model \
          {envsitepackagesdir}/swh/model \
          --cov-branch {posargs}

+[testenv:black]
+skip_install = true
+deps =
+  black
+commands =
+  {envpython} -m black --check swh
+
 [testenv:flake8]
 skip_install = true
 deps =
   flake8
 commands =
   {envpython} -m flake8

 [testenv:mypy]
 extras =
   testing
 deps =
   mypy
 commands =
   mypy swh
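
As a usage sketch for the toposort() function whose quoting is reformatted above (a minimal sketch, not part of the patch: it assumes the dict-based revision-log shape used by the fixtures in test_toposort.py, and the integer ids are invented for illustration):

    # Sketch: driving swh.model.toposort.toposort on a tiny revision log.
    # The shape (dicts with "id" and "parents" keys) mirrors the fixtures in
    # test_toposort.py; the ids 1..3 are illustrative only.
    from swh.model.toposort import toposort

    revision_log = [
        {"id": 3, "parents": (2,)},  # child of revision 2
        {"id": 2, "parents": (1,)},  # child of revision 1
        {"id": 1, "parents": ()},    # root: in-degree 0, queued first
    ]

    # Kahn's algorithm as implemented in swh/model/toposort.py: roots are
    # yielded first, and a child is yielded only once all of its parents
    # have been yielded, so this prints 1, then 2, then 3.
    for rev in toposort(revision_log):
        print(rev["id"])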