diff --git a/.gitignore b/.gitignore
index 303d302..43b2d61 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,13 @@
-*.pyc
-*.sw?
 *~
+build
 .coverage
+dist
+*.egg-info/
 .eggs/
+.hypothesis
+*.pyc
 __pycache__
-*.egg-info/
-dist
+.pytest_cache
+*.sw?
+.tox
 version.txt
diff --git a/MANIFEST.in b/MANIFEST.in
index e7c46fc..99c8c1a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,6 @@
+include README.md
 include Makefile
 include requirements.txt
 include requirements-swh.txt
 include version.txt
+recursive-include swh/model/tests/data *.tgz
diff --git a/PKG-INFO b/PKG-INFO
index 653b8da..b07e4b8 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,10 +1,38 @@
-Metadata-Version: 1.0
+Metadata-Version: 2.1
 Name: swh.model
-Version: 0.0.27
+Version: 0.0.28
 Summary: Software Heritage data model
 Home-page: https://forge.softwareheritage.org/diffusion/DMOD/
 Author: Software Heritage developers
 Author-email: swh-devel@inria.fr
 License: UNKNOWN
-Description: UNKNOWN
+Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest
+Project-URL: Funding, https://www.softwareheritage.org/donate
+Project-URL: Source, https://forge.softwareheritage.org/source/swh-model
+Description: swh-model
+        =========
+
+        Implementation of the Data model of the Software Heritage project, used to
+        archive source code artifacts.
+
+        This module defines the notion of Persistent Identifier (PID) and provides
+        tools to compute them:
+
+        ```sh
+        $ swh-identify fork.c kmod.c sched/deadline.c
+        swh:1:cnt:2e391c754ae730bd2d8520c2ab497c403220c6e3 fork.c
+        swh:1:cnt:0277d1216f80ae1adeed84a686ed34c9b2931fc2 kmod.c
+        swh:1:cnt:57b939c81bce5d06fa587df8915f05affbe22b82 sched/deadline.c
+
+        $ swh-identify --no-filename /usr/src/linux/kernel/
+        swh:1:dir:f9f858a48d663b3809c9e2f336412717496202ab
+        ```
+
 Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Operating System :: OS Independent
+Classifier: Development Status :: 5 - Production/Stable
+Description-Content-Type: text/markdown
+Provides-Extra: testing
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f26f274
--- /dev/null
+++ b/README.md
@@ -0,0 +1,18 @@
+swh-model
+=========
+
+Implementation of the Data model of the Software Heritage project, used to
+archive source code artifacts.
+
+This module defines the notion of Persistent Identifier (PID) and provides
+tools to compute them:
+
+```sh
+  $ swh-identify fork.c kmod.c sched/deadline.c
+  swh:1:cnt:2e391c754ae730bd2d8520c2ab497c403220c6e3 fork.c
+  swh:1:cnt:0277d1216f80ae1adeed84a686ed34c9b2931fc2 kmod.c
+  swh:1:cnt:57b939c81bce5d06fa587df8915f05affbe22b82 sched/deadline.c
+
+  $ swh-identify --no-filename /usr/src/linux/kernel/
+  swh:1:dir:f9f858a48d663b3809c9e2f336412717496202ab
+```
diff --git a/bin/swh-hashtree b/bin/swh-hashtree
new file mode 100755
index 0000000..faf258f
--- /dev/null
+++ b/bin/swh-hashtree
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+# Usage example:
+# swh-hashtree --path . --ignore '.svn' --ignore '.git-svn' \
+#     --ignore-empty-folder
+# 38f8d2c3a951f6b94007896d0981077e48bbd702
+
+import click
+import os
+
+from swh.model import from_disk, hashutil
+
+
+def combine_filters(*filters):
+    """Combine several ignore filters"""
+    if len(filters) == 0:
+        return from_disk.accept_all_directories
+    elif len(filters) == 1:
+        return filters[0]
+
+    def combined_filter(*args, **kwargs):
+        return all(filter(*args, **kwargs) for filter in filters)
+
+    return combined_filter
+
+
+@click.command()
+@click.option('--path', default='.',
+              help='Optional path to hash.')
+@click.option('--ignore-empty-folder', is_flag=True, default=False,
+              help='Ignore empty folder.')
+@click.option('--ignore', multiple=True,
+              help='Ignore pattern.')
+def main(path, ignore_empty_folder=False, ignore=None):
+
+    filters = []
+    if ignore_empty_folder:
+        filters.append(from_disk.ignore_empty_directories)
+    if ignore:
+        filters.append(
+            from_disk.ignore_named_directories(
+                [os.fsencode(name) for name in ignore]
+            )
+        )
+
+    try:
+        d = from_disk.Directory.from_disk(
+            path=os.fsencode(path),
+            dir_filter=combine_filters(*filters))
+        hash = d.hash
+    except Exception as e:
+        print(e)
+        return
+    else:
+        print(hashutil.hash_to_hex(hash))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/debian/control b/debian/control
index 8124280..8e6257b 100644
--- a/debian/control
+++ b/debian/control
@@ -1,22 +1,22 @@
 Source: swh-model
 Maintainer: Software Heritage developers
 Section: python
 Priority: optional
 Build-Depends: debhelper (>= 9),
                dh-python (>= 2),
                python3 (>= 3.5) | python3-pyblake2,
                python3-all,
                python3-click,
-               python3-nose,
+               python3-pytest,
                python3-setuptools,
                python3-vcversioner
 Standards-Version: 3.9.6
 Homepage: https://forge.softwareheritage.org/diffusion/DMOD/

 Package: python3-swh.model
 Architecture: all
 Depends: ${misc:Depends}, ${python3:Depends}
 Breaks: python3-swh.loader.core (<< 0.0.16~),
         python3-swh.loader.dir (<< 0.0.28~),
         python3-swh.loader.svn (<< 0.0.28~)
 Description: Software Heritage data model
diff --git a/debian/rules b/debian/rules
index 9a1760e..ee36e37 100755
--- a/debian/rules
+++ b/debian/rules
@@ -1,11 +1,11 @@
 #!/usr/bin/make -f

 export PYBUILD_NAME=swh.model
-export PYBUILD_TEST_ARGS=--with-doctest -sv -a !db,!fs
+export PYBUILD_TEST_ARGS=-m 'not db and not fs'

 %:
	dh $@ --with python3 --buildsystem=pybuild

 override_dh_install:
	dh_install
	rm -v $(CURDIR)/debian/python3-*/usr/lib/python*/dist-packages/swh/__init__.py
diff --git a/docs/Makefile b/docs/Makefile
index c30c50a..b97c753 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1 +1,2 @@
 include ../../swh-docs/Makefile.sphinx
+-include Makefile.local
diff --git a/docs/Makefile.local b/docs/Makefile.local
new file mode 100644
index 0000000..352ffd3
--- /dev/null
+++ b/docs/Makefile.local
@@ -0,0 +1,14 @@
+sphinx/html: images
+sphinx/clean: clean-images
+
+images:
+	make -C images/
+clean-images:
+	make -C images/ clean
+
+.PHONY: images clean-images
+
+
+# Local Variables:
+# mode: makefile
+# End:
diff --git a/docs/images/Makefile b/docs/images/Makefile
index 2bc794e..ddc859d 100644
--- a/docs/images/Makefile
+++ b/docs/images/Makefile
@@ -1,25 +1,17 @@
 MERKLE_DAG = swh-merkle-dag.pdf swh-merkle-dag.svg

 BUILD_TARGETS =
 BUILD_TARGETS += $(MERKLE_DAG)

 all: $(BUILD_TARGETS)

-# dia exporters
-
-%.eps: %.dia
-	dia -t eps --export $@ $<
-
 %.svg: %.dia
-	dia -t svg --export $@ $<
-
-# generic converters
-
-%.pdf: %.eps
-	epstopdf $<
+	inkscape -l $@ $<
+%.pdf: %.dia
+	inkscape -A $@ $<

 clean:
	-rm -f $(BUILD_TARGETS)
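For completeness, the directory hashing that `bin/swh-hashtree` exposes on the command line can also be driven from Python. Below is a minimal sketch using the `swh.model.from_disk` filters, combined the same way as `combine_filters()` above; the target path is illustrative:

```python
import os

from swh.model import from_disk, hashutil

# Keep a directory only if every individual filter accepts it, mirroring
# combine_filters() in bin/swh-hashtree.
filters = [
    from_disk.ignore_empty_directories,
    from_disk.ignore_named_directories([b'.git', b'.svn']),
]


def combined_filter(*args, **kwargs):
    return all(f(*args, **kwargs) for f in filters)


# '/tmp/example' is an illustrative path; swh.model handles paths as bytes.
d = from_disk.Directory.from_disk(path=os.fsencode('/tmp/example'),
                                  dir_filter=combined_filter)
print(hashutil.hash_to_hex(d.hash))
```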
diff --git a/docs/index.rst b/docs/index.rst
index 74756e7..55ab5fd 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,23 +1,26 @@
 .. _swh-model:

-Software Heritage - Development Documentation
-=============================================
+Software Heritage - Data model
+==============================
+
+Implementation of the :ref:`data-model` to archive source code artifacts.
+
 .. toctree::
    :maxdepth: 2
    :caption: Contents:

 Overview
 --------

 * :ref:`data-model`
 * :ref:`persistent-identifiers`

 Indices and tables
 ==================

 * :ref:`genindex`
 * :ref:`modindex`
 * :ref:`search`
diff --git a/docs/persistent-identifiers.rst b/docs/persistent-identifiers.rst
index 29bf797..89b0365 100644
--- a/docs/persistent-identifiers.rst
+++ b/docs/persistent-identifiers.rst
@@ -1,193 +1,227 @@
.. _persistent-identifiers:

Persistent identifiers
======================

You can point to objects present in the Software Heritage archive by means of
**persistent identifiers** that are guaranteed to remain stable (persistent)
over time. Their syntax, meaning, and usage are described below. Note that
they are identifiers and not URLs, even though a URL-based resolver for
Software Heritage persistent identifiers is also provided.

A persistent identifier can point to any software artifact (or "object")
available in the Software Heritage archive. Objects come in different types,
and most notably:

* contents
* directories
* revisions
* releases
* snapshots

Each object is identified by an intrinsic, type-specific object identifier
that is embedded in its persistent identifier as described below. Object
identifiers are strong cryptographic hashes computed on the entire set of
object properties to form a `Merkle structure
<https://en.wikipedia.org/wiki/Merkle_tree>`_.

See :ref:`data-model` for an overview of object types and how they are linked
together. See :py:mod:`swh.model.identifiers` for details on how intrinsic
object identifiers are computed.


Syntax
------

Syntactically, persistent identifiers are generated by the ``<identifier>``
entry point of the grammar:

.. code-block:: bnf

    <identifier> ::= "swh" ":" <scheme_version> ":" <object_type> ":" <object_id> ;

    <scheme_version> ::= "1" ;

    <object_type> ::=
          "snp"  (* snapshot *)
        | "rel"  (* release *)
        | "rev"  (* revision *)
        | "dir"  (* directory *)
        | "cnt"  (* content *)
        ;

    <object_id> ::= 40 * <hex_digit> ;  (* intrinsic object id, as hex-encoded SHA1 *)

    <dec_digit> ::= "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" ;

    <hex_digit> ::= <dec_digit> | "a" | "b" | "c" | "d" | "e" | "f" ;


Semantics
---------

``:`` is used as separator between the logical parts of identifiers. The
``swh`` prefix makes explicit that these identifiers are related to *SoftWare
Heritage*.

``1`` (``<scheme_version>``) is the current version of this identifier
*scheme*; future editions will use higher version numbers, possibly breaking
backward compatibility (but without breaking the resolvability of identifiers
that conform to previous versions of the scheme).

A persistent identifier points to a single object, whose type is explicitly
captured by ``<object_type>``:

* ``snp`` identifiers point to **snapshots**,
* ``rel`` to **releases**,
* ``rev`` to **revisions**,
* ``dir`` to **directories**,
* ``cnt`` to **contents**.
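As a quick cross-check, the whole ``<identifier>`` syntax collapses into a
single regular expression. The following standalone sketch is not part of
``swh.model``:

.. code-block:: python

    import re

    # One regular expression equivalent to the <identifier> grammar above.
    PID_RE = re.compile(r'^swh:1:(?:snp|rel|rev|dir|cnt):[0-9a-f]{40}$')

    assert PID_RE.match('swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2')
    assert PID_RE.match('swh:2:cnt:94a9') is None  # bad version, truncated id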
The actual object pointed to is identified by the intrinsic identifier
``<object_id>``, which is a hex-encoded (using lowercase ASCII characters)
SHA1 computed on the content and metadata of the object itself, as follows:

* for **snapshots**, intrinsic identifiers are computed as per
  :py:func:`swh.model.identifiers.snapshot_identifier`

* for **releases**, as per :py:func:`swh.model.identifiers.release_identifier`

* for **revisions**, as per :py:func:`swh.model.identifiers.revision_identifier`

* for **directories**, as per :py:func:`swh.model.identifiers.directory_identifier`

* for **contents**, the intrinsic identifier is the ``sha1_git`` hash of the
  multiple hashes returned by
  :py:func:`swh.model.identifiers.content_identifier`, i.e., the SHA1 of a
  byte sequence obtained by juxtaposing the ASCII string ``"blob"`` (without
  quotes), a space, the length of the content as decimal digits, a NULL byte,
  and the actual content of the file.


Git compatibility
~~~~~~~~~~~~~~~~~

Intrinsic object identifiers for contents, directories, revisions, and
releases are, at present, compatible with the `Git <https://git-scm.com/>`_
way of `computing identifiers
<https://git-scm.com/book/en/v2/Git-Internals-Git-Objects>`_ for its objects.
A Software Heritage content identifier will be identical to a Git blob
identifier of any file with the same content, a Software Heritage revision
identifier will be identical to the corresponding Git commit identifier, etc.
This is not the case for snapshot identifiers, as Git doesn't have a
corresponding object type.

Note that Git compatibility is incidental and is not guaranteed to be
maintained in future versions of this scheme (or Git).


Examples
--------

* ``swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2`` points to the content
  of a file containing the full text of the GPL3 license

* ``swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505`` points to a directory
  containing the source code of the Darktable photography application as it
  was at some point on 4 May 2017

* ``swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d`` points to a commit in
  the development history of Darktable, dated 16 January 2017, that added
  undo/redo support for masks

* ``swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f`` points to Darktable
  release 2.3.0, dated 24 December 2016

* ``swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453`` points to a snapshot
  of the entire Darktable Git repository taken on 4 May 2017 from GitHub
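Thanks to the Git compatibility described above, a content identifier can be
recomputed with nothing but the Python standard library. This sketch applies
the ``"blob"`` construction given for contents to an illustrative byte string:

.. code-block:: python

    import hashlib

    data = b'Hello, World!\n'  # illustrative content
    # "blob", a space, the length as decimal digits, a NULL byte, the content.
    header = b'blob %d\x00' % len(data)
    print('swh:1:cnt:' + hashlib.sha1(header + data).hexdigest())
    # swh:1:cnt:8ab686eafeb1f44702738c8b0f24f2567c36da6d, the same object id
    # `git hash-object` computes for a file with this exact content.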
-Resolution
-----------
-
-Persistent identifiers can be resolved using the Software Heritage Web
-application (see :py:mod:`swh.web`).
-
-In particular, the root endpoint ``/`` can be given a persistent identifier
-and will lead to the browsing page of the corresponding object, like this:
-``https://archive.softwareheritage.org/<identifier>``. For example:
-
-* `<https://archive.softwareheritage.org/swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2>`_
-* `<https://archive.softwareheritage.org/swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505>`_
-* `<https://archive.softwareheritage.org/swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d>`_
-* `<https://archive.softwareheritage.org/swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f>`_
-* `<https://archive.softwareheritage.org/swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453>`_
-

Contextual information
======================

It is often useful to complement persistent identifiers with **contextual
information** about where the identified object has been found as well as
which specific parts of it are of interest. To that end it is possible, via a
dedicated syntax, to extend persistent identifiers with the following pieces
of information:

* the **software origin** where an object has been found/observed
* the **line number(s)** of interest, usually within a content object


Syntax
------

The full syntax to complement identifiers with contextual information is
given by the ``<identifier_with_context>`` entry point of the grammar:

.. code-block:: bnf

    <identifier_with_context> ::= <identifier> [<lines_ctxt>] [<origin_ctxt>]

    <lines_ctxt> ::= ";" "lines" "=" <line_number> ["-" <line_number>]

    <origin_ctxt> ::= ";" "origin" "=" <url>

    <line_number> ::= <dec_digit> + ;

    <url> ::= (* RFC 3986 compliant URLs *) ;


Semantics
---------

``;`` is used as separator between persistent identifiers and additional
optional contextual information. Each piece of contextual information is
specified as a key/value pair, using ``=`` as a separator.

The following pieces of contextual information are supported:

* line numbers: it is possible to specify a single line number or a line
  range, separating two numbers with ``-``. Note that line numbers are purely
  indicative and are not meant to be stable, as in some degenerate cases
  (e.g., text files which mix different types of line terminators) it is
  impossible to resolve them unambiguously.

* software origin: where a given object has been found or observed in the
  wild, as the URI that was used by Software Heritage to ingest the object
  into the archive


+Resolution
+==========
+
+
+Dedicated resolvers
+-------------------
+
+Persistent identifiers can be resolved using the Software Heritage Web
+application (see :py:mod:`swh.web`). In particular, the **root endpoint**
+``/`` can be given a persistent identifier and will lead to the browsing page
+of the corresponding object, like this:
+``https://archive.softwareheritage.org/<identifier>``.
+
+A **dedicated** ``/resolve`` **endpoint** of the HTTP API is also available to
+explicitly request persistent identifier resolution; see:
+:http:get:`/api/1/resolve/(swh_id)/`.
+
+Examples:
+
+* `<https://archive.softwareheritage.org/swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2>`_
+* `<https://archive.softwareheritage.org/swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505>`_
+* `<https://archive.softwareheritage.org/swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d>`_
+* `<https://archive.softwareheritage.org/swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f>`_
+* `<https://archive.softwareheritage.org/swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453>`_
+
+
+External resolvers
+------------------
+
+The following **independent resolvers** support resolution of Software
+Heritage persistent identifiers:
+
+* `Identifiers.org <https://identifiers.org>`_; see:
+  `<https://identifiers.org/swh/>`_ (registry identifier `MIR:00000655
+  <https://www.ebi.ac.uk/miriam/main/collections/MIR:00000655>`_).
+
+* `Name-to-Thing (N2T) <https://n2t.net/>`_
+
+Examples:
+
+* `<https://identifiers.org/swh:swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2>`_
+* `<https://identifiers.org/swh:swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505>`_
+* `<https://n2t.net/swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d>`_
+* `<https://n2t.net/swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f>`_
+* `<https://n2t.net/swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453>`_
+
+Note that resolution via Identifiers.org does not support contextual
+information, due to syntactic incompatibilities.
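Combining the two grammars with the resolution rules above, assembling a
contextual identifier and its resolver URL is plain string concatenation; the
origin URL below is illustrative:

.. code-block:: python

    core = 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2'

    # ';'-separated key/value pairs, per <lines_ctxt> and <origin_ctxt> above.
    contextual = core + ';lines=9-15;origin=https://example.org/repo.git'

    # The archive's root endpoint resolves any identifier appended to it.
    print('https://archive.softwareheritage.org/' + contextual)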
diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..e86f7f4 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +addopts = --doctest-modules +norecursedirs = docs diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000..e079f8a --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1 @@ +pytest diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 index 7e4a47c..c28e4bf --- a/setup.py +++ b/setup.py @@ -1,53 +1,89 @@ -import hashlib +#!/usr/bin/env python3 +# Copyright (C) 2015-2018 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information from setuptools import setup, find_packages +import hashlib + +from os import path +from io import open + +here = path.abspath(path.dirname(__file__)) +# Get the long description from the README file +with open(path.join(here, 'README.md'), encoding='utf-8') as f: + long_description = f.read() + + +def parse_requirements(name=None): + if name: + reqf = 'requirements-%s.txt' % name + else: + reqf = 'requirements.txt' -def parse_requirements(): requirements = [] - for reqf in ('requirements.txt', 'requirements-swh.txt'): - with open(reqf) as f: - for line in f.readlines(): - line = line.strip() - if not line or line.startswith('#'): - continue - requirements.append(line) + if not path.exists(reqf): + return requirements + + with open(reqf) as f: + for line in f.readlines(): + line = line.strip() + if not line or line.startswith('#'): + continue + requirements.append(line) return requirements -extra_requirements = [] +blake2_requirements = [] pyblake2_hash_sets = [ # Built-in implementation in Python 3.6+ {'blake2s', 'blake2b'}, # Potentially shipped by OpenSSL 1.1 (e.g. Python 3.5 in Debian stretch # has these) {'blake2s256', 'blake2b512'}, ] for pyblake2_hashes in pyblake2_hash_sets: if not pyblake2_hashes - set(hashlib.algorithms_available): # The required blake2 hashes have been found break else: # None of the possible sets of blake2 hashes are available. 
# use pyblake2 instead - extra_requirements.append('pyblake2') + blake2_requirements.append('pyblake2') setup( name='swh.model', description='Software Heritage data model', + long_description=long_description, + long_description_content_type='text/markdown', author='Software Heritage developers', author_email='swh-devel@inria.fr', url='https://forge.softwareheritage.org/diffusion/DMOD/', - packages=find_packages(), # packages's modules - scripts=[], # scripts to package - install_requires=parse_requirements() + extra_requirements, + packages=find_packages(), + setup_requires=['vcversioner'], + install_requires=(parse_requirements() + parse_requirements('swh') + + blake2_requirements), + extras_require={'testing': parse_requirements('test')}, + vcversioner={}, + include_package_data=True, entry_points=''' [console_scripts] swh-identify=swh.model.cli:identify ''', - setup_requires=['vcversioner'], - vcversioner={}, - include_package_data=True, + classifiers=[ + "Programming Language :: Python :: 3", + "Intended Audience :: Developers", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Operating System :: OS Independent", + "Development Status :: 5 - Production/Stable", + ], + project_urls={ + 'Bug Reports': 'https://forge.softwareheritage.org/maniphest', + 'Funding': 'https://www.softwareheritage.org/donate', + 'Source': 'https://forge.softwareheritage.org/source/swh-model', + }, ) diff --git a/swh.model.egg-info/PKG-INFO b/swh.model.egg-info/PKG-INFO index 653b8da..b07e4b8 100644 --- a/swh.model.egg-info/PKG-INFO +++ b/swh.model.egg-info/PKG-INFO @@ -1,10 +1,38 @@ -Metadata-Version: 1.0 +Metadata-Version: 2.1 Name: swh.model -Version: 0.0.27 +Version: 0.0.28 Summary: Software Heritage data model Home-page: https://forge.softwareheritage.org/diffusion/DMOD/ Author: Software Heritage developers Author-email: swh-devel@inria.fr License: UNKNOWN -Description: UNKNOWN +Project-URL: Bug Reports, https://forge.softwareheritage.org/maniphest +Project-URL: Funding, https://www.softwareheritage.org/donate +Project-URL: Source, https://forge.softwareheritage.org/source/swh-model +Description: swh-model + ========= + + Implementation of the Data model of the Software Heritage project, used to + archive source code artifacts. 
+ + This module defines the notion of Persistent Identifier (PID) and provides + tools to compute them: + + ```sh + $ swh-identify fork.c kmod.c sched/deadline.c + swh:1:cnt:2e391c754ae730bd2d8520c2ab497c403220c6e3 fork.c + swh:1:cnt:0277d1216f80ae1adeed84a686ed34c9b2931fc2 kmod.c + swh:1:cnt:57b939c81bce5d06fa587df8915f05affbe22b82 sched/deadline.c + + $ swh-identify --no-filename /usr/src/linux/kernel/ + swh:1:dir:f9f858a48d663b3809c9e2f336412717496202ab + ``` + Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3) +Classifier: Operating System :: OS Independent +Classifier: Development Status :: 5 - Production/Stable +Description-Content-Type: text/markdown +Provides-Extra: testing diff --git a/swh.model.egg-info/SOURCES.txt b/swh.model.egg-info/SOURCES.txt index f6a2f94..81e3d7d 100644 --- a/swh.model.egg-info/SOURCES.txt +++ b/swh.model.egg-info/SOURCES.txt @@ -1,62 +1,69 @@ .gitignore AUTHORS LICENSE MANIFEST.in Makefile Makefile.local +README.md +pytest.ini requirements-swh.txt +requirements-test.txt requirements.txt setup.py +tox.ini version.txt bin/git-revhash +bin/swh-hashtree bin/swh-revhash debian/changelog debian/compat debian/control debian/copyright debian/rules debian/source/format docs/.gitignore docs/Makefile +docs/Makefile.local docs/conf.py docs/data-model.rst docs/index.rst docs/persistent-identifiers.rst docs/_static/.placeholder docs/_templates/.placeholder docs/images/.gitignore docs/images/Makefile docs/images/swh-merkle-dag.dia swh/__init__.py swh.model.egg-info/PKG-INFO swh.model.egg-info/SOURCES.txt swh.model.egg-info/dependency_links.txt swh.model.egg-info/entry_points.txt swh.model.egg-info/requires.txt swh.model.egg-info/top_level.txt swh/model/__init__.py swh/model/cli.py swh/model/exceptions.py swh/model/from_disk.py swh/model/hashutil.py swh/model/identifiers.py swh/model/merkle.py swh/model/toposort.py swh/model/validators.py swh/model/fields/__init__.py swh/model/fields/compound.py swh/model/fields/hashes.py swh/model/fields/simple.py swh/model/tests/__init__.py swh/model/tests/generate_testdata_from_disk.py swh/model/tests/test_cli.py swh/model/tests/test_from_disk.py swh/model/tests/test_hashutil.py swh/model/tests/test_identifiers.py swh/model/tests/test_merkle.py swh/model/tests/test_toposort.py swh/model/tests/test_validators.py +swh/model/tests/data/dir-folders/sample-folder.tgz swh/model/tests/fields/__init__.py swh/model/tests/fields/test_compound.py swh/model/tests/fields/test_hashes.py swh/model/tests/fields/test_simple.py \ No newline at end of file diff --git a/swh.model.egg-info/requires.txt b/swh.model.egg-info/requires.txt index dbcd308..091cefe 100644 --- a/swh.model.egg-info/requires.txt +++ b/swh.model.egg-info/requires.txt @@ -1,2 +1,5 @@ Click vcversioner + +[testing] +pytest diff --git a/swh/model/from_disk.py b/swh/model/from_disk.py index f9f3729..bfd7c7c 100644 --- a/swh/model/from_disk.py +++ b/swh/model/from_disk.py @@ -1,346 +1,349 @@ -# Copyright (C) 2017 The Software Heritage developers +# Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import enum import os import stat -from . 
import hashutil +from .hashutil import MultiHash, HASH_BLOCK_SIZE from .merkle import MerkleLeaf, MerkleNode from .identifiers import ( directory_identifier, identifier_to_bytes as id_to_bytes, identifier_to_str as id_to_str, ) class DentryPerms(enum.IntEnum): """Admissible permissions for directory entries.""" content = 0o100644 """Content""" executable_content = 0o100755 """Executable content (e.g. executable script)""" symlink = 0o120000 """Symbolic link""" directory = 0o040000 """Directory""" revision = 0o160000 """Revision (e.g. submodule)""" def mode_to_perms(mode): """Convert a file mode to a permission compatible with Software Heritage directory entries Args: mode (int): a file mode as returned by :func:`os.stat` in :attr:`os.stat_result.st_mode` Returns: DentryPerms: one of the following values: :const:`DentryPerms.content`: plain file :const:`DentryPerms.executable_content`: executable file :const:`DentryPerms.symlink`: symbolic link :const:`DentryPerms.directory`: directory """ if stat.S_ISLNK(mode): return DentryPerms.symlink if stat.S_ISDIR(mode): return DentryPerms.directory else: # file is executable in any way if mode & (0o111): return DentryPerms.executable_content else: return DentryPerms.content class Content(MerkleLeaf): """Representation of a Software Heritage content as a node in a Merkle tree. The current Merkle hash for the Content nodes is the `sha1_git`, which makes it consistent with what :class:`Directory` uses for its own hash computation. """ __slots__ = [] type = 'content' @classmethod def from_bytes(cls, *, mode, data): """Convert data (raw :class:`bytes`) to a Software Heritage content entry Args: mode (int): a file mode (passed to :func:`mode_to_perms`) data (bytes): raw contents of the file """ - ret = hashutil.hash_data(data) + ret = MultiHash.from_data(data).digest() ret['length'] = len(data) ret['perms'] = mode_to_perms(mode) ret['data'] = data return cls(ret) @classmethod def from_symlink(cls, *, path, mode): """Convert a symbolic link to a Software Heritage content entry""" return cls.from_bytes(mode=mode, data=os.readlink(path)) @classmethod def from_file(cls, *, path, data=False, save_path=False): - """Compute the Software Heritage content entry corresponding to an on-disk - file. + """Compute the Software Heritage content entry corresponding to an + on-disk file. 
The returned dictionary contains keys useful for both: - loading the content in the archive (hashes, `length`) - using the content as a directory entry in a directory Args: path (bytes): path to the file for which we're computing the content entry data (bool): add the file data to the entry save_path (bool): add the file path to the entry + """ file_stat = os.lstat(path) mode = file_stat.st_mode if stat.S_ISLNK(mode): # Symbolic link: return a file whose contents are the link target return cls.from_symlink(path=path, mode=mode) elif not stat.S_ISREG(mode): # not a regular file: return the empty file instead return cls.from_bytes(mode=mode, data=b'') length = file_stat.st_size if not data: - ret = hashutil.hash_path(path) + ret = MultiHash.from_path(path).digest() else: + h = MultiHash(length=length) chunks = [] - - def append_chunk(x, chunks=chunks): - chunks.append(x) - with open(path, 'rb') as fobj: - ret = hashutil.hash_file(fobj, length=length, - chunk_cb=append_chunk) - + while True: + chunk = fobj.read(HASH_BLOCK_SIZE) + if not chunk: + break + h.update(chunk) + chunks.append(chunk) + + ret = h.digest() ret['data'] = b''.join(chunks) if save_path: ret['path'] = path ret['perms'] = mode_to_perms(mode) ret['length'] = length obj = cls(ret) return obj def __repr__(self): return 'Content(id=%s)' % id_to_str(self.hash) def compute_hash(self): return self.data['sha1_git'] def accept_all_directories(dirname, entries): """Default filter for :func:`Directory.from_disk` accepting all directories Args: dirname (bytes): directory name entries (list): directory entries """ return True def ignore_empty_directories(dirname, entries): """Filter for :func:`directory_to_objects` ignoring empty directories Args: dirname (bytes): directory name entries (list): directory entries Returns: True if the directory is not empty, false if the directory is empty """ return bool(entries) def ignore_named_directories(names, *, case_sensitive=True): """Filter for :func:`directory_to_objects` to ignore directories named one of names. Args: names (list of bytes): names to ignore case_sensitive (bool): whether to do the filtering in a case sensitive way Returns: a directory filter for :func:`directory_to_objects` """ if not case_sensitive: names = [name.lower() for name in names] def named_filter(dirname, entries, names=names, case_sensitive=case_sensitive): if case_sensitive: return dirname not in names else: return dirname.lower() not in names return named_filter class Directory(MerkleNode): """Representation of a Software Heritage directory as a node in a Merkle Tree. This class can be used to generate, from an on-disk directory, all the objects that need to be sent to the Software Heritage archive. The :func:`from_disk` constructor allows you to generate the data structure from a directory on disk. The resulting :class:`Directory` can then be manipulated as a dictionary, using the path as key. The :func:`collect` method is used to retrieve all the objects that need to be added to the Software Heritage archive since the last collection, by class (contents and directories). When using the dict-like methods to update the contents of the directory, the affected levels of hierarchy are reset and can be collected again using the same method. This enables the efficient collection of updated nodes, for instance when the client is applying diffs. 
""" __slots__ = ['__entries'] type = 'directory' @classmethod def from_disk(cls, *, path, data=False, save_path=False, dir_filter=accept_all_directories): """Compute the Software Heritage objects for a given directory tree Args: path (bytes): the directory to traverse data (bool): whether to add the data to the content objects save_path (bool): whether to add the path to the content objects dir_filter (function): a filter to ignore some directories by name or contents. Takes two arguments: dirname and entries, and returns True if the directory should be added, False if the directory should be ignored. """ top_path = path dirs = {} for root, dentries, fentries in os.walk(top_path, topdown=False): entries = {} # Join fentries and dentries in the same processing, as symbolic # links to directories appear in dentries... for name in fentries + dentries: path = os.path.join(root, name) if not os.path.isdir(path) or os.path.islink(path): content = Content.from_file(path=path, data=data, save_path=save_path) entries[name] = content else: if dir_filter(name, dirs[path].entries): entries[name] = dirs[path] dirs[root] = cls({'name': os.path.basename(root)}) dirs[root].update(entries) return dirs[top_path] def __init__(self, data=None): super().__init__(data=data) self.__entries = None def invalidate_hash(self): self.__entries = None super().invalidate_hash() @staticmethod def child_to_directory_entry(name, child): if isinstance(child, Directory): return { 'type': 'dir', 'perms': DentryPerms.directory, 'target': child.hash, 'name': name, } elif isinstance(child, Content): return { 'type': 'file', 'perms': child.data['perms'], 'target': child.hash, 'name': name, } else: raise ValueError('unknown child') def get_data(self, **kwargs): return { 'id': self.hash, 'entries': self.entries, } @property def entries(self): if self.__entries is None: self.__entries = [ self.child_to_directory_entry(name, child) for name, child in self.items() ] return self.__entries def compute_hash(self): return id_to_bytes(directory_identifier({'entries': self.entries})) def __getitem__(self, key): if not isinstance(key, bytes): raise ValueError('Can only get a bytes from Directory') # Convenience shortcut if key == b'': return self if b'/' not in key: return super().__getitem__(key) else: key1, key2 = key.split(b'/', 1) return self.__getitem__(key1)[key2] def __setitem__(self, key, value): if not isinstance(key, bytes): raise ValueError('Can only set a bytes Directory entry') if not isinstance(value, (Content, Directory)): raise ValueError('Can only set a Directory entry to a Content or ' 'Directory') if key == b'': raise ValueError('Directory entry must have a name') if b'\x00' in key: raise ValueError('Directory entry name must not contain nul bytes') if b'/' not in key: return super().__setitem__(key, value) else: key1, key2 = key.rsplit(b'/', 1) self[key1].__setitem__(key2, value) def __delitem__(self, key): if not isinstance(key, bytes): raise ValueError('Can only delete a bytes Directory entry') if b'/' not in key: super().__delitem__(key) else: key1, key2 = key.rsplit(b'/', 1) del self[key1][key2] def __repr__(self): return 'Directory(id=%s, entries=[%s])' % ( id_to_str(self.hash), ', '.join(str(entry) for entry in self), ) diff --git a/swh/model/hashutil.py b/swh/model/hashutil.py index d8249bc..e58f687 100644 --- a/swh/model/hashutil.py +++ b/swh/model/hashutil.py @@ -1,455 +1,453 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # 
License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Module in charge of hashing function definitions. This is the base module use to compute swh's hashes. Only a subset of hashing algorithms is supported as defined in the ALGORITHMS set. Any provided algorithms not in that list will result in a ValueError explaining the error. This module defines a MultiHash class to ease the softwareheritage hashing algorithms computation. This allows to compute hashes from file object, path, data using a similar interface as what the standard hashlib module provides. Basic usage examples: - file object: MultiHash.from_file( file_object, hash_names=DEFAULT_ALGORITHMS).digest() - path (filepath): MultiHash.from_path(b'foo').hexdigest() - data (bytes): MultiHash.from_data(b'foo').bytehexdigest() "Complex" usage, defining a swh hashlib instance first: - To compute length, integrate the length to the set of algorithms to compute, for example: - h = MultiHash(hash_names=set({'length'}).union(DEFAULT_ALGORITHMS)) - with open(filepath, 'rb') as f: - h.update(f.read(HASH_BLOCK_SIZE)) - hashes = h.digest() # returns a dict of {hash_algo_name: hash_in_bytes} + .. code-block:: python - for chunk in - # then use h as you would + h = MultiHash(hash_names=set({'length'}).union(DEFAULT_ALGORITHMS)) + with open(filepath, 'rb') as f: + h.update(f.read(HASH_BLOCK_SIZE)) + hashes = h.digest() # returns a dict of {hash_algo_name: hash_in_bytes} - Write alongside computing hashing algorithms (from a stream), example: - h = MultiHash(length=length) - with open(filepath, 'wb') as f: - for chunk in r.iter_content(): # r a stream of sort - h.update(chunk) - f.write(chunk) - hashes = h.hexdigest() # returns a dict of {hash_algo_name: hash_in_hex} + .. code-block:: python + + h = MultiHash(length=length) + with open(filepath, 'wb') as f: + for chunk in r.iter_content(): # r a stream of sort + h.update(chunk) + f.write(chunk) + hashes = h.hexdigest() # returns a dict of {hash_algo_name: hash_in_hex} - Note: Prior to this, we would have to use chunk_cb (cf. hash_file, - hash_path) + Note: Prior to this, we would have to use chunk_cb (cf. hash_file, + hash_path) This module also defines the following (deprecated) hashing functions: - hash_file: Hash the contents of the given file object with the given algorithms (defaulting to DEFAULT_ALGORITHMS if none provided). - hash_data: Hash the given binary blob with the given algorithms (defaulting to DEFAULT_ALGORITHMS if none provided). - hash_path: Hash the contents of the file at the given path with the given algorithms (defaulting to DEFAULT_ALGORITHMS if none provided). """ import binascii import functools import hashlib import os from io import BytesIO ALGORITHMS = set(['sha1', 'sha256', 'sha1_git', 'blake2s256', 'blake2b512']) """Hashing algorithms supported by this module""" DEFAULT_ALGORITHMS = set(['sha1', 'sha256', 'sha1_git', 'blake2s256']) """Algorithms computed by default when calling the functions from this module. Subset of :const:`ALGORITHMS`. """ HASH_BLOCK_SIZE = 32768 """Block size for streaming hash computations made in this module""" _blake2_hash_cache = {} class MultiHash: """Hashutil class to support multiple hashes computation. Args: hash_names (set): Set of hash algorithms (+ optionally length) to compute hashes (cf. DEFAULT_ALGORITHMS) length (int): Length of the total sum of chunks to read If the length is provided as algorithm, the length is also computed and returned. 
""" def __init__(self, hash_names=DEFAULT_ALGORITHMS, length=None): self.state = {} self.track_length = False for name in hash_names: if name == 'length': self.state['length'] = 0 self.track_length = True else: self.state[name] = _new_hash(name, length) @classmethod def from_state(cls, state, track_length): ret = cls([]) ret.state = state ret.track_length = track_length @classmethod - def from_file(cls, file, hash_names=DEFAULT_ALGORITHMS, length=None): + def from_file(cls, fobj, hash_names=DEFAULT_ALGORITHMS, length=None): ret = cls(length=length, hash_names=hash_names) - for chunk in file: + while True: + chunk = fobj.read(HASH_BLOCK_SIZE) + if not chunk: + break ret.update(chunk) return ret @classmethod - def from_path(cls, path, hash_names=DEFAULT_ALGORITHMS, length=None, - track_length=True): - if not length: - length = os.path.getsize(path) + def from_path(cls, path, hash_names=DEFAULT_ALGORITHMS): + length = os.path.getsize(path) with open(path, 'rb') as f: ret = cls.from_file(f, hash_names=hash_names, length=length) - # For compatibility reason with `hash_path` - if track_length: - ret.state['length'] = length return ret @classmethod - def from_data(cls, data, hash_names=DEFAULT_ALGORITHMS, length=None): - if not length: - length = len(data) + def from_data(cls, data, hash_names=DEFAULT_ALGORITHMS): + length = len(data) fobj = BytesIO(data) return cls.from_file(fobj, hash_names=hash_names, length=length) def update(self, chunk): for name, h in self.state.items(): if name == 'length': continue h.update(chunk) if self.track_length: self.state['length'] += len(chunk) def digest(self): return { name: h.digest() if name != 'length' else h for name, h in self.state.items() } def hexdigest(self): return { name: h.hexdigest() if name != 'length' else h for name, h in self.state.items() } def bytehexdigest(self): return { name: hash_to_bytehex(h.digest()) if name != 'length' else h for name, h in self.state.items() } def copy(self): copied_state = { name: h.copy() if name != 'length' else h for name, h in self.state.items() } return self.from_state(copied_state, self.track_length) def _new_blake2_hash(algo): """Return a function that initializes a blake2 hash. """ if algo in _blake2_hash_cache: return _blake2_hash_cache[algo]() lalgo = algo.lower() if not lalgo.startswith('blake2'): raise ValueError('Algorithm %s is not a blake2 hash' % algo) blake_family = lalgo[:7] digest_size = None if lalgo[7:]: try: digest_size, remainder = divmod(int(lalgo[7:]), 8) except ValueError: raise ValueError( 'Unknown digest size for algo %s' % algo ) from None if remainder: raise ValueError( 'Digest size for algorithm %s must be a multiple of 8' % algo ) if lalgo in hashlib.algorithms_available: # Handle the case where OpenSSL ships the given algorithm # (e.g. Python 3.5 on Debian 9 stretch) _blake2_hash_cache[algo] = lambda: hashlib.new(lalgo) else: # Try using the built-in implementation for Python 3.6+ if blake_family in hashlib.algorithms_available: blake2 = getattr(hashlib, blake_family) else: import pyblake2 blake2 = getattr(pyblake2, blake_family) _blake2_hash_cache[algo] = lambda: blake2(digest_size=digest_size) return _blake2_hash_cache[algo]() def _new_hashlib_hash(algo): """Initialize a digest object from hashlib. 
Handle the swh-specific names for the blake2-related algorithms """ if algo.startswith('blake2'): return _new_blake2_hash(algo) else: return hashlib.new(algo) def _new_git_hash(base_algo, git_type, length): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm, and feed it with the header for a git object of the given type and length. The header for hashing a git object consists of: - The type of the object (encoded in ASCII) - One ASCII space (\x20) - The length of the object (decimal encoded in ASCII) - One NUL byte Args: base_algo (str from :const:`ALGORITHMS`): a hashlib-supported algorithm git_type: the type of the git object (supposedly one of 'blob', 'commit', 'tag', 'tree') length: the length of the git object you're encoding Returns: a hashutil.hash object """ h = _new_hashlib_hash(base_algo) git_header = '%s %d\0' % (git_type, length) h.update(git_header.encode('ascii')) return h def _new_hash(algo, length=None): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm. See the constant ALGORITHMS for the list of supported algorithms. If a git-specific hashing algorithm is requested (e.g., "sha1_git"), the hashing object will be pre-fed with the needed header; for this to work, length must be given. Args: algo (str): a hashing algorithm (one of ALGORITHMS) length (int): the length of the hashed payload (needed for git-specific algorithms) Returns: a hashutil.hash object Raises: ValueError if algo is unknown, or length is missing for a git-specific hash. """ if algo not in ALGORITHMS: raise ValueError( 'Unexpected hashing algorithm %s, expected one of %s' % (algo, ', '.join(sorted(ALGORITHMS)))) if algo.endswith('_git'): if length is None: raise ValueError('Missing length for git hashing algorithm') base_algo = algo[:-4] return _new_git_hash(base_algo, 'blob', length) return _new_hashlib_hash(algo) def hash_file(fobj, length=None, algorithms=DEFAULT_ALGORITHMS, chunk_cb=None): """(Deprecated) cf. MultiHash.from_file Hash the contents of the given file object with the given algorithms. Args: fobj: a file-like object length (int): the length of the contents of the file-like object (for the git-specific algorithms) algorithms (set): the hashing algorithms to be used, as an iterable over strings chunk_cb (fun): a callback function taking a chunk of data as parameter Returns: a dict mapping each algorithm to a digest (bytes by default). Raises: ValueError if algorithms contains an unknown hash algorithm. """ h = MultiHash(algorithms, length) while True: chunk = fobj.read(HASH_BLOCK_SIZE) if not chunk: break h.update(chunk) if chunk_cb: chunk_cb(chunk) return h.digest() def hash_path(path, algorithms=DEFAULT_ALGORITHMS, chunk_cb=None): """(deprecated) cf. MultiHash.from_path Hash the contents of the file at the given path with the given algorithms. Args: path (str): the path of the file to hash algorithms (set): the hashing algorithms used chunk_cb (fun): a callback function taking a chunk of data as parameter Returns: a dict mapping each algorithm to a bytes digest. Raises: ValueError if algorithms contains an unknown hash algorithm. OSError on file access error """ length = os.path.getsize(path) with open(path, 'rb') as fobj: hashes = hash_file(fobj, length, algorithms, chunk_cb=chunk_cb) hashes['length'] = length return hashes def hash_data(data, algorithms=DEFAULT_ALGORITHMS): """(deprecated) cf. MultiHash.from_data Hash the given binary blob with the given algorithms. 
Args: data (bytes): raw content to hash algorithms (set): the hashing algorithms used Returns: a dict mapping each algorithm to a bytes digest Raises: TypeError if data does not support the buffer interface. ValueError if algorithms contains an unknown hash algorithm. """ return MultiHash.from_data(data, hash_names=algorithms).digest() def hash_git_data(data, git_type, base_algo='sha1'): """Hash the given data as a git object of type git_type. Args: data: a bytes object git_type: the git object type base_algo: the base hashing algorithm used (default: sha1) Returns: a dict mapping each algorithm to a bytes digest Raises: ValueError if the git_type is unexpected. """ git_object_types = {'blob', 'tree', 'commit', 'tag', 'snapshot'} if git_type not in git_object_types: raise ValueError('Unexpected git object type %s, expected one of %s' % (git_type, ', '.join(sorted(git_object_types)))) h = _new_git_hash(base_algo, git_type, len(data)) h.update(data) return h.digest() @functools.lru_cache() def hash_to_hex(hash): """Converts a hash (in hex or bytes form) to its hexadecimal ascii form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: str: the hexadecimal form of the hash """ if isinstance(hash, str): return hash return binascii.hexlify(hash).decode('ascii') @functools.lru_cache() def hash_to_bytehex(hash): """Converts a hash to its hexadecimal bytes representation Args: hash (bytes): a :class:`bytes` hash Returns: bytes: the hexadecimal form of the hash, as :class:`bytes` """ return binascii.hexlify(hash) @functools.lru_cache() def hash_to_bytes(hash): """Converts a hash (in hex or bytes form) to its raw bytes form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: bytes: the :class:`bytes` form of the hash """ if isinstance(hash, bytes): return hash return bytes.fromhex(hash) @functools.lru_cache() def bytehex_to_hash(hex): """Converts a hexadecimal bytes representation of a hash to that hash Args: hash (bytes): a :class:`bytes` containing the hexadecimal form of the hash encoded in ascii Returns: bytes: the :class:`bytes` form of the hash """ return hash_to_bytes(hex.decode()) diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py index e7608e9..083efd4 100644 --- a/swh/model/identifiers.py +++ b/swh/model/identifiers.py @@ -1,792 +1,791 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime from collections import namedtuple from functools import lru_cache from .exceptions import ValidationError from .fields.hashes import validate_sha1 -from .hashutil import hash_data, hash_git_data, DEFAULT_ALGORITHMS -from .hashutil import hash_to_hex +from .hashutil import hash_git_data, hash_to_hex, MultiHash SNAPSHOT = 'snapshot' REVISION = 'revision' RELEASE = 'release' DIRECTORY = 'directory' CONTENT = 'content' @lru_cache() def identifier_to_bytes(identifier): """Convert a text identifier to bytes. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 20 bytestring corresponding to the given identifier Raises: ValueError: if the identifier is of an unexpected type or length. 
""" if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( 'Wrong length for bytes identifier %s, expected 20' % len(identifier)) return identifier if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( 'Wrong length for str identifier %s, expected 40' % len(identifier)) return bytes.fromhex(identifier) raise ValueError('Wrong type for identifier %s, expected bytes or str' % identifier.__class__.__name__) @lru_cache() def identifier_to_str(identifier): """Convert an identifier to an hexadecimal string. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 40 string corresponding to the given identifier, hex encoded Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( 'Wrong length for str identifier %s, expected 40' % len(identifier)) return identifier if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( 'Wrong length for bytes identifier %s, expected 20' % len(identifier)) return binascii.hexlify(identifier).decode() raise ValueError('Wrong type for identifier %s, expected bytes or str' % identifier.__class__.__name__) def content_identifier(content): """Return the intrinsic identifier for a content. A content's identifier is the sha1, sha1_git and sha256 checksums of its data. Args: content: a content conforming to the Software Heritage schema Returns: A dictionary with all the hashes for the data Raises: KeyError: if the content doesn't have a data member. """ - return hash_data(content['data'], DEFAULT_ALGORITHMS) + return MultiHash.from_data(content['data']).digest() def _sort_key(entry): """The sorting key for tree entries""" if entry['type'] == 'dir': return entry['name'] + b'/' else: return entry['name'] @lru_cache() def _perms_to_bytes(perms): """Convert the perms value to its bytes representation""" oc = oct(perms)[2:] return oc.encode('ascii') def escape_newlines(snippet): """Escape the newlines present in snippet according to git rules. New lines in git manifests are escaped by indenting the next line by one space. """ if b'\n' in snippet: return b'\n '.join(snippet.split(b'\n')) else: return snippet def directory_identifier(directory): """Return the intrinsic identifier for a directory. A directory's identifier is the tree sha1 à la git of a directory listing, using the following algorithm, which is equivalent to the git algorithm for trees: 1. Entries of the directory are sorted using the name (or the name with '/' appended for directory entries) as key, in bytes order. 2. 
For each entry of the directory, the following bytes are output: - the octal representation of the permissions for the entry (stored in the 'perms' member), which is a representation of the entry type: - b'100644' (int 33188) for files - b'100755' (int 33261) for executable files - b'120000' (int 40960) for symbolic links - b'40000' (int 16384) for directories - b'160000' (int 57344) for references to revisions - an ascii space (b'\x20') - the entry's name (as raw bytes), stored in the 'name' member - a null byte (b'\x00') - the 20 byte long identifier of the object pointed at by the entry, stored in the 'target' member: - for files or executable files: their blob sha1_git - for symbolic links: the blob sha1_git of a file containing the link destination - for directories: their intrinsic identifier - for revisions: their intrinsic identifier (Note that there is no separator between entries) """ components = [] for entry in sorted(directory['entries'], key=_sort_key): components.extend([ _perms_to_bytes(entry['perms']), b'\x20', entry['name'], b'\x00', identifier_to_bytes(entry['target']), ]) return identifier_to_str(hash_git_data(b''.join(components), 'tree')) def format_date(date): """Convert a date object into an UTC timestamp encoded as ascii bytes. Git stores timestamps as an integer number of seconds since the UNIX epoch. However, Software Heritage stores timestamps as an integer number of microseconds (postgres type "datetime with timezone"). Therefore, we print timestamps with no microseconds as integers, and timestamps with microseconds as floating point values. We elide the trailing zeroes from microsecond values, to "future-proof" our representation if we ever need more precision in timestamps. """ if not isinstance(date, dict): raise ValueError('format_date only supports dicts, %r received' % date) seconds = date.get('seconds', 0) microseconds = date.get('microseconds', 0) if not microseconds: return str(seconds).encode() else: float_value = ('%d.%06d' % (seconds, microseconds)) return float_value.rstrip('0').encode() @lru_cache() def format_offset(offset, negative_utc=None): """Convert an integer number of minutes into an offset representation. The offset representation is [+-]hhmm where: - hh is the number of hours; - mm is the number of minutes. A null offset is represented as +0000. """ if offset < 0 or offset == 0 and negative_utc: sign = '-' else: sign = '+' hours = abs(offset) // 60 minutes = abs(offset) % 60 t = '%s%02d%02d' % (sign, hours, minutes) return t.encode() def normalize_timestamp(time_representation): """Normalize a time representation for processing by Software Heritage This function supports a numeric timestamp (representing a number of seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a :obj:`datetime.datetime` object (with timezone information), or a normalized Software Heritage time representation (idempotency). Args: time_representation: the representation of a timestamp Returns: dict: a normalized dictionary with three keys: - timestamp: a dict with two optional keys: - seconds: the integral number of seconds since the UNIX epoch - microseconds: the integral number of microseconds - offset: the timezone offset as a number of minutes relative to UTC - negative_utc: a boolean representing whether the offset is -0000 when offset = 0. 
""" if time_representation is None: return None negative_utc = False if isinstance(time_representation, dict): ts = time_representation['timestamp'] if isinstance(ts, dict): seconds = ts.get('seconds', 0) microseconds = ts.get('microseconds', 0) elif isinstance(ts, int): seconds = ts microseconds = 0 else: raise ValueError( 'normalize_timestamp received non-integer timestamp member:' ' %r' % ts) offset = time_representation['offset'] if 'negative_utc' in time_representation: negative_utc = time_representation['negative_utc'] elif isinstance(time_representation, datetime.datetime): seconds = int(time_representation.timestamp()) microseconds = time_representation.microsecond utcoffset = time_representation.utcoffset() if utcoffset is None: raise ValueError( 'normalize_timestamp received datetime without timezone: %s' % time_representation) # utcoffset is an integer number of minutes seconds_offset = utcoffset.total_seconds() offset = int(seconds_offset) // 60 elif isinstance(time_representation, int): seconds = time_representation microseconds = 0 offset = 0 else: raise ValueError( 'normalize_timestamp received non-integer timestamp:' ' %r' % time_representation) return { 'timestamp': { 'seconds': seconds, 'microseconds': microseconds, }, 'offset': offset, 'negative_utc': negative_utc, } def format_author(author): """Format the specification of an author. An author is either a byte string (passed unchanged), or a dict with three keys, fullname, name and email. If the fullname exists, return it; if it doesn't, we construct a fullname using the following heuristics: if the name value is None, we return the email in angle brackets, else, we return the name, a space, and the email in angle brackets. """ if isinstance(author, bytes) or author is None: return author if 'fullname' in author: return author['fullname'] ret = [] if author['name'] is not None: ret.append(author['name']) if author['email'] is not None: ret.append(b''.join([b'<', author['email'], b'>'])) return b' '.join(ret) def format_author_line(header, author, date_offset): """Format a an author line according to git standards. An author line has three components: - a header, describing the type of author (author, committer, tagger) - a name and email, which is an arbitrary bytestring - optionally, a timestamp with UTC offset specification The author line is formatted thus:: `header` `name and email`[ `timestamp` `utc_offset`] The timestamp is encoded as a (decimal) number of seconds since the UNIX epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we support fractional timestamps, using a dot as the separator for the decimal part. The utc offset is a number of minutes encoded as '[+-]HHMM'. Note some tools can pass a negative offset corresponding to the UTC timezone ('-0000'), which is valid and is encoded as such. For convenience, this function returns the whole line with its trailing newline. Args: header: the header of the author line (one of 'author', 'committer', 'tagger') author: an author specification (dict with two bytes values: name and email, or byte value) date_offset: a normalized date/time representation as returned by :func:`normalize_timestamp`. 
Returns: the newline-terminated byte string containing the author line """ ret = [header.encode(), b' ', escape_newlines(format_author(author))] date_offset = normalize_timestamp(date_offset) if date_offset is not None: date_f = format_date(date_offset['timestamp']) offset_f = format_offset(date_offset['offset'], date_offset['negative_utc']) ret.extend([b' ', date_f, b' ', offset_f]) ret.append(b'\n') return b''.join(ret) def revision_identifier(revision): """Return the intrinsic identifier for a revision. The fields used for the revision identifier computation are: - directory - parents - author - author_date - committer - committer_date - metadata -> extra_headers - message A revision's identifier is the 'git'-checksum of a commit manifest constructed as follows (newlines are a single ASCII newline character):: tree [for each parent in parents] parent [end for each parents] author committer [for each key, value in extra_headers] [end for each extra_headers] The directory identifier is the ascii representation of its hexadecimal encoding. Author and committer are formatted with the :func:`format_author` function. Dates are formatted with the :func:`format_offset` function. Extra headers are an ordered list of [key, value] pairs. Keys are strings and get encoded to utf-8 for identifier computation. Values are either byte strings, unicode strings (that get encoded to utf-8), or integers (that get encoded to their utf-8 decimal representation). Multiline extra header values are escaped by indenting the continuation lines with one ascii space. If the message is None, the manifest ends with the last header. Else, the message is appended to the headers after an empty line. The checksum of the full manifest is computed using the 'commit' git object type. 
""" components = [ b'tree ', identifier_to_str(revision['directory']).encode(), b'\n', ] for parent in revision['parents']: if parent: components.extend([ b'parent ', identifier_to_str(parent).encode(), b'\n', ]) components.extend([ format_author_line('author', revision['author'], revision['date']), format_author_line('committer', revision['committer'], revision['committer_date']), ]) # Handle extra headers metadata = revision.get('metadata') if not metadata: metadata = {} for key, value in metadata.get('extra_headers', []): # Integer values: decimal representation if isinstance(value, int): value = str(value).encode('utf-8') # Unicode string values: utf-8 encoding if isinstance(value, str): value = value.encode('utf-8') # encode the key to utf-8 components.extend([key.encode('utf-8'), b' ', escape_newlines(value), b'\n']) if revision['message'] is not None: components.extend([b'\n', revision['message']]) commit_raw = b''.join(components) return identifier_to_str(hash_git_data(commit_raw, 'commit')) def target_type_to_git(target_type): """Convert a software heritage target type to a git object type""" return { 'content': b'blob', 'directory': b'tree', 'revision': b'commit', 'release': b'tag', }[target_type] def release_identifier(release): """Return the intrinsic identifier for a release.""" components = [ b'object ', identifier_to_str(release['target']).encode(), b'\n', b'type ', target_type_to_git(release['target_type']), b'\n', b'tag ', release['name'], b'\n', ] if 'author' in release and release['author']: components.append( format_author_line('tagger', release['author'], release['date']) ) if release['message'] is not None: components.extend([b'\n', release['message']]) return identifier_to_str(hash_git_data(b''.join(components), 'tag')) def snapshot_identifier(snapshot, *, ignore_unresolved=False): """Return the intrinsic identifier for a snapshot. Snapshots are a set of named branches, which are pointers to objects at any level of the Software Heritage DAG. As well as pointing to other objects in the Software Heritage DAG, branches can also be *alias*es, in which case their target is the name of another branch in the same snapshot, or *dangling*, in which case the target is unknown (and represented by the ``None`` value). A snapshot identifier is a salted sha1 (using the git hashing algorithm with the ``snapshot`` object type) of a manifest following the algorithm: 1. Branches are sorted using the name as key, in bytes order. 2. For each branch, the following bytes are output: - the type of the branch target: - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` for the corresponding entries in the DAG; - ``alias`` for branches referencing another branch; - ``dangling`` for dangling branches - an ascii space (``\\x20``) - the branch name (as raw bytes) - a null byte (``\\x00``) - the length of the target identifier, as an ascii-encoded decimal number (``20`` for current intrinsic identifiers, ``0`` for dangling branches, the length of the target branch name for branch aliases) - a colon (``:``) - the identifier of the target object pointed at by the branch, stored in the 'target' member: - for contents: their *sha1_git* - for directories, revisions, releases or snapshots: their intrinsic identifier - for branch aliases, the name of the target branch (as raw bytes) - for dangling branches, the empty string Note that, akin to directory manifests, there is no separator between entries. 
Because of symbolic branches, identifiers are of arbitrary length but are length-encoded to avoid ambiguity. Args: snapshot (dict): the snapshot of which to compute the identifier. A single entry is needed, ``'branches'``, which is itself a :class:`dict` mapping each branch to its target ignore_unresolved (bool): if `True`, ignore unresolved branch aliases. Returns: str: the intrinsic identifier for `snapshot` """ unresolved = [] lines = [] for name, target in sorted(snapshot['branches'].items()): if not target: target_type = b'dangling' target_id = b'' elif target['target_type'] == 'alias': target_type = b'alias' target_id = target['target'] if target_id not in snapshot['branches'] or target_id == name: unresolved.append((name, target_id)) else: target_type = target['target_type'].encode() target_id = identifier_to_bytes(target['target']) lines.extend([ target_type, b'\x20', name, b'\x00', ('%d:' % len(target_id)).encode(), target_id, ]) if unresolved and not ignore_unresolved: raise ValueError('Branch aliases unresolved: %s' % ', '.join('%s -> %s' % (name, target) for name, target in unresolved)) return identifier_to_str(hash_git_data(b''.join(lines), 'snapshot')) _object_type_map = { SNAPSHOT: { 'short_name': 'snp', 'key_id': 'id' }, RELEASE: { 'short_name': 'rel', 'key_id': 'id' }, REVISION: { 'short_name': 'rev', 'key_id': 'id' }, DIRECTORY: { 'short_name': 'dir', 'key_id': 'id' }, CONTENT: { 'short_name': 'cnt', 'key_id': 'sha1_git' } } PERSISTENT_IDENTIFIER_TYPES = ['snp', 'rel', 'rev', 'dir', 'cnt'] PERSISTENT_IDENTIFIER_KEYS = [ 'namespace', 'scheme_version', 'object_type', 'object_id', 'metadata'] PERSISTENT_IDENTIFIER_PARTS_SEP = ';' class PersistentId(namedtuple('PersistentId', PERSISTENT_IDENTIFIER_KEYS)): """ Named tuple holding the relevant info associated to a Software Heritage persistent identifier. 
Args: namespace (str): the namespace of the identifier, defaults to 'swh' scheme_version (int): the scheme version of the identifier, defaults to 1 object_type (str): the type of object the identifier points to, either 'content', 'directory', 'release', 'revision' or 'snapshot' object_id (dict/bytes/str): object's dict representation or object identifier metadata (dict): optional dict filled with metadata related to pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Once created, it contains the following attributes: Attributes: namespace (str): the namespace of the identifier scheme_version (int): the scheme version of the identifier object_type (str): the type of object the identifier points to object_id (str): hexadecimal representation of the object hash metadata (dict): metadata related to the pointed object To get the raw persistent identifier string from an instance of this named tuple, use the :func:`str` function:: pid = PersistentId(object_type='content', object_id='8ff44f081d43176474b267de5451f2c2e88089d0') pid_str = str(pid) # 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0' """ # noqa __slots__ = () def __new__(cls, namespace='swh', scheme_version=1, object_type='', object_id='', metadata={}): o = _object_type_map.get(object_type) if not o: raise ValidationError('Wrong input: Supported types are %s' % ( list(_object_type_map.keys()))) # internal swh representation resolution if isinstance(object_id, dict): object_id = object_id[o['key_id']] validate_sha1(object_id) # can raise if invalid hash object_id = hash_to_hex(object_id) return super(cls, PersistentId).__new__( cls, namespace, scheme_version, object_type, object_id, metadata) def __str__(self): o = _object_type_map.get(self.object_type) pid = '%s:%s:%s:%s' % (self.namespace, self.scheme_version, o['short_name'], self.object_id) if self.metadata: for k, v in self.metadata.items(): pid += '%s%s=%s' % (PERSISTENT_IDENTIFIER_PARTS_SEP, k, v) return pid def persistent_identifier(object_type, object_id, scheme_version=1, metadata={}): """Compute persistent identifier (stable over time) as per documentation. Documentation: https://docs.softwareheritage.org/devel/swh-model/persistent-identifiers.html # noqa Args: object_type (str): object's type, either 'content', 'directory', 'release', 'revision' or 'snapshot' object_id (dict/bytes/str): object's dict representation or object identifier scheme_version (int): persistent identifier scheme version, defaults to 1 metadata (dict): metadata related to the pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Returns: str: the persistent identifier """ pid = PersistentId(scheme_version=scheme_version, object_type=object_type, object_id=object_id, metadata=metadata) return str(pid) def parse_persistent_identifier(persistent_id): """Parse swh's :ref:`persistent-identifiers` scheme. 
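
    For instance (hypothetical identifier)::

        pid = parse_persistent_identifier(
            'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2')
        # then, e.g., pid.object_type == 'content' and
        # pid.object_id == '94a9ed024d3859793618152ea559a168bbcbb5e2'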
Args: persistent_id (str): A persistent identifier Raises: swh.model.exceptions.ValidationError: in case of: * missing mandatory values (4) * invalid namespace supplied * invalid version supplied * invalid type supplied * missing hash * invalid hash identifier supplied Returns: PersistentId: a named tuple holding the parsing result """ # ; persistent_id_parts = persistent_id.split(PERSISTENT_IDENTIFIER_PARTS_SEP) pid_data = persistent_id_parts.pop(0).split(':') if len(pid_data) != 4: raise ValidationError( 'Wrong format: There should be 4 mandatory values') # Checking for parsing errors _ns, _version, _type, _id = pid_data if _ns != 'swh': raise ValidationError( 'Wrong format: Supported namespace is \'swh\'') if _version != '1': raise ValidationError( 'Wrong format: Supported version is 1') pid_data[1] = int(pid_data[1]) expected_types = PERSISTENT_IDENTIFIER_TYPES if _type not in expected_types: raise ValidationError( 'Wrong format: Supported types are %s' % ( ', '.join(expected_types))) for otype, data in _object_type_map.items(): if _type == data['short_name']: pid_data[2] = otype break if not _id: raise ValidationError( 'Wrong format: Identifier should be present') try: validate_sha1(_id) except ValidationError: raise ValidationError( 'Wrong format: Identifier should be a valid hash') persistent_id_metadata = {} for part in persistent_id_parts: try: key, val = part.split('=') persistent_id_metadata[key] = val except Exception: msg = 'Contextual data is badly formatted, form key=val expected' raise ValidationError(msg) pid_data.append(persistent_id_metadata) return PersistentId._make(pid_data) diff --git a/swh/model/tests/data/dir-folders/sample-folder.tgz b/swh/model/tests/data/dir-folders/sample-folder.tgz new file mode 100644 index 0000000..cc84894 Binary files /dev/null and b/swh/model/tests/data/dir-folders/sample-folder.tgz differ diff --git a/swh/model/tests/fields/test_compound.py b/swh/model/tests/fields/test_compound.py index b6e13b6..dffbb04 100644 --- a/swh/model/tests/fields/test_compound.py +++ b/swh/model/tests/fields/test_compound.py @@ -1,241 +1,228 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import unittest -from nose.tools import istest - -from swh.model.exceptions import ValidationError, NON_FIELD_ERRORS +from swh.model.exceptions import NON_FIELD_ERRORS, ValidationError from swh.model.fields import compound, simple class ValidateCompound(unittest.TestCase): def setUp(self): def validate_always(model): return True def validate_never(model): return False self.test_model = 'test model' self.test_schema = { 'int': (True, simple.validate_int), 'str': (True, simple.validate_str), 'str2': (True, simple.validate_str), 'datetime': (False, simple.validate_datetime), NON_FIELD_ERRORS: validate_always, } self.test_schema_shortcut = self.test_schema.copy() self.test_schema_shortcut[NON_FIELD_ERRORS] = validate_never self.test_schema_field_failed = self.test_schema.copy() self.test_schema_field_failed['int'] = (True, [simple.validate_int, validate_never]) self.test_value = { 'str': 'value1', 'str2': 'value2', 'int': 42, 'datetime': datetime.datetime(1990, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc), } self.test_value_missing = { 'str': 'value1', } self.test_value_str_error = { 'str': 1984, 'str2': 'value2', 'int': 42, 'datetime': datetime.datetime(1990, 1, 1, 
12, 0, 0, tzinfo=datetime.timezone.utc), } self.test_value_missing_keys = {'int'} self.test_value_wrong_type = 42 self.present_keys = set(self.test_value) self.missing_keys = {'missingkey1', 'missingkey2'} - @istest - def validate_any_key(self): + def test_validate_any_key(self): self.assertTrue( compound.validate_any_key(self.test_value, self.present_keys)) self.assertTrue( compound.validate_any_key(self.test_value, self.present_keys | self.missing_keys)) - @istest - def validate_any_key_missing(self): + def test_validate_any_key_missing(self): with self.assertRaises(ValidationError) as cm: compound.validate_any_key(self.test_value, self.missing_keys) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'missing-alternative-field') self.assertEqual(exc.params['missing_fields'], ', '.join(sorted(self.missing_keys))) - @istest - def validate_all_keys(self): + def test_validate_all_keys(self): self.assertTrue( compound.validate_all_keys(self.test_value, self.present_keys)) - @istest - def validate_all_keys_missing(self): + def test_validate_all_keys_missing(self): with self.assertRaises(ValidationError) as cm: compound.validate_all_keys(self.test_value, self.missing_keys) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'missing-mandatory-field') self.assertEqual(exc.params['missing_fields'], ', '.join(sorted(self.missing_keys))) with self.assertRaises(ValidationError) as cm: compound.validate_all_keys(self.test_value, self.present_keys | self.missing_keys) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'missing-mandatory-field') self.assertEqual(exc.params['missing_fields'], ', '.join(sorted(self.missing_keys))) - @istest - def validate_against_schema(self): + def test_validate_against_schema(self): self.assertTrue( compound.validate_against_schema(self.test_model, self.test_schema, self.test_value)) - @istest - def validate_against_schema_wrong_type(self): + def test_validate_against_schema_wrong_type(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema(self.test_model, self.test_schema, self.test_value_wrong_type) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'model-unexpected-type') self.assertEqual(exc.params['model'], self.test_model) self.assertEqual(exc.params['type'], self.test_value_wrong_type.__class__.__name__) - @istest - def validate_against_schema_mandatory_keys(self): + def test_validate_against_schema_mandatory_keys(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema(self.test_model, self.test_schema, self.test_value_missing) # The exception should be of the form: # ValidationError({ # 'mandatory_key1': [ValidationError('model-field-mandatory')], # 'mandatory_key2': [ValidationError('model-field-mandatory')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) for key in self.test_value_missing_keys: nested_key = exc.error_dict[key] self.assertIsInstance(nested_key, list) self.assertEqual(len(nested_key), 1) nested = nested_key[0] self.assertIsInstance(nested, ValidationError) self.assertEqual(nested.code, 'model-field-mandatory') self.assertEqual(nested.params['field'], key) - @istest - def validate_against_schema_whole_schema_shortcut_previous_error(self): + def test_validate_whole_schema_shortcut_previous_error(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema( self.test_model, self.test_schema_shortcut, 
self.test_value_missing, ) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertNotIn(NON_FIELD_ERRORS, exc.error_dict) - @istest - def validate_against_schema_whole_schema(self): + def test_validate_whole_schema(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema( self.test_model, self.test_schema_shortcut, self.test_value, ) # The exception should be of the form: # ValidationError({ # NON_FIELD_ERRORS: [ValidationError('model-validation-failed')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEquals(set(exc.error_dict.keys()), {NON_FIELD_ERRORS}) + self.assertEqual(set(exc.error_dict.keys()), {NON_FIELD_ERRORS}) non_field_errors = exc.error_dict[NON_FIELD_ERRORS] self.assertIsInstance(non_field_errors, list) - self.assertEquals(len(non_field_errors), 1) + self.assertEqual(len(non_field_errors), 1) nested = non_field_errors[0] self.assertIsInstance(nested, ValidationError) - self.assertEquals(nested.code, 'model-validation-failed') - self.assertEquals(nested.params['model'], self.test_model) - self.assertEquals(nested.params['validator'], 'validate_never') + self.assertEqual(nested.code, 'model-validation-failed') + self.assertEqual(nested.params['model'], self.test_model) + self.assertEqual(nested.params['validator'], 'validate_never') - @istest - def validate_against_schema_field_error(self): + def test_validate_against_schema_field_error(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema(self.test_model, self.test_schema, self.test_value_str_error) # The exception should be of the form: # ValidationError({ # 'str': [ValidationError('unexpected-type')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEquals(set(exc.error_dict.keys()), {'str'}) + self.assertEqual(set(exc.error_dict.keys()), {'str'}) str_errors = exc.error_dict['str'] self.assertIsInstance(str_errors, list) - self.assertEquals(len(str_errors), 1) + self.assertEqual(len(str_errors), 1) nested = str_errors[0] self.assertIsInstance(nested, ValidationError) - self.assertEquals(nested.code, 'unexpected-type') + self.assertEqual(nested.code, 'unexpected-type') - @istest - def validate_against_schema_field_failed(self): + def test_validate_against_schema_field_failed(self): with self.assertRaises(ValidationError) as cm: compound.validate_against_schema(self.test_model, self.test_schema_field_failed, self.test_value) # The exception should be of the form: # ValidationError({ # 'int': [ValidationError('field-validation-failed')], # }) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEquals(set(exc.error_dict.keys()), {'int'}) + self.assertEqual(set(exc.error_dict.keys()), {'int'}) int_errors = exc.error_dict['int'] self.assertIsInstance(int_errors, list) - self.assertEquals(len(int_errors), 1) + self.assertEqual(len(int_errors), 1) nested = int_errors[0] self.assertIsInstance(nested, ValidationError) - self.assertEquals(nested.code, 'field-validation-failed') - self.assertEquals(nested.params['validator'], 'validate_never') - self.assertEquals(nested.params['field'], 'int') + self.assertEqual(nested.code, 'field-validation-failed') + self.assertEqual(nested.params['validator'], 'validate_never') + self.assertEqual(nested.params['field'], 'int') diff --git a/swh/model/tests/fields/test_hashes.py b/swh/model/tests/fields/test_hashes.py index 0ef303f..7ce0b78 100644 --- a/swh/model/tests/fields/test_hashes.py +++ b/swh/model/tests/fields/test_hashes.py @@ -1,162 +1,150 @@ # 
Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest -from nose.tools import istest - from swh.model.exceptions import ValidationError from swh.model.fields import hashes class ValidateHashes(unittest.TestCase): def setUp(self): self.valid_byte_hashes = { 'sha1': b'\xf1\xd2\xd2\xf9\x24\xe9\x86\xac\x86\xfd\xf7\xb3\x6c\x94' b'\xbc\xdf\x32\xbe\xec\x15', 'sha1_git': b'\x25\x7c\xc5\x64\x2c\xb1\xa0\x54\xf0\x8c\xc8\x3f\x2d' b'\x94\x3e\x56\xfd\x3e\xbe\x99', 'sha256': b'\xb5\xbb\x9d\x80\x14\xa0\xf9\xb1\xd6\x1e\x21\xe7\x96' b'\xd7\x8d\xcc\xdf\x13\x52\xf2\x3c\xd3\x28\x12\xf4\x85' b'\x0b\x87\x8a\xe4\x94\x4c', } self.valid_str_hashes = { 'sha1': 'f1d2d2f924e986ac86fdf7b36c94bcdf32beec15', 'sha1_git': '257cc5642cb1a054f08cc83f2d943e56fd3ebe99', 'sha256': 'b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f485' '0b878ae4944c', } self.bad_hash = object() - @istest - def valid_bytes_hash(self): + def test_valid_bytes_hash(self): for hash_type, value in self.valid_byte_hashes.items(): self.assertTrue(hashes.validate_hash(value, hash_type)) - @istest - def valid_str_hash(self): + def test_valid_str_hash(self): for hash_type, value in self.valid_str_hashes.items(): self.assertTrue(hashes.validate_hash(value, hash_type)) - @istest - def invalid_hash_type(self): + def test_invalid_hash_type(self): hash_type = 'unknown_hash_type' with self.assertRaises(ValidationError) as cm: hashes.validate_hash(self.valid_str_hashes['sha1'], hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-type') self.assertEqual(exc.params['hash_type'], hash_type) self.assertIn('Unexpected hash type', str(exc)) self.assertIn(hash_type, str(exc)) - @istest - def invalid_bytes_len(self): + def test_invalid_bytes_len(self): for hash_type, value in self.valid_byte_hashes.items(): value = value + b'\x00\x01' with self.assertRaises(ValidationError) as cm: hashes.validate_hash(value, hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-length') self.assertEqual(exc.params['hash_type'], hash_type) self.assertEqual(exc.params['length'], len(value)) self.assertIn('Unexpected length', str(exc)) self.assertIn(str(len(value)), str(exc)) - @istest - def invalid_str_len(self): + def test_invalid_str_len(self): for hash_type, value in self.valid_str_hashes.items(): value = value + '0001' with self.assertRaises(ValidationError) as cm: hashes.validate_hash(value, hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-length') self.assertEqual(exc.params['hash_type'], hash_type) self.assertEqual(exc.params['length'], len(value)) self.assertIn('Unexpected length', str(exc)) self.assertIn(str(len(value)), str(exc)) - @istest - def invalid_str_contents(self): + def test_invalid_str_contents(self): for hash_type, value in self.valid_str_hashes.items(): value = '\xa2' + value[1:-1] + '\xc3' with self.assertRaises(ValidationError) as cm: hashes.validate_hash(value, hash_type) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-contents') self.assertEqual(exc.params['hash_type'], hash_type) self.assertEqual(exc.params['unexpected_chars'], '\xa2, \xc3') self.assertIn('Unexpected characters', str(exc)) self.assertIn('\xc3', str(exc)) 
self.assertIn('\xa2', str(exc)) - @istest - def invalid_value_type(self): + def test_invalid_value_type(self): with self.assertRaises(ValidationError) as cm: hashes.validate_hash(self.bad_hash, 'sha1') exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-value-type') self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) self.assertIn('Unexpected type', str(exc)) self.assertIn(self.bad_hash.__class__.__name__, str(exc)) - @istest - def validate_sha1(self): + def test_validate_sha1(self): self.assertTrue(hashes.validate_sha1(self.valid_byte_hashes['sha1'])) self.assertTrue(hashes.validate_sha1(self.valid_str_hashes['sha1'])) with self.assertRaises(ValidationError) as cm: hashes.validate_sha1(self.bad_hash) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-value-type') self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) - @istest - def validate_sha1_git(self): + def test_validate_sha1_git(self): self.assertTrue( hashes.validate_sha1_git(self.valid_byte_hashes['sha1_git'])) self.assertTrue( hashes.validate_sha1_git(self.valid_str_hashes['sha1_git'])) with self.assertRaises(ValidationError) as cm: hashes.validate_sha1_git(self.bad_hash) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-value-type') self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) - @istest - def validate_sha256(self): + def test_validate_sha256(self): self.assertTrue( hashes.validate_sha256(self.valid_byte_hashes['sha256'])) self.assertTrue( hashes.validate_sha256(self.valid_str_hashes['sha256'])) with self.assertRaises(ValidationError) as cm: hashes.validate_sha256(self.bad_hash) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-hash-value-type') self.assertEqual(exc.params['type'], self.bad_hash.__class__.__name__) diff --git a/swh/model/tests/fields/test_simple.py b/swh/model/tests/fields/test_simple.py index 6fa2918..ab5e262 100644 --- a/swh/model/tests/fields/test_simple.py +++ b/swh/model/tests/fields/test_simple.py @@ -1,136 +1,123 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import unittest -from nose.tools import istest - from swh.model.exceptions import ValidationError from swh.model.fields import simple class ValidateSimple(unittest.TestCase): def setUp(self): self.valid_str = 'I am a valid string' self.valid_bytes = b'I am a valid bytes object' self.enum_values = {'an enum value', 'other', 'and another'} self.invalid_enum_value = 'invalid enum value' self.valid_int = 42 self.valid_real = 42.42 self.valid_datetime = datetime.datetime(1999, 1, 1, 12, 0, 0, tzinfo=datetime.timezone.utc) self.invalid_datetime_notz = datetime.datetime(1999, 1, 1, 12, 0, 0) - @istest - def validate_int(self): + def test_validate_int(self): self.assertTrue(simple.validate_int(self.valid_int)) - @istest - def validate_int_invalid_type(self): + def test_validate_int_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_int(self.valid_str) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-type') self.assertEqual(exc.params['expected_type'], 'Integral') self.assertEqual(exc.params['type'], 'str') - @istest - 
def validate_str(self): + def test_validate_str(self): self.assertTrue(simple.validate_str(self.valid_str)) - @istest - def validate_str_invalid_type(self): + def test_validate_str_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_str(self.valid_int) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-type') self.assertEqual(exc.params['expected_type'], 'str') self.assertEqual(exc.params['type'], 'int') with self.assertRaises(ValidationError) as cm: simple.validate_str(self.valid_bytes) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-type') self.assertEqual(exc.params['expected_type'], 'str') self.assertEqual(exc.params['type'], 'bytes') - @istest - def validate_bytes(self): + def test_validate_bytes(self): self.assertTrue(simple.validate_bytes(self.valid_bytes)) - @istest - def validate_bytes_invalid_type(self): + def test_validate_bytes_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_bytes(self.valid_int) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-type') self.assertEqual(exc.params['expected_type'], 'bytes') self.assertEqual(exc.params['type'], 'int') with self.assertRaises(ValidationError) as cm: simple.validate_bytes(self.valid_str) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-type') self.assertEqual(exc.params['expected_type'], 'bytes') self.assertEqual(exc.params['type'], 'str') - @istest - def validate_datetime(self): + def test_validate_datetime(self): self.assertTrue(simple.validate_datetime(self.valid_datetime)) self.assertTrue(simple.validate_datetime(self.valid_int)) self.assertTrue(simple.validate_datetime(self.valid_real)) - @istest - def validate_datetime_invalid_type(self): + def test_validate_datetime_invalid_type(self): with self.assertRaises(ValidationError) as cm: simple.validate_datetime(self.valid_str) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-type') self.assertEqual(exc.params['expected_type'], 'one of datetime, Real') self.assertEqual(exc.params['type'], 'str') - @istest - def validate_datetime_invalide_tz(self): + def test_validate_datetime_invalide_tz(self): with self.assertRaises(ValidationError) as cm: simple.validate_datetime(self.invalid_datetime_notz) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'datetime-without-tzinfo') - @istest - def validate_enum(self): + def test_validate_enum(self): for value in self.enum_values: self.assertTrue(simple.validate_enum(value, self.enum_values)) - @istest - def validate_enum_invalid_value(self): + def test_validate_enum_invalid_value(self): with self.assertRaises(ValidationError) as cm: simple.validate_enum(self.invalid_enum_value, self.enum_values) exc = cm.exception self.assertIsInstance(str(exc), str) self.assertEqual(exc.code, 'unexpected-value') self.assertEqual(exc.params['value'], self.invalid_enum_value) self.assertEqual(exc.params['expected_values'], ', '.join(sorted(self.enum_values))) diff --git a/swh/model/tests/test_cli.py b/swh/model/tests/test_cli.py index 9e31a4a..e4232fe 100644 --- a/swh/model/tests/test_cli.py +++ b/swh/model/tests/test_cli.py @@ -1,116 +1,116 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See 
top-level LICENSE file for more information import os import tempfile import unittest from click.testing import CliRunner -from nose.plugins.attrib import attr +import pytest from swh.model import cli -from swh.model.tests.test_from_disk import DataMixin from swh.model.hashutil import hash_to_hex +from swh.model.tests.test_from_disk import DataMixin -@attr('fs') +@pytest.mark.fs class TestIdentify(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.runner = CliRunner() - def assertPidOK(self, result, pid): + def assertPidOK(self, result, pid): # noqa: N802 self.assertEqual(result.exit_code, 0) self.assertEqual(result.output.split()[0], pid) def test_content_id(self): """identify file content""" self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) result = self.runner.invoke(cli.identify, ['--type', 'content', path]) self.assertPidOK(result, 'swh:1:cnt:' + hash_to_hex(content['sha1_git'])) def test_directory_id(self): """identify an entire directory""" self.make_from_tarball(self.tmpdir_name) path = os.path.join(self.tmpdir_name, b'sample-folder') result = self.runner.invoke(cli.identify, ['--type', 'directory', path]) self.assertPidOK(result, 'swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759') def test_symlink(self): """identify symlink --- both itself and target""" regular = os.path.join(self.tmpdir_name, b'foo.txt') link = os.path.join(self.tmpdir_name, b'bar.txt') open(regular, 'w').write('foo\n') os.symlink(os.path.basename(regular), link) result = self.runner.invoke(cli.identify, [link]) self.assertPidOK(result, 'swh:1:cnt:257cc5642cb1a054f08cc83f2d943e56fd3ebe99') result = self.runner.invoke(cli.identify, ['--no-dereference', link]) self.assertPidOK(result, 'swh:1:cnt:996f1789ff67c0e3f69ef5933a55d54c5d0e9954') def test_show_filename(self): """filename is shown by default""" self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) result = self.runner.invoke(cli.identify, ['--type', 'content', path]) self.assertEqual(result.exit_code, 0) self.assertEqual(result.output.rstrip(), 'swh:1:cnt:%s\t%s' % (hash_to_hex(content['sha1_git']), path.decode())) def test_hide_filename(self): """filename is hidden upon request""" self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) result = self.runner.invoke(cli.identify, ['--type', 'content', '--no-filename', path]) self.assertPidOK(result, 'swh:1:cnt:' + hash_to_hex(content['sha1_git'])) def test_auto_id(self): """automatic object type: file or directory, depending on argument""" with tempfile.NamedTemporaryFile(prefix='swh.model.cli') as f: result = self.runner.invoke(cli.identify, [f.name]) self.assertEqual(result.exit_code, 0) self.assertRegex(result.output, r'^swh:\d+:cnt:') with tempfile.TemporaryDirectory(prefix='swh.model.cli') as dirname: result = self.runner.invoke(cli.identify, [dirname]) self.assertEqual(result.exit_code, 0) self.assertRegex(result.output, r'^swh:\d+:dir:') def test_verify_content(self): """identifier verification""" self.make_contents(self.tmpdir_name) for filename, content in self.contents.items(): expected_id = 'swh:1:cnt:' + hash_to_hex(content['sha1_git']) # match path = os.path.join(self.tmpdir_name, filename) result = self.runner.invoke(cli.identify, ['--verify', expected_id, path]) self.assertEqual(result.exit_code, 0) # mismatch with open(path, 'a') as f: 
f.write('trailing garbage to make verification fail') result = self.runner.invoke(cli.identify, ['--verify', expected_id, path]) self.assertEqual(result.exit_code, 1) diff --git a/swh/model/tests/test_from_disk.py b/swh/model/tests/test_from_disk.py index 8e568ec..30f543d 100644 --- a/swh/model/tests/test_from_disk.py +++ b/swh/model/tests/test_from_disk.py @@ -1,789 +1,787 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import tarfile import tempfile import unittest -from nose.plugins.attrib import attr +import pytest from swh.model import from_disk -from swh.model.from_disk import Content, Directory, DentryPerms +from swh.model.from_disk import Content, DentryPerms, Directory from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex +TEST_DATA = os.path.join(os.path.dirname(__file__), 'data') + class ModeToPerms(unittest.TestCase): def setUp(self): super().setUp() # Generate a full permissions map self.perms_map = {} # Symlinks for i in range(0o120000, 0o127777 + 1): self.perms_map[i] = DentryPerms.symlink # Directories for i in range(0o040000, 0o047777 + 1): self.perms_map[i] = DentryPerms.directory # Other file types: socket, regular file, block device, character # device, fifo all map to regular files for ft in [0o140000, 0o100000, 0o060000, 0o020000, 0o010000]: for i in range(ft, ft + 0o7777 + 1): if i & 0o111: # executable bits are set self.perms_map[i] = DentryPerms.executable_content else: self.perms_map[i] = DentryPerms.content def test_exhaustive_mode_to_perms(self): for fmode, perm in self.perms_map.items(): self.assertEqual(perm, from_disk.mode_to_perms(fmode)) class DataMixin: maxDiff = None def setUp(self): self.tmpdir = tempfile.TemporaryDirectory( prefix='swh.model.from_disk' ) self.tmpdir_name = os.fsencode(self.tmpdir.name) self.contents = { b'file': { 'data': b'42\n', 'sha1': hash_to_bytes( '34973274ccef6ab4dfaaf86599792fa9c3fe4689' ), 'sha256': hash_to_bytes( '084c799cd551dd1d8d5c5f9a5d593b2e' '931f5e36122ee5c793c1d08a19839cc0' ), 'sha1_git': hash_to_bytes( 'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'), 'blake2s256': hash_to_bytes( 'd5fe1939576527e42cfd76a9455a2432' 'fe7f56669564577dd93c4280e76d661d' ), 'length': 3, 'mode': 0o100644 }, } self.symlinks = { b'symlink': { 'data': b'target', 'blake2s256': hash_to_bytes( '595d221b30fdd8e10e2fdf18376e688e' '9f18d56fd9b6d1eb6a822f8c146c6da6' ), 'sha1': hash_to_bytes( '0e8a3ad980ec179856012b7eecf4327e99cd44cd' ), 'sha1_git': hash_to_bytes( '1de565933b05f74c75ff9a6520af5f9f8a5a2f1d' ), 'sha256': hash_to_bytes( '34a04005bcaf206eec990bd9637d9fdb' '6725e0a0c0d4aebf003f17f4c956eb5c' ), 'length': 6, 'perms': DentryPerms.symlink, } } self.specials = { b'fifo': os.mkfifo, b'devnull': lambda path: os.mknod(path, device=os.makedev(1, 3)), } self.empty_content = { 'data': b'', 'length': 0, 'blake2s256': hash_to_bytes( '69217a3079908094e11121d042354a7c' '1f55b6482ca1a51e1b250dfd1ed0eef9' ), 'sha1': hash_to_bytes( 'da39a3ee5e6b4b0d3255bfef95601890afd80709' ), 'sha1_git': hash_to_bytes( 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' ), 'sha256': hash_to_bytes( 'e3b0c44298fc1c149afbf4c8996fb924' '27ae41e4649b934ca495991b7852b855' ), 'perms': DentryPerms.content, } self.empty_directory = { 'id': hash_to_bytes( '4b825dc642cb6eb9a060e54bf8d69288fbee4904' ), 'entries': [], } # Generated with generate_testdata_from_disk 
self.tarball_contents = { b'': { 'entries': [{ 'name': b'bar', 'perms': DentryPerms.directory, 'target': hash_to_bytes( '3c1f578394f4623f74a0ba7fe761729f59fc6ec4' ), 'type': 'dir', }, { 'name': b'empty-folder', 'perms': DentryPerms.directory, 'target': hash_to_bytes( '4b825dc642cb6eb9a060e54bf8d69288fbee4904' ), 'type': 'dir', }, { 'name': b'foo', 'perms': DentryPerms.directory, 'target': hash_to_bytes( '2b41c40f0d1fbffcba12497db71fba83fcca96e5' ), 'type': 'dir', }, { 'name': b'link-to-another-quote', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( '7d5c08111e21c8a9f71540939998551683375fad' ), 'type': 'file', }, { 'name': b'link-to-binary', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( 'e86b45e538d9b6888c969c89fbd22a85aa0e0366' ), 'type': 'file', }, { 'name': b'link-to-foo', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( '19102815663d23f8b75a47e7a01965dcdc96468c' ), 'type': 'file', }, { 'name': b'some-binary', 'perms': DentryPerms.executable_content, 'target': hash_to_bytes( '68769579c3eaadbe555379b9c3538e6628bae1eb' ), 'type': 'file', }], 'id': hash_to_bytes( 'e8b0f1466af8608c8a3fb9879db172b887e80759' ), }, b'bar': { 'entries': [{ 'name': b'barfoo', 'perms': DentryPerms.directory, 'target': hash_to_bytes( 'c3020f6bf135a38c6df3afeb5fb38232c5e07087' ), 'type': 'dir', }], 'id': hash_to_bytes( '3c1f578394f4623f74a0ba7fe761729f59fc6ec4' ), }, b'bar/barfoo': { 'entries': [{ 'name': b'another-quote.org', 'perms': DentryPerms.content, 'target': hash_to_bytes( '133693b125bad2b4ac318535b84901ebb1f6b638' ), 'type': 'file', }], 'id': hash_to_bytes( 'c3020f6bf135a38c6df3afeb5fb38232c5e07087' ), }, b'bar/barfoo/another-quote.org': { 'blake2s256': hash_to_bytes( 'd26c1cad82d43df0bffa5e7be11a60e3' '4adb85a218b433cbce5278b10b954fe8' ), 'length': 72, 'perms': DentryPerms.content, 'sha1': hash_to_bytes( '90a6138ba59915261e179948386aa1cc2aa9220a' ), 'sha1_git': hash_to_bytes( '133693b125bad2b4ac318535b84901ebb1f6b638' ), 'sha256': hash_to_bytes( '3db5ae168055bcd93a4d08285dc99ffe' 'e2883303b23fac5eab850273a8ea5546' ), }, b'empty-folder': { 'entries': [], 'id': hash_to_bytes( '4b825dc642cb6eb9a060e54bf8d69288fbee4904' ), }, b'foo': { 'entries': [{ 'name': b'barfoo', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( '8185dfb2c0c2c597d16f75a8a0c37668567c3d7e' ), 'type': 'file', }, { 'name': b'quotes.md', 'perms': DentryPerms.content, 'target': hash_to_bytes( '7c4c57ba9ff496ad179b8f65b1d286edbda34c9a' ), 'type': 'file', }, { 'name': b'rel-link-to-barfoo', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( 'acac326ddd63b0bc70840659d4ac43619484e69f' ), 'type': 'file', }], 'id': hash_to_bytes( '2b41c40f0d1fbffcba12497db71fba83fcca96e5' ), }, b'foo/barfoo': { 'blake2s256': hash_to_bytes( 'e1252f2caa4a72653c4efd9af871b62b' 'f2abb7bb2f1b0e95969204bd8a70d4cd' ), 'data': b'bar/barfoo', 'length': 10, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( '9057ee6d0162506e01c4d9d5459a7add1fedac37' ), 'sha1_git': hash_to_bytes( '8185dfb2c0c2c597d16f75a8a0c37668567c3d7e' ), 'sha256': hash_to_bytes( '29ad3f5725321b940332c78e403601af' 'ff61daea85e9c80b4a7063b6887ead68' ), }, b'foo/quotes.md': { 'blake2s256': hash_to_bytes( 'bf7ce4fe304378651ee6348d3e9336ed' '5ad603d33e83c83ba4e14b46f9b8a80b' ), 'length': 66, 'perms': DentryPerms.content, 'sha1': hash_to_bytes( '1bf0bb721ac92c18a19b13c0eb3d741cbfadebfc' ), 'sha1_git': hash_to_bytes( '7c4c57ba9ff496ad179b8f65b1d286edbda34c9a' ), 'sha256': hash_to_bytes( 'caca942aeda7b308859eb56f909ec96d' '07a499491690c453f73b9800a93b1659' ), }, 
b'foo/rel-link-to-barfoo': { 'blake2s256': hash_to_bytes( 'd9c327421588a1cf61f316615005a2e9' 'c13ac3a4e96d43a24138d718fa0e30db' ), 'data': b'../bar/barfoo', 'length': 13, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( 'dc51221d308f3aeb2754db48391b85687c2869f4' ), 'sha1_git': hash_to_bytes( 'acac326ddd63b0bc70840659d4ac43619484e69f' ), 'sha256': hash_to_bytes( '8007d20db2af40435f42ddef4b8ad76b' '80adbec26b249fdf0473353f8d99df08' ), }, b'link-to-another-quote': { 'blake2s256': hash_to_bytes( '2d0e73cea01ba949c1022dc10c8a43e6' '6180639662e5dc2737b843382f7b1910' ), 'data': b'bar/barfoo/another-quote.org', 'length': 28, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( 'cbeed15e79599c90de7383f420fed7acb48ea171' ), 'sha1_git': hash_to_bytes( '7d5c08111e21c8a9f71540939998551683375fad' ), 'sha256': hash_to_bytes( 'e6e17d0793aa750a0440eb9ad5b80b25' '8076637ef0fb68f3ac2e59e4b9ac3ba6' ), }, b'link-to-binary': { 'blake2s256': hash_to_bytes( '9ce18b1adecb33f891ca36664da676e1' '2c772cc193778aac9a137b8dc5834b9b' ), 'data': b'some-binary', 'length': 11, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( 'd0248714948b3a48a25438232a6f99f0318f59f1' ), 'sha1_git': hash_to_bytes( 'e86b45e538d9b6888c969c89fbd22a85aa0e0366' ), 'sha256': hash_to_bytes( '14126e97d83f7d261c5a6889cee73619' '770ff09e40c5498685aba745be882eff' ), }, b'link-to-foo': { 'blake2s256': hash_to_bytes( '08d6cad88075de8f192db097573d0e82' '9411cd91eb6ec65e8fc16c017edfdb74' ), 'data': b'foo', 'length': 3, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33' ), 'sha1_git': hash_to_bytes( '19102815663d23f8b75a47e7a01965dcdc96468c' ), 'sha256': hash_to_bytes( '2c26b46b68ffc68ff99b453c1d304134' '13422d706483bfa0f98a5e886266e7ae' ), }, b'some-binary': { 'blake2s256': hash_to_bytes( '922e0f7015035212495b090c27577357' 'a740ddd77b0b9e0cd23b5480c07a18c6' ), 'length': 5, 'perms': DentryPerms.executable_content, 'sha1': hash_to_bytes( '0bbc12d7f4a2a15b143da84617d95cb223c9b23c' ), 'sha1_git': hash_to_bytes( '68769579c3eaadbe555379b9c3538e6628bae1eb' ), 'sha256': hash_to_bytes( 'bac650d34a7638bb0aeb5342646d24e3' 'b9ad6b44c9b383621faa482b990a367d' ), }, } def tearDown(self): self.tmpdir.cleanup() def assertContentEqual(self, left, right, *, check_data=False, # noqa check_path=False): if not isinstance(left, Content): raise ValueError('%s is not a Content' % left) if isinstance(right, Content): right = right.get_data() keys = DEFAULT_ALGORITHMS | { 'length', 'perms', } if check_data: keys |= {'data'} if check_path: keys |= {'path'} failed = [] for key in keys: try: lvalue = left.data[key] if key == 'perms' and 'perms' not in right: rvalue = from_disk.mode_to_perms(right['mode']) else: rvalue = right[key] except KeyError: failed.append(key) continue if lvalue != rvalue: failed.append(key) if failed: raise self.failureException( 'Content mismatched:\n' + '\n'.join( 'content[%s] = %r != %r' % ( key, left.data.get(key), right.get(key)) for key in failed ) ) def assertDirectoryEqual(self, left, right): # NoQA if not isinstance(left, Directory): raise ValueError('%s is not a Directory' % left) if isinstance(right, Directory): right = right.get_data() return self.assertCountEqual(left.entries, right['entries']) def make_contents(self, directory): for filename, content in self.contents.items(): path = os.path.join(directory, filename) with open(path, 'wb') as f: f.write(content['data']) os.chmod(path, content['mode']) def make_symlinks(self, directory): for filename, symlink in self.symlinks.items(): path = 
os.path.join(directory, filename) os.symlink(symlink['data'], path) def make_specials(self, directory): for filename, fn in self.specials.items(): path = os.path.join(directory, filename) fn(path) def make_from_tarball(self, directory): - tarball = os.path.join(os.path.dirname(__file__), - '../../../..', - 'swh-storage-testdata', - 'dir-folders', - 'sample-folder.tgz') + tarball = os.path.join(TEST_DATA, 'dir-folders', 'sample-folder.tgz') with tarfile.open(tarball, 'r:gz') as f: f.extractall(os.fsdecode(directory)) class TestContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() def test_data_to_content(self): for filename, content in self.contents.items(): conv_content = Content.from_bytes(mode=content['mode'], data=content['data']) self.assertContentEqual(conv_content, content) self.assertIn(hash_to_hex(conv_content.hash), repr(conv_content)) class SymlinkToContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_symlinks(self.tmpdir_name) def test_symlink_to_content(self): for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) perms = 0o120000 conv_content = Content.from_symlink(path=path, mode=perms) self.assertContentEqual(conv_content, symlink) class FileToContent(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_contents(self.tmpdir_name) self.make_symlinks(self.tmpdir_name) self.make_specials(self.tmpdir_name) def test_file_to_content(self): # Check whether loading the data works for data in [True, False]: for filename, symlink in self.symlinks.items(): path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path, data=data) self.assertContentEqual(conv_content, symlink, check_data=data) for filename, content in self.contents.items(): path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path, data=data) self.assertContentEqual(conv_content, content, check_data=data) for filename in self.specials: path = os.path.join(self.tmpdir_name, filename) conv_content = Content.from_file(path=path, data=data) self.assertContentEqual(conv_content, self.empty_content) def test_file_to_content_with_path(self): for filename, content in self.contents.items(): content_w_path = content.copy() path = os.path.join(self.tmpdir_name, filename) content_w_path['path'] = path conv_content = Content.from_file(path=path, save_path=True) self.assertContentEqual(conv_content, content_w_path, check_path=True) class DirectoryToObjects(DataMixin, unittest.TestCase): def setUp(self): super().setUp() contents = os.path.join(self.tmpdir_name, b'contents') os.mkdir(contents) self.make_contents(contents) symlinks = os.path.join(self.tmpdir_name, b'symlinks') os.mkdir(symlinks) self.make_symlinks(symlinks) specials = os.path.join(self.tmpdir_name, b'specials') os.mkdir(specials) self.make_specials(specials) empties = os.path.join(self.tmpdir_name, b'empty1', b'empty2') os.makedirs(empties) def test_directory_to_objects(self): directory = Directory.from_disk(path=self.tmpdir_name) for name, value in self.contents.items(): self.assertContentEqual(directory[b'contents/' + name], value) for name, value in self.symlinks.items(): self.assertContentEqual(directory[b'symlinks/' + name], value) for name in self.specials: self.assertContentEqual( directory[b'specials/' + name], self.empty_content, ) self.assertEqual( directory[b'empty1/empty2'].get_data(), self.empty_directory, ) # Raise on non existent file with self.assertRaisesRegex(KeyError, 
"b'nonexistent'"): directory[b'empty1/nonexistent'] # Raise on non existent directory with self.assertRaisesRegex(KeyError, "b'nonexistentdir'"): directory[b'nonexistentdir/file'] objs = directory.collect() self.assertCountEqual(['content', 'directory'], objs) self.assertEqual(len(objs['directory']), 6) self.assertEqual(len(objs['content']), len(self.contents) + len(self.symlinks) + 1) def test_directory_to_objects_ignore_empty(self): directory = Directory.from_disk( path=self.tmpdir_name, dir_filter=from_disk.ignore_empty_directories ) for name, value in self.contents.items(): self.assertContentEqual(directory[b'contents/' + name], value) for name, value in self.symlinks.items(): self.assertContentEqual(directory[b'symlinks/' + name], value) for name in self.specials: self.assertContentEqual( directory[b'specials/' + name], self.empty_content, ) # empty directories have been ignored recursively with self.assertRaisesRegex(KeyError, "b'empty1'"): directory[b'empty1'] with self.assertRaisesRegex(KeyError, "b'empty1'"): directory[b'empty1/empty2'] objs = directory.collect() self.assertCountEqual(['content', 'directory'], objs) self.assertEqual(len(objs['directory']), 4) self.assertEqual(len(objs['content']), len(self.contents) + len(self.symlinks) + 1) def test_directory_to_objects_ignore_name(self): directory = Directory.from_disk( path=self.tmpdir_name, dir_filter=from_disk.ignore_named_directories([b'symlinks']) ) for name, value in self.contents.items(): self.assertContentEqual(directory[b'contents/' + name], value) for name in self.specials: self.assertContentEqual( directory[b'specials/' + name], self.empty_content, ) self.assertEqual( directory[b'empty1/empty2'].get_data(), self.empty_directory, ) with self.assertRaisesRegex(KeyError, "b'symlinks'"): directory[b'symlinks'] objs = directory.collect() self.assertCountEqual(['content', 'directory'], objs) self.assertEqual(len(objs['directory']), 5) self.assertEqual(len(objs['content']), len(self.contents) + 1) def test_directory_to_objects_ignore_name_case(self): directory = Directory.from_disk( path=self.tmpdir_name, dir_filter=from_disk.ignore_named_directories([b'symLiNks'], case_sensitive=False) ) for name, value in self.contents.items(): self.assertContentEqual(directory[b'contents/' + name], value) for name in self.specials: self.assertContentEqual( directory[b'specials/' + name], self.empty_content, ) self.assertEqual( directory[b'empty1/empty2'].get_data(), self.empty_directory, ) with self.assertRaisesRegex(KeyError, "b'symlinks'"): directory[b'symlinks'] objs = directory.collect() self.assertCountEqual(['content', 'directory'], objs) self.assertEqual(len(objs['directory']), 5) self.assertEqual(len(objs['content']), len(self.contents) + 1) -@attr('fs') +@pytest.mark.fs class TarballTest(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.make_from_tarball(self.tmpdir_name) def test_contents_match(self): directory = Directory.from_disk( path=os.path.join(self.tmpdir_name, b'sample-folder') ) for name, data in self.tarball_contents.items(): obj = directory[name] if isinstance(obj, Content): self.assertContentEqual(obj, data) elif isinstance(obj, Directory): self.assertDirectoryEqual(obj, data) else: raise self.failureException('Unknown type for %s' % obj) class DirectoryManipulation(DataMixin, unittest.TestCase): def test_directory_access_nested(self): d = Directory() d[b'a'] = Directory() d[b'a/b'] = Directory() self.assertEqual(d[b'a/b'].get_data(), self.empty_directory) def test_directory_del_nested(self): d = 
Directory() d[b'a'] = Directory() d[b'a/b'] = Directory() with self.assertRaisesRegex(KeyError, "b'c'"): del d[b'a/b/c'] with self.assertRaisesRegex(KeyError, "b'level2'"): del d[b'a/level2/c'] del d[b'a/b'] self.assertEqual(d[b'a'].get_data(), self.empty_directory) def test_directory_access_self(self): d = Directory() self.assertIs(d, d[b'']) self.assertIs(d, d[b'/']) self.assertIs(d, d[b'//']) def test_directory_access_wrong_type(self): d = Directory() with self.assertRaisesRegex(ValueError, 'bytes from Directory'): d['foo'] with self.assertRaisesRegex(ValueError, 'bytes from Directory'): d[42] def test_directory_repr(self): entries = [b'a', b'b', b'c'] d = Directory() for entry in entries: d[entry] = Directory() r = repr(d) self.assertIn(hash_to_hex(d.hash), r) for entry in entries: self.assertIn(str(entry), r) def test_directory_set_wrong_type_name(self): d = Directory() with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): d['foo'] = Directory() with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): d[42] = Directory() def test_directory_set_nul_in_name(self): d = Directory() with self.assertRaisesRegex(ValueError, 'nul bytes'): d[b'\x00\x01'] = Directory() def test_directory_set_empty_name(self): d = Directory() with self.assertRaisesRegex(ValueError, 'must have a name'): d[b''] = Directory() with self.assertRaisesRegex(ValueError, 'must have a name'): d[b'/'] = Directory() def test_directory_set_wrong_type(self): d = Directory() with self.assertRaisesRegex(ValueError, 'Content or Directory'): d[b'entry'] = object() def test_directory_del_wrong_type(self): d = Directory() with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): del d['foo'] with self.assertRaisesRegex(ValueError, 'bytes Directory entry'): del d[42] diff --git a/swh/model/tests/test_hashutil.py b/swh/model/tests/test_hashutil.py index 92b3684..0e41068 100644 --- a/swh/model/tests/test_hashutil.py +++ b/swh/model/tests/test_hashutil.py @@ -1,414 +1,379 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import hashlib import io import os import tempfile import unittest - -from nose.tools import istest from unittest.mock import patch from swh.model import hashutil from swh.model.hashutil import MultiHash class BaseHashutil(unittest.TestCase): def setUp(self): # Reset function cache hashutil._blake2_hash_cache = {} self.data = b'1984\n' self.hex_checksums = { 'sha1': '62be35bf00ff0c624f4a621e2ea5595a049e0731', 'sha1_git': '568aaf43d83b2c3df8067f3bedbb97d83260be6d', 'sha256': '26602113b4b9afd9d55466b08580d3c2' '4a9b50ee5b5866c0d91fab0e65907311', 'blake2s256': '63cfb259e1fdb485bc5c55749697a6b21ef31fb7445f6c78a' 'c9422f9f2dc8906', } self.checksums = { type: bytes.fromhex(cksum) for type, cksum in self.hex_checksums.items() } self.bytehex_checksums = { type: hashutil.hash_to_bytehex(cksum) for type, cksum in self.checksums.items() } self.git_hex_checksums = { 'blob': self.hex_checksums['sha1_git'], 'tree': '5b2e883aa33d2efab98442693ea4dd5f1b8871b0', 'commit': '79e4093542e72f0fcb7cbd75cb7d270f9254aa8f', 'tag': 'd6bf62466f287b4d986c545890716ce058bddf67', } self.git_checksums = { type: bytes.fromhex(cksum) for type, cksum in self.git_hex_checksums.items() } class MultiHashTest(BaseHashutil): - @istest - def multi_hash_data(self): + def test_multi_hash_data(self): checksums = 
MultiHash.from_data(self.data).digest() self.assertEqual(checksums, self.checksums) self.assertFalse('length' in checksums) - @istest - def multi_hash_data_with_length(self): + def test_multi_hash_data_with_length(self): expected_checksums = self.checksums.copy() expected_checksums['length'] = len(self.data) algos = set(['length']).union(hashutil.DEFAULT_ALGORITHMS) checksums = MultiHash.from_data(self.data, hash_names=algos).digest() self.assertEqual(checksums, expected_checksums) self.assertTrue('length' in checksums) - @istest - def multi_hash_data_unknown_hash(self): + def test_multi_hash_data_unknown_hash(self): with self.assertRaises(ValueError) as cm: MultiHash.from_data(self.data, ['unknown-hash']) self.assertIn('Unexpected hashing algorithm', cm.exception.args[0]) self.assertIn('unknown-hash', cm.exception.args[0]) - @istest - def multi_hash_file(self): + def test_multi_hash_file(self): fobj = io.BytesIO(self.data) checksums = MultiHash.from_file(fobj, length=len(self.data)).digest() self.assertEqual(checksums, self.checksums) - @istest - def multi_hash_file_hexdigest(self): + def test_multi_hash_file_hexdigest(self): fobj = io.BytesIO(self.data) length = len(self.data) checksums = MultiHash.from_file(fobj, length=length).hexdigest() self.assertEqual(checksums, self.hex_checksums) - @istest - def multi_hash_file_bytehexdigest(self): + def test_multi_hash_file_bytehexdigest(self): fobj = io.BytesIO(self.data) length = len(self.data) checksums = MultiHash.from_file(fobj, length=length).bytehexdigest() self.assertEqual(checksums, self.bytehex_checksums) - @istest - def multi_hash_file_missing_length(self): + def test_multi_hash_file_missing_length(self): fobj = io.BytesIO(self.data) with self.assertRaises(ValueError) as cm: MultiHash.from_file(fobj, hash_names=['sha1_git']) self.assertIn('Missing length', cm.exception.args[0]) - @istest - def multi_hash_path(self): + def test_multi_hash_path(self): with tempfile.NamedTemporaryFile(delete=False) as f: f.write(self.data) hashes = MultiHash.from_path(f.name).digest() os.remove(f.name) - self.checksums['length'] = len(self.data) - self.assertEquals(self.checksums, hashes) + self.assertEqual(self.checksums, hashes) class Hashutil(BaseHashutil): - @istest - def hash_data(self): + def test_hash_data(self): checksums = hashutil.hash_data(self.data) self.assertEqual(checksums, self.checksums) self.assertFalse('length' in checksums) - @istest - def hash_data_with_length(self): + def test_hash_data_with_length(self): expected_checksums = self.checksums.copy() expected_checksums['length'] = len(self.data) algos = set(['length']).union(hashutil.DEFAULT_ALGORITHMS) checksums = hashutil.hash_data(self.data, algorithms=algos) self.assertEqual(checksums, expected_checksums) self.assertTrue('length' in checksums) - @istest - def hash_data_unknown_hash(self): + def test_hash_data_unknown_hash(self): with self.assertRaises(ValueError) as cm: hashutil.hash_data(self.data, ['unknown-hash']) self.assertIn('Unexpected hashing algorithm', cm.exception.args[0]) self.assertIn('unknown-hash', cm.exception.args[0]) - @istest - def hash_git_data(self): + def test_hash_git_data(self): checksums = { git_type: hashutil.hash_git_data(self.data, git_type) for git_type in self.git_checksums } self.assertEqual(checksums, self.git_checksums) - @istest - def hash_git_data_unknown_git_type(self): + def test_hash_git_data_unknown_git_type(self): with self.assertRaises(ValueError) as cm: hashutil.hash_git_data(self.data, 'unknown-git-type') self.assertIn('Unexpected git 
object type', cm.exception.args[0]) self.assertIn('unknown-git-type', cm.exception.args[0]) - @istest - def hash_file(self): + def test_hash_file(self): fobj = io.BytesIO(self.data) checksums = hashutil.hash_file(fobj, length=len(self.data)) self.assertEqual(checksums, self.checksums) - @istest - def hash_file_missing_length(self): + def test_hash_file_missing_length(self): fobj = io.BytesIO(self.data) with self.assertRaises(ValueError) as cm: hashutil.hash_file(fobj, algorithms=['sha1_git']) self.assertIn('Missing length', cm.exception.args[0]) - @istest - def hash_path(self): + def test_hash_path(self): with tempfile.NamedTemporaryFile(delete=False) as f: f.write(self.data) hashes = hashutil.hash_path(f.name) os.remove(f.name) self.checksums['length'] = len(self.data) - self.assertEquals(self.checksums, hashes) + self.assertEqual(self.checksums, hashes) - @istest - def hash_to_hex(self): + def test_hash_to_hex(self): for type in self.checksums: hex = self.hex_checksums[type] hash = self.checksums[type] - self.assertEquals(hashutil.hash_to_hex(hex), hex) - self.assertEquals(hashutil.hash_to_hex(hash), hex) + self.assertEqual(hashutil.hash_to_hex(hex), hex) + self.assertEqual(hashutil.hash_to_hex(hash), hex) - @istest - def hash_to_bytes(self): + def test_hash_to_bytes(self): for type in self.checksums: hex = self.hex_checksums[type] hash = self.checksums[type] - self.assertEquals(hashutil.hash_to_bytes(hex), hash) - self.assertEquals(hashutil.hash_to_bytes(hash), hash) + self.assertEqual(hashutil.hash_to_bytes(hex), hash) + self.assertEqual(hashutil.hash_to_bytes(hash), hash) - @istest - def hash_to_bytehex(self): + def test_hash_to_bytehex(self): for algo in self.checksums: self.assertEqual(self.hex_checksums[algo].encode('ascii'), hashutil.hash_to_bytehex(self.checksums[algo])) - @istest - def bytehex_to_hash(self): + def test_bytehex_to_hash(self): for algo in self.checksums: self.assertEqual(self.checksums[algo], hashutil.bytehex_to_hash( self.hex_checksums[algo].encode())) - @istest - def new_hash_unsupported_hashing_algorithm(self): + def test_new_hash_unsupported_hashing_algorithm(self): try: hashutil._new_hash('blake2:10') except ValueError as e: - self.assertEquals(str(e), - 'Unexpected hashing algorithm blake2:10, ' - 'expected one of blake2b512, blake2s256, ' - 'sha1, sha1_git, sha256') + self.assertEqual(str(e), + 'Unexpected hashing algorithm blake2:10, ' + 'expected one of blake2b512, blake2s256, ' + 'sha1, sha1_git, sha256') @patch('hashlib.new') - @istest - def new_hash_blake2b_blake2b512_builtin(self, mock_hashlib_new): + def test_new_hash_blake2b_blake2b512_builtin(self, mock_hashlib_new): if 'blake2b512' not in hashlib.algorithms_available: self.skipTest('blake2b512 not built-in') mock_hashlib_new.return_value = sentinel = object() h = hashutil._new_hash('blake2b512') self.assertIs(h, sentinel) mock_hashlib_new.assert_called_with('blake2b512') @patch('hashlib.new') - @istest - def new_hash_blake2s_blake2s256_builtin(self, mock_hashlib_new): + def test_new_hash_blake2s_blake2s256_builtin(self, mock_hashlib_new): if 'blake2s256' not in hashlib.algorithms_available: self.skipTest('blake2s256 not built-in') mock_hashlib_new.return_value = sentinel = object() h = hashutil._new_hash('blake2s256') self.assertIs(h, sentinel) mock_hashlib_new.assert_called_with('blake2s256') - @istest - def new_hash_blake2b_builtin(self): + def test_new_hash_blake2b_builtin(self): removed_hash = False try: if 'blake2b512' in hashlib.algorithms_available: removed_hash = True 
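# Temporarily hide the built-in blake2b512 from hashlib so that _new_hash() must take the hashlib.blake2b fallback path; the finally block below restores it.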
hashlib.algorithms_available.remove('blake2b512') if 'blake2b' not in hashlib.algorithms_available: self.skipTest('blake2b not built in') with patch('hashlib.blake2b') as mock_blake2b: mock_blake2b.return_value = sentinel = object() h = hashutil._new_hash('blake2b512') self.assertIs(h, sentinel) mock_blake2b.assert_called_with(digest_size=512//8) finally: if removed_hash: hashlib.algorithms_available.add('blake2b512') - @istest - def new_hash_blake2s_builtin(self): + def test_new_hash_blake2s_builtin(self): removed_hash = False try: if 'blake2s256' in hashlib.algorithms_available: removed_hash = True hashlib.algorithms_available.remove('blake2s256') if 'blake2s' not in hashlib.algorithms_available: self.skipTest('blake2s not built in') with patch('hashlib.blake2s') as mock_blake2s: mock_blake2s.return_value = sentinel = object() h = hashutil._new_hash('blake2s256') self.assertIs(h, sentinel) mock_blake2s.assert_called_with(digest_size=256//8) finally: if removed_hash: hashlib.algorithms_available.add('blake2s256') - @istest - def new_hash_blake2b_pyblake2(self): + def test_new_hash_blake2b_pyblake2(self): if 'blake2b512' in hashlib.algorithms_available: self.skipTest('blake2b512 built in') if 'blake2b' in hashlib.algorithms_available: self.skipTest('blake2b built in') with patch('pyblake2.blake2b') as mock_blake2b: mock_blake2b.return_value = sentinel = object() h = hashutil._new_hash('blake2b512') self.assertIs(h, sentinel) mock_blake2b.assert_called_with(digest_size=512//8) - @istest - def new_hash_blake2s_pyblake2(self): + def test_new_hash_blake2s_pyblake2(self): if 'blake2s256' in hashlib.algorithms_available: self.skipTest('blake2s256 built in') if 'blake2s' in hashlib.algorithms_available: self.skipTest('blake2s built in') with patch('pyblake2.blake2s') as mock_blake2s: mock_blake2s.return_value = sentinel = object() h = hashutil._new_hash('blake2s256') self.assertIs(h, sentinel) mock_blake2s.assert_called_with(digest_size=256//8) class HashlibGit(unittest.TestCase): def setUp(self): self.blob_data = b'42\n' self.tree_data = b''.join([b'40000 barfoo\0', bytes.fromhex('c3020f6bf135a38c6df' '3afeb5fb38232c5e07087'), b'100644 blah\0', bytes.fromhex('63756ef0df5e4f10b6efa' '33cfe5c758749615f20'), b'100644 hello\0', bytes.fromhex('907b308167f0880fb2a' '5c0e1614bb0c7620f9dc3')]) self.commit_data = """tree 1c61f7259dcb770f46b194d941df4f08ff0a3970 author Antoine R. Dumont (@ardumont) <antoine.romain.dumont@gmail.com> 1444054085 +0200 committer Antoine R. Dumont (@ardumont) <antoine.romain.dumont@gmail.com> 1444054085 +0200 initial """.encode('utf-8') # NOQA self.tag_data = """object 24d012aaec0bc5a4d2f62c56399053d6cc72a241 type commit tag 0.0.1 tagger Antoine R.
Dumont (@ardumont) <antoine.romain.dumont@gmail.com> 1444225145 +0200 blah """.encode('utf-8') # NOQA self.checksums = { 'blob_sha1_git': bytes.fromhex('d81cc0710eb6cf9efd5b920a8453e1' 'e07157b6cd'), 'tree_sha1_git': bytes.fromhex('ac212302c45eada382b27bfda795db' '121dacdb1c'), 'commit_sha1_git': bytes.fromhex('e960570b2e6e2798fa4cfb9af2c399' 'd629189653'), 'tag_sha1_git': bytes.fromhex('bc2b99ba469987bcf1272c189ed534' 'e9e959f120'), } - @istest - def unknown_header_type(self): + def test_unknown_header_type(self): with self.assertRaises(ValueError) as cm: hashutil.hash_git_data(b'any-data', 'some-unknown-type') self.assertIn('Unexpected git object type', cm.exception.args[0]) - @istest - def hashdata_content(self): + def test_hashdata_content(self): # when actual_hash = hashutil.hash_git_data(self.blob_data, git_type='blob') # then self.assertEqual(actual_hash, self.checksums['blob_sha1_git']) - @istest - def hashdata_tree(self): + def test_hashdata_tree(self): # when actual_hash = hashutil.hash_git_data(self.tree_data, git_type='tree') # then self.assertEqual(actual_hash, self.checksums['tree_sha1_git']) - @istest - def hashdata_revision(self): + def test_hashdata_revision(self): # when actual_hash = hashutil.hash_git_data(self.commit_data, git_type='commit') # then self.assertEqual(actual_hash, self.checksums['commit_sha1_git']) - @istest - def hashdata_tag(self): + def test_hashdata_tag(self): # when actual_hash = hashutil.hash_git_data(self.tag_data, git_type='tag') # then self.assertEqual(actual_hash, self.checksums['tag_sha1_git']) diff --git a/swh/model/tests/test_identifiers.py b/swh/model/tests/test_identifiers.py index 6658608..de96865 100644 --- a/swh/model/tests/test_identifiers.py +++ b/swh/model/tests/test_identifiers.py @@ -1,919 +1,894 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime import unittest -from nose.tools import istest - from swh.model import hashutil, identifiers - from swh.model.exceptions import ValidationError -from swh.model.identifiers import SNAPSHOT, RELEASE, REVISION, DIRECTORY -from swh.model.identifiers import CONTENT, PERSISTENT_IDENTIFIER_TYPES -from swh.model.identifiers import PersistentId +from swh.model.identifiers import (CONTENT, DIRECTORY, + PERSISTENT_IDENTIFIER_TYPES, RELEASE, + REVISION, SNAPSHOT, PersistentId) class UtilityFunctionsIdentifier(unittest.TestCase): def setUp(self): self.str_id = 'c2e41aae41ac17bd4a650770d6ee77f62e52235b' self.bytes_id = binascii.unhexlify(self.str_id) self.bad_type_id = object() - @istest - def identifier_to_bytes(self): + def test_identifier_to_bytes(self): for id in [self.str_id, self.bytes_id]: self.assertEqual(identifiers.identifier_to_bytes(id), self.bytes_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(id[:-2]) self.assertIn('length', str(cm.exception)) with self.assertRaises(ValueError) as cm: identifiers.identifier_to_bytes(self.bad_type_id) self.assertIn('type', str(cm.exception)) - @istest - def identifier_to_str(self): + def test_identifier_to_str(self): for id in [self.str_id, self.bytes_id]: self.assertEqual(identifiers.identifier_to_str(id), self.str_id) # wrong length with self.assertRaises(ValueError) as cm: identifiers.identifier_to_str(id[:-2]) self.assertIn('length', str(cm.exception)) with self.assertRaises(ValueError) as cm:
identifiers.identifier_to_str(self.bad_type_id) self.assertIn('type', str(cm.exception)) class UtilityFunctionsDateOffset(unittest.TestCase): def setUp(self): self.dates = { b'1448210036': { 'seconds': 1448210036, 'microseconds': 0, }, b'1448210036.002342': { 'seconds': 1448210036, 'microseconds': 2342, }, b'1448210036.12': { 'seconds': 1448210036, 'microseconds': 120000, } } self.broken_dates = [ 1448210036.12, ] self.offsets = { 0: b'+0000', -630: b'-1030', 800: b'+1320', } - @istest - def format_date(self): + def test_format_date(self): for date_repr, date in self.dates.items(): self.assertEqual(identifiers.format_date(date), date_repr) - @istest - def format_date_fail(self): + def test_format_date_fail(self): for date in self.broken_dates: with self.assertRaises(ValueError): identifiers.format_date(date) - @istest - def format_offset(self): + def test_format_offset(self): for offset, res in self.offsets.items(): self.assertEqual(identifiers.format_offset(offset), res) class ContentIdentifier(unittest.TestCase): def setUp(self): self.content = { 'status': 'visible', 'length': 5, 'data': b'1984\n', 'ctime': datetime.datetime(2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc), } self.content_id = hashutil.hash_data(self.content['data']) - @istest - def content_identifier(self): + def test_content_identifier(self): self.assertEqual(identifiers.content_identifier(self.content), self.content_id) class DirectoryIdentifier(unittest.TestCase): def setUp(self): self.directory = { 'id': 'c2e41aae41ac17bd4a650770d6ee77f62e52235b', 'entries': [ { 'type': 'file', 'perms': 33188, 'name': b'README', 'target': '37ec8ea2110c0b7a32fbb0e872f6e7debbf95e21' }, { 'type': 'file', 'perms': 33188, 'name': b'Rakefile', 'target': '3bb0e8592a41ae3185ee32266c860714980dbed7' }, { 'type': 'dir', 'perms': 16384, 'name': b'app', 'target': '61e6e867f5d7ba3b40540869bc050b0c4fed9e95' }, { 'type': 'file', 'perms': 33188, 'name': b'1.megabyte', 'target': '7c2b2fbdd57d6765cdc9d84c2d7d333f11be7fb3' }, { 'type': 'dir', 'perms': 16384, 'name': b'config', 'target': '591dfe784a2e9ccc63aaba1cb68a765734310d98' }, { 'type': 'dir', 'perms': 16384, 'name': b'public', 'target': '9588bf4522c2b4648bfd1c61d175d1f88c1ad4a5' }, { 'type': 'file', 'perms': 33188, 'name': b'development.sqlite3', 'target': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' }, { 'type': 'dir', 'perms': 16384, 'name': b'doc', 'target': '154705c6aa1c8ead8c99c7915373e3c44012057f' }, { 'type': 'dir', 'perms': 16384, 'name': b'db', 'target': '85f157bdc39356b7bc7de9d0099b4ced8b3b382c' }, { 'type': 'dir', 'perms': 16384, 'name': b'log', 'target': '5e3d3941c51cce73352dff89c805a304ba96fffe' }, { 'type': 'dir', 'perms': 16384, 'name': b'script', 'target': '1b278423caf176da3f3533592012502aa10f566c' }, { 'type': 'dir', 'perms': 16384, 'name': b'test', 'target': '035f0437c080bfd8711670b3e8677e686c69c763' }, { 'type': 'dir', 'perms': 16384, 'name': b'vendor', 'target': '7c0dc9ad978c1af3f9a4ce061e50f5918bd27138' }, { 'type': 'rev', 'perms': 57344, 'name': b'will_paginate', 'target': '3d531e169db92a16a9a8974f0ae6edf52e52659e' } ], } self.empty_directory = { 'id': '4b825dc642cb6eb9a060e54bf8d69288fbee4904', 'entries': [], } - @istest - def dir_identifier(self): + def test_dir_identifier(self): self.assertEqual( identifiers.directory_identifier(self.directory), self.directory['id']) - @istest - def dir_identifier_empty_directory(self): + def test_dir_identifier_empty_directory(self): self.assertEqual( identifiers.directory_identifier(self.empty_directory), 
self.empty_directory['id']) class RevisionIdentifier(unittest.TestCase): def setUp(self): linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) gpgsig = b'''\ -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.13 (Darwin) iQIcBAABAgAGBQJVJcYsAAoJEBiY3kIkQRNJVAUQAJ8/XQIfMqqC5oYeEFfHOPYZ L7qy46bXHVBa9Qd8zAJ2Dou3IbI2ZoF6/Et89K/UggOycMlt5FKV/9toWyuZv4Po L682wonoxX99qvVTHo6+wtnmYO7+G0f82h+qHMErxjP+I6gzRNBvRr+SfY7VlGdK wikMKOMWC5smrScSHITnOq1Ews5pe3N7qDYMzK0XVZmgDoaem4RSWMJs4My/qVLN e0CqYWq2A22GX7sXl6pjneJYQvcAXUX+CAzp24QnPSb+Q22Guj91TcxLFcHCTDdn qgqMsEyMiisoglwrCbO+D+1xq9mjN9tNFWP66SQ48mrrHYTBV5sz9eJyDfroJaLP CWgbDTgq6GzRMehHT3hXfYS5NNatjnhkNISXR7pnVP/obIi/vpWh5ll6Gd8q26z+ a/O41UzOaLTeNI365MWT4/cnXohVLRG7iVJbAbCxoQmEgsYMRc/pBAzWJtLfcB2G jdTswYL6+MUdL8sB9pZ82D+BP/YAdHe69CyTu1lk9RT2pYtI/kkfjHubXBCYEJSG +VGllBbYG6idQJpyrOYNRJyrDi9yvDJ2W+S0iQrlZrxzGBVGTB/y65S8C+2WTBcE lf1Qb5GDsQrZWgD+jtWTywOYHtCBwyCKSAXxSARMbNPeak9WPlcW/Jmu+fUcMe2x dg1KdHOa34shrKDaOVzW =od6m -----END PGP SIGNATURE-----''' self.revision = { 'id': 'bc0195aad0daa2ad5b0d76cce22b167bc3435590', 'directory': '85a74718d377195e1efd0843ba4f3260bad4fe07', 'parents': ['01e2d0627a9a6edb24c37db45db5ecb31e9de808'], 'author': { 'name': b'Linus Torvalds', 'email': b'torvalds@linux-foundation.org', }, 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'committer': { 'name': b'Linus Torvalds', 'email': b'torvalds@linux-foundation.org', }, 'committer_date': datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'message': b'Linux 4.2-rc2\n', } self.revision_none_metadata = { 'id': 'bc0195aad0daa2ad5b0d76cce22b167bc3435590', 'directory': '85a74718d377195e1efd0843ba4f3260bad4fe07', 'parents': ['01e2d0627a9a6edb24c37db45db5ecb31e9de808'], 'author': { 'name': b'Linus Torvalds', 'email': b'torvalds@linux-foundation.org', }, 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'committer': { 'name': b'Linus Torvalds', 'email': b'torvalds@linux-foundation.org', }, 'committer_date': datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'message': b'Linux 4.2-rc2\n', 'metadata': None, } self.synthetic_revision = { 'id': b'\xb2\xa7\xe1&\x04\x92\xe3D\xfa\xb3\xcb\xf9\x1b\xc1<\x91' b'\xe0T&\xfd', 'author': { 'name': b'Software Heritage', 'email': b'robot@softwareheritage.org', }, 'date': { 'timestamp': {'seconds': 1437047495}, 'offset': 0, 'negative_utc': False, }, 'type': 'tar', 'committer': { 'name': b'Software Heritage', 'email': b'robot@softwareheritage.org', }, 'committer_date': 1437047495, 'synthetic': True, 'parents': [None], 'message': b'synthetic revision message\n', 'directory': b'\xd1\x1f\x00\xa6\xa0\xfe\xa6\x05SA\xd2U\x84\xb5\xa9' b'e\x16\xc0\xd2\xb8', 'metadata': {'original_artifact': [ {'archive_type': 'tar', 'name': 'gcc-5.2.0.tar.bz2', 'sha1_git': '39d281aff934d44b439730057e55b055e206a586', 'sha1': 'fe3f5390949d47054b613edc36c557eb1d51c18e', 'sha256': '5f835b04b5f7dd4f4d2dc96190ec1621b8d89f' '2dc6f638f9f8bc1b1014ba8cad'}]}, } # cat commit.txt | git hash-object -t commit --stdin self.revision_with_extra_headers = { 'id': '010d34f384fa99d047cdd5e2f41e56e5c2feee45', 'directory': '85a74718d377195e1efd0843ba4f3260bad4fe07', 'parents': ['01e2d0627a9a6edb24c37db45db5ecb31e9de808'], 'author': { 'name': b'Linus Torvalds', 'email': b'torvalds@linux-foundation.org', 'fullname': b'Linus Torvalds <torvalds@linux-foundation.org>', }, 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'committer': { 'name': b'Linus Torvalds', 'email': b'torvalds@linux-foundation.org', 'fullname': b'Linus Torvalds <torvalds@linux-foundation.org>', }, 'committer_date':
datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'message': b'Linux 4.2-rc2\n', 'metadata': { 'extra_headers': [ ['svn-repo-uuid', '046f1af7-66c2-d61b-5410-ce57b7db7bff'], ['svn-revision', 10], ] } } self.revision_with_gpgsig = { 'id': '44cc742a8ca17b9c279be4cc195a93a6ef7a320e', 'directory': 'b134f9b7dc434f593c0bab696345548b37de0558', 'parents': ['689664ae944b4692724f13b709a4e4de28b54e57', 'c888305e1efbaa252d01b4e5e6b778f865a97514'], 'author': { 'name': b'Jiang Xin', 'email': b'worldhello.net@gmail.com', 'fullname': b'Jiang Xin <worldhello.net@gmail.com>', }, 'date': { 'timestamp': 1428538899, 'offset': 480, }, 'committer': { 'name': b'Jiang Xin', 'email': b'worldhello.net@gmail.com', }, 'committer_date': { 'timestamp': 1428538899, 'offset': 480, }, 'metadata': { 'extra_headers': [ ['gpgsig', gpgsig], ], }, 'message': b'''Merge branch 'master' of git://github.com/alexhenrie/git-po * 'master' of git://github.com/alexhenrie/git-po: l10n: ca.po: update translation ''' } self.revision_no_message = { 'id': '4cfc623c9238fa92c832beed000ce2d003fd8333', 'directory': 'b134f9b7dc434f593c0bab696345548b37de0558', 'parents': ['689664ae944b4692724f13b709a4e4de28b54e57', 'c888305e1efbaa252d01b4e5e6b778f865a97514'], 'author': { 'name': b'Jiang Xin', 'email': b'worldhello.net@gmail.com', 'fullname': b'Jiang Xin <worldhello.net@gmail.com>', }, 'date': { 'timestamp': 1428538899, 'offset': 480, }, 'committer': { 'name': b'Jiang Xin', 'email': b'worldhello.net@gmail.com', }, 'committer_date': { 'timestamp': 1428538899, 'offset': 480, }, 'message': None, } self.revision_empty_message = { 'id': '7442cd78bd3b4966921d6a7f7447417b7acb15eb', 'directory': 'b134f9b7dc434f593c0bab696345548b37de0558', 'parents': ['689664ae944b4692724f13b709a4e4de28b54e57', 'c888305e1efbaa252d01b4e5e6b778f865a97514'], 'author': { 'name': b'Jiang Xin', 'email': b'worldhello.net@gmail.com', 'fullname': b'Jiang Xin <worldhello.net@gmail.com>', }, 'date': { 'timestamp': 1428538899, 'offset': 480, }, 'committer': { 'name': b'Jiang Xin', 'email': b'worldhello.net@gmail.com', }, 'committer_date': { 'timestamp': 1428538899, 'offset': 480, }, 'message': b'', } self.revision_only_fullname = { 'id': '010d34f384fa99d047cdd5e2f41e56e5c2feee45', 'directory': '85a74718d377195e1efd0843ba4f3260bad4fe07', 'parents': ['01e2d0627a9a6edb24c37db45db5ecb31e9de808'], 'author': { 'fullname': b'Linus Torvalds <torvalds@linux-foundation.org>', }, 'date': datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'committer': { 'fullname': b'Linus Torvalds <torvalds@linux-foundation.org>', }, 'committer_date': datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz), 'message': b'Linux 4.2-rc2\n', 'metadata': { 'extra_headers': [ ['svn-repo-uuid', '046f1af7-66c2-d61b-5410-ce57b7db7bff'], ['svn-revision', 10], ] } } - @istest - def revision_identifier(self): + def test_revision_identifier(self): self.assertEqual( identifiers.revision_identifier(self.revision), identifiers.identifier_to_str(self.revision['id']), ) - @istest - def revision_identifier_none_metadata(self): + def test_revision_identifier_none_metadata(self): self.assertEqual( identifiers.revision_identifier(self.revision_none_metadata), identifiers.identifier_to_str(self.revision_none_metadata['id']), ) - @istest - def revision_identifier_synthetic(self): + def test_revision_identifier_synthetic(self): self.assertEqual( identifiers.revision_identifier(self.synthetic_revision), identifiers.identifier_to_str(self.synthetic_revision['id']), ) - @istest - def revision_identifier_with_extra_headers(self): + def test_revision_identifier_with_extra_headers(self): self.assertEqual( identifiers.revision_identifier(
self.revision_with_extra_headers), identifiers.identifier_to_str( self.revision_with_extra_headers['id']), ) - @istest - def revision_identifier_with_gpgsig(self): + def test_revision_identifier_with_gpgsig(self): self.assertEqual( identifiers.revision_identifier( self.revision_with_gpgsig), identifiers.identifier_to_str( self.revision_with_gpgsig['id']), ) - @istest - def revision_identifier_no_message(self): + def test_revision_identifier_no_message(self): self.assertEqual( identifiers.revision_identifier( self.revision_no_message), identifiers.identifier_to_str( self.revision_no_message['id']), ) - @istest - def revision_identifier_empty_message(self): + def test_revision_identifier_empty_message(self): self.assertEqual( identifiers.revision_identifier( self.revision_empty_message), identifiers.identifier_to_str( self.revision_empty_message['id']), ) - @istest - def revision_identifier_only_fullname(self): + def test_revision_identifier_only_fullname(self): self.assertEqual( identifiers.revision_identifier( self.revision_only_fullname), identifiers.identifier_to_str( self.revision_only_fullname['id']), ) class ReleaseIdentifier(unittest.TestCase): def setUp(self): linus_tz = datetime.timezone(datetime.timedelta(minutes=-420)) self.release = { 'id': '2b10839e32c4c476e9d94492756bb1a3e1ec4aa8', 'target': b't\x1b"R\xa5\xe1Ml`\xa9\x13\xc7z`\x99\xab\xe7:\x85J', 'target_type': 'revision', 'name': b'v2.6.14', 'author': { 'name': b'Linus Torvalds', 'email': b'torvalds@g5.osdl.org', }, 'date': datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), 'message': b'''\ Linux 2.6.14 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.1 (GNU/Linux) iD8DBQBDYWq6F3YsRnbiHLsRAmaeAJ9RCez0y8rOBbhSv344h86l/VVcugCeIhO1 wdLOnvj91G4wxYqrvThthbE= =7VeT -----END PGP SIGNATURE----- ''', 'synthetic': False, } self.release_no_author = { 'id': b'&y\x1a\x8b\xcf\x0em3\xf4:\xefv\x82\xbd\xb5U#mV\xde', 'target': '9ee1c939d1cb936b1f98e8d81aeffab57bae46ab', 'target_type': 'revision', 'name': b'v2.6.12', 'message': b'''\ This is the final 2.6.12 release -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.2.4 (GNU/Linux) iD8DBQBCsykyF3YsRnbiHLsRAvPNAJ482tCZwuxp/bJRz7Q98MHlN83TpACdHr37 o6X/3T+vm8K3bf3driRr34c= =sBHn -----END PGP SIGNATURE----- ''', 'synthetic': False, } self.release_no_message = { 'id': 'b6f4f446715f7d9543ef54e41b62982f0db40045', 'target': '9ee1c939d1cb936b1f98e8d81aeffab57bae46ab', 'target_type': 'revision', 'name': b'v2.6.12', 'author': { 'name': b'Linus Torvalds', 'email': b'torvalds@g5.osdl.org', }, 'date': datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), 'message': None, } self.release_empty_message = { 'id': '71a0aea72444d396575dc25ac37fec87ee3c6492', 'target': '9ee1c939d1cb936b1f98e8d81aeffab57bae46ab', 'target_type': 'revision', 'name': b'v2.6.12', 'author': { 'name': b'Linus Torvalds', 'email': b'torvalds@g5.osdl.org', }, 'date': datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz), 'message': b'', } self.release_negative_utc = { 'id': '97c8d2573a001f88e72d75f596cf86b12b82fd01', 'name': b'20081029', 'target': '54e9abca4c77421e2921f5f156c9fe4a9f7441c7', 'target_type': 'revision', 'date': { 'timestamp': {'seconds': 1225281976}, 'offset': 0, 'negative_utc': True, }, 'author': { 'name': b'Otavio Salvador', 'email': b'otavio@debian.org', 'id': 17640, }, 'synthetic': False, 'message': b'tagging version 20081029\n\nr56558\n', } self.release_newline_in_author = { 'author': { 'email': b'esycat@gmail.com', 'fullname': b'Eugene Janusov\n<esycat@gmail.com>', 'name': b'Eugene Janusov\n', }, 'date': {
'negative_utc': None, 'offset': 600, 'timestamp': { 'microseconds': 0, 'seconds': 1377480558, }, }, 'id': b'\\\x98\xf5Y\xd04\x16-\xe2->\xbe\xb9T3\xe6\xf8\x88R1', 'message': b'Release of v0.3.2.', 'name': b'0.3.2', 'synthetic': False, 'target': (b'\xc0j\xa3\xd9;x\xa2\x86\\I5\x17' b'\x000\xf8\xc2\xd79o\xd3'), 'target_type': 'revision', } - @istest - def release_identifier(self): + def test_release_identifier(self): self.assertEqual( identifiers.release_identifier(self.release), identifiers.identifier_to_str(self.release['id']) ) - @istest - def release_identifier_no_author(self): + def test_release_identifier_no_author(self): self.assertEqual( identifiers.release_identifier(self.release_no_author), identifiers.identifier_to_str(self.release_no_author['id']) ) - @istest - def release_identifier_no_message(self): + def test_release_identifier_no_message(self): self.assertEqual( identifiers.release_identifier(self.release_no_message), identifiers.identifier_to_str(self.release_no_message['id']) ) - @istest - def release_identifier_empty_message(self): + def test_release_identifier_empty_message(self): self.assertEqual( identifiers.release_identifier(self.release_empty_message), identifiers.identifier_to_str(self.release_empty_message['id']) ) - @istest - def release_identifier_negative_utc(self): + def test_release_identifier_negative_utc(self): self.assertEqual( identifiers.release_identifier(self.release_negative_utc), identifiers.identifier_to_str(self.release_negative_utc['id']) ) - @istest - def release_identifier_newline_in_author(self): + def test_release_identifier_newline_in_author(self): self.assertEqual( identifiers.release_identifier(self.release_newline_in_author), identifiers.identifier_to_str(self.release_newline_in_author['id']) ) class SnapshotIdentifier(unittest.TestCase): def setUp(self): super().setUp() self.empty = { 'id': '1a8893e6a86f444e8be8e7bda6cb34fb1735a00e', 'branches': {}, } self.dangling_branch = { 'id': 'c84502e821eb21ed84e9fd3ec40973abc8b32353', 'branches': { b'HEAD': None, }, } self.unresolved = { 'id': '84b4548ea486e4b0a7933fa541ff1503a0afe1e0', 'branches': { b'foo': { 'target': b'bar', 'target_type': 'alias', }, }, } self.all_types = { 'id': '6e65b86363953b780d92b0a928f3e8fcdd10db36', 'branches': { b'directory': { 'target': '1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8', 'target_type': 'directory', }, b'content': { 'target': 'fe95a46679d128ff167b7c55df5d02356c5a1ae1', 'target_type': 'content', }, b'alias': { 'target': b'revision', 'target_type': 'alias', }, b'revision': { 'target': 'aafb16d69fd30ff58afdd69036a26047f3aebdc6', 'target_type': 'revision', }, b'release': { 'target': '7045404f3d1c54e6473c71bbb716529fbad4be24', 'target_type': 'release', }, b'snapshot': { 'target': '1a8893e6a86f444e8be8e7bda6cb34fb1735a00e', 'target_type': 'snapshot', }, b'dangling': None, } } def test_empty_snapshot(self): self.assertEqual( identifiers.snapshot_identifier(self.empty), identifiers.identifier_to_str(self.empty['id']), ) def test_dangling_branch(self): self.assertEqual( identifiers.snapshot_identifier(self.dangling_branch), identifiers.identifier_to_str(self.dangling_branch['id']), ) def test_unresolved(self): with self.assertRaisesRegex(ValueError, "b'foo' -> b'bar'"): identifiers.snapshot_identifier(self.unresolved) def test_unresolved_force(self): self.assertEqual( identifiers.snapshot_identifier( self.unresolved, ignore_unresolved=True, ), identifiers.identifier_to_str(self.unresolved['id']), ) def test_all_types(self): self.assertEqual( 
identifiers.snapshot_identifier(self.all_types), identifiers.identifier_to_str(self.all_types['id']), ) def test_persistent_identifier(self): _snapshot_id = hashutil.hash_to_bytes( 'c7c108084bc0bf3d81436bf980b46e98bd338453') _release_id = '22ece559cc7cc2364edc5e5593d63ae8bd229f9f' _revision_id = '309cf2674ee7a0749978cf8265ab91a60aea0f7d' _directory_id = 'd198bc9d7a6bcf6db04f476d29314f157507d505' _content_id = '94a9ed024d3859793618152ea559a168bbcbb5e2' _snapshot = {'id': _snapshot_id} _release = {'id': _release_id} _revision = {'id': _revision_id} _directory = {'id': _directory_id} _content = {'sha1_git': _content_id} for full_type, _hash, expected_persistent_id, version, _meta in [ (SNAPSHOT, _snapshot_id, 'swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453', None, {}), (RELEASE, _release_id, 'swh:2:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f', 2, {}), (REVISION, _revision_id, 'swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d', None, {}), (DIRECTORY, _directory_id, 'swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505', None, {}), (CONTENT, _content_id, 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2', 1, {}), (SNAPSHOT, _snapshot, 'swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453', None, {}), (RELEASE, _release, 'swh:2:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f', 2, {}), (REVISION, _revision, 'swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d', None, {}), (DIRECTORY, _directory, 'swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505', None, {}), (CONTENT, _content, 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2', 1, {}), (CONTENT, _content, 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2;origin=1', 1, {'origin': '1'}), ]: if version: actual_value = identifiers.persistent_identifier( full_type, _hash, version, metadata=_meta) else: actual_value = identifiers.persistent_identifier( full_type, _hash, metadata=_meta) - self.assertEquals(actual_value, expected_persistent_id) + self.assertEqual(actual_value, expected_persistent_id) def test_persistent_identifier_wrong_input(self): _snapshot_id = 'notahash4bc0bf3d81436bf980b46e98bd338453' _snapshot = {'id': _snapshot_id} for _type, _hash, _error in [ (SNAPSHOT, _snapshot_id, 'Unexpected characters'), (SNAPSHOT, _snapshot, 'Unexpected characters'), ('foo', '', 'Wrong input: Supported types are'), ]: with self.assertRaisesRegex(ValidationError, _error): identifiers.persistent_identifier(_type, _hash) def test_parse_persistent_identifier(self): for pid, _type, _version, _hash in [ ('swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2', CONTENT, 1, '94a9ed024d3859793618152ea559a168bbcbb5e2'), ('swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505', DIRECTORY, 1, 'd198bc9d7a6bcf6db04f476d29314f157507d505'), ('swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d', REVISION, 1, '309cf2674ee7a0749978cf8265ab91a60aea0f7d'), ('swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f', RELEASE, 1, '22ece559cc7cc2364edc5e5593d63ae8bd229f9f'), ('swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453', SNAPSHOT, 1, 'c7c108084bc0bf3d81436bf980b46e98bd338453'), ]: expected_result = PersistentId( namespace='swh', scheme_version=_version, object_type=_type, object_id=_hash, metadata={} ) actual_result = identifiers.parse_persistent_identifier(pid) - self.assertEquals(actual_result, expected_result) + self.assertEqual(actual_result, expected_result) for pid, _type, _version, _hash, _metadata in [ ('swh:1:cnt:9c95815d9e9d91b8dae8e05d8bbc696fe19f796b;lines=1-18;origin=https://github.com/python/cpython', # noqa CONTENT, 1, '9c95815d9e9d91b8dae8e05d8bbc696fe19f796b', 
{ 'lines': '1-18', 'origin': 'https://github.com/python/cpython' }), ('swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=deb://Debian/packages/linuxdoc-tools', # noqa DIRECTORY, 1, '0b6959356d30f1a4e9b7f6bca59b9a336464c03d', { 'origin': 'deb://Debian/packages/linuxdoc-tools' }) ]: expected_result = PersistentId( namespace='swh', scheme_version=_version, object_type=_type, object_id=_hash, metadata=_metadata ) actual_result = identifiers.parse_persistent_identifier(pid) - self.assertEquals(actual_result, expected_result) + self.assertEqual(actual_result, expected_result) def test_parse_persistent_identifier_parsing_error(self): for pid, _error in [ ('swh:1:cnt', 'Wrong format: There should be 4 mandatory values'), ('swh:1:', 'Wrong format: There should be 4 mandatory values'), ('swh:', 'Wrong format: There should be 4 mandatory values'), ('swh:1:cnt:', 'Wrong format: Identifier should be present'), ('foo:1:cnt:abc8bc9d7a6bcf6db04f476d29314f157507d505', 'Wrong format: Supported namespace is \'swh\''), ('swh:2:dir:def8bc9d7a6bcf6db04f476d29314f157507d505', 'Wrong format: Supported version is 1'), ('swh:1:foo:fed8bc9d7a6bcf6db04f476d29314f157507d505', 'Wrong format: Supported types are %s' % ( ', '.join(PERSISTENT_IDENTIFIER_TYPES))), ('swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;invalid;' 'malformed', 'Contextual data is badly formatted, form key=val expected'), ('swh:1:snp:gh6959356d30f1a4e9b7f6bca59b9a336464c03d', 'Wrong format: Identifier should be a valid hash'), ('swh:1:snp:foo', 'Wrong format: Identifier should be a valid hash') ]: with self.assertRaisesRegex( ValidationError, _error): identifiers.parse_persistent_identifier(pid) diff --git a/swh/model/tests/test_merkle.py b/swh/model/tests/test_merkle.py index 9f43892..8b1180a 100644 --- a/swh/model/tests/test_merkle.py +++ b/swh/model/tests/test_merkle.py @@ -1,229 +1,229 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.model import merkle -class TestedMerkleNode(merkle.MerkleNode): +class MerkleTestNode(merkle.MerkleNode): type = 'tested_merkle_node_type' def __init__(self, data): super().__init__(data) self.compute_hash_called = 0 def compute_hash(self): self.compute_hash_called += 1 child_data = [ child + b'=' + self[child].hash for child in sorted(self) ] return ( b'hash(' + b', '.join([self.data['value']] + child_data) + b')' ) -class TestedMerkleLeaf(merkle.MerkleLeaf): +class MerkleTestLeaf(merkle.MerkleLeaf): type = 'tested_merkle_leaf_type' def __init__(self, data): super().__init__(data) self.compute_hash_called = 0 def compute_hash(self): self.compute_hash_called += 1 return b'hash(' + self.data['value'] + b')' class TestMerkleLeaf(unittest.TestCase): def setUp(self): self.data = {'value': b'value'} - self.instance = TestedMerkleLeaf(self.data) + self.instance = MerkleTestLeaf(self.data) def test_hash(self): self.assertEqual(self.instance.compute_hash_called, 0) instance_hash = self.instance.hash self.assertEqual(self.instance.compute_hash_called, 1) instance_hash2 = self.instance.hash self.assertEqual(self.instance.compute_hash_called, 1) self.assertEqual(instance_hash, instance_hash2) def test_data(self): self.assertEqual(self.instance.get_data(), self.data) def test_collect(self): collected = self.instance.collect() self.assertEqual( collected, { self.instance.type: { 
self.instance.hash: self.instance.get_data(), }, }, ) collected2 = self.instance.collect() self.assertEqual(collected2, {}) self.instance.reset_collect() collected3 = self.instance.collect() self.assertEqual(collected, collected3) def test_leaf(self): with self.assertRaisesRegex(ValueError, 'is a leaf'): self.instance[b'key1'] = 'Test' with self.assertRaisesRegex(ValueError, 'is a leaf'): del self.instance[b'key1'] with self.assertRaisesRegex(ValueError, 'is a leaf'): self.instance[b'key1'] with self.assertRaisesRegex(ValueError, 'is a leaf'): self.instance.update(self.data) class TestMerkleNode(unittest.TestCase): maxDiff = None def setUp(self): - self.root = TestedMerkleNode({'value': b'root'}) + self.root = MerkleTestNode({'value': b'root'}) self.nodes = {b'root': self.root} for i in (b'a', b'b', b'c'): value = b'root/' + i - node = TestedMerkleNode({ + node = MerkleTestNode({ 'value': value, }) self.root[i] = node self.nodes[value] = node for j in (b'a', b'b', b'c'): value2 = value + b'/' + j - node2 = TestedMerkleNode({ + node2 = MerkleTestNode({ 'value': value2, }) node[j] = node2 self.nodes[value2] = node2 for k in (b'a', b'b', b'c'): value3 = value2 + b'/' + j - node3 = TestedMerkleNode({ + node3 = MerkleTestNode({ 'value': value3, }) node2[j] = node3 self.nodes[value3] = node3 def test_hash(self): for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 0) # Root hash will compute hash for all the nodes hash = self.root.hash for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 1) self.assertIn(node.data['value'], hash) # Should use the cached value hash2 = self.root.hash self.assertEqual(hash, hash2) for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 1) # Should still use the cached value hash3 = self.root.update_hash(force=False) self.assertEqual(hash, hash3) for node in self.nodes.values(): self.assertEqual(node.compute_hash_called, 1) # Force update of the cached value for a deeply nested node self.root[b'a'][b'b'].update_hash(force=True) for key, node in self.nodes.items(): # update_hash rehashes all children if key.startswith(b'root/a/b'): self.assertEqual(node.compute_hash_called, 2) else: self.assertEqual(node.compute_hash_called, 1) hash4 = self.root.hash self.assertEqual(hash, hash4) for key, node in self.nodes.items(): # update_hash also invalidates all parents if key in (b'root', b'root/a') or key.startswith(b'root/a/b'): self.assertEqual(node.compute_hash_called, 2) else: self.assertEqual(node.compute_hash_called, 1) def test_collect(self): collected = self.root.collect() self.assertEqual(len(collected[self.root.type]), len(self.nodes)) for node in self.nodes.values(): self.assertTrue(node.collected) collected2 = self.root.collect() self.assertEqual(collected2, {}) def test_get(self): for key in (b'a', b'b', b'c'): self.assertEqual(self.root[key], self.nodes[b'root/' + key]) with self.assertRaisesRegex(KeyError, "b'nonexistent'"): self.root[b'nonexistent'] def test_del(self): hash_root = self.root.hash hash_a = self.nodes[b'root/a'].hash del self.root[b'a'][b'c'] hash_root2 = self.root.hash hash_a2 = self.nodes[b'root/a'].hash self.assertNotEqual(hash_root, hash_root2) self.assertNotEqual(hash_a, hash_a2) self.assertEqual(self.nodes[b'root/a/c'].parents, []) with self.assertRaisesRegex(KeyError, "b'nonexistent'"): del self.root[b'nonexistent'] def test_update(self): hash_root = self.root.hash hash_b = self.root[b'b'].hash new_children = { - b'c': TestedMerkleNode({'value': b'root/b/new_c'}), - b'd': 
TestedMerkleNode({'value': b'root/b/d'}), + b'c': MerkleTestNode({'value': b'root/b/new_c'}), + b'd': MerkleTestNode({'value': b'root/b/d'}), } # collect all nodes self.root.collect() self.root[b'b'].update(new_children) # Ensure everyone got reparented self.assertEqual(new_children[b'c'].parents, [self.root[b'b']]) self.assertEqual(new_children[b'd'].parents, [self.root[b'b']]) self.assertEqual(self.nodes[b'root/b/c'].parents, []) hash_root2 = self.root.hash self.assertNotEqual(hash_root, hash_root2) self.assertIn(b'root/b/new_c', hash_root2) self.assertIn(b'root/b/d', hash_root2) hash_b2 = self.root[b'b'].hash self.assertNotEqual(hash_b, hash_b2) for key, node in self.nodes.items(): if key in (b'root', b'root/b'): self.assertEqual(node.compute_hash_called, 2) else: self.assertEqual(node.compute_hash_called, 1) # Ensure we collected root, root/b, and both new children collected_after_update = self.root.collect() self.assertCountEqual( - collected_after_update[TestedMerkleNode.type], + collected_after_update[MerkleTestNode.type], [self.nodes[b'root'].hash, self.nodes[b'root/b'].hash, new_children[b'c'].hash, new_children[b'd'].hash], ) # test that noop updates doesn't invalidate anything self.root[b'a'][b'b'].update({}) self.assertEqual(self.root.collect(), {}) diff --git a/swh/model/tests/test_toposort.py b/swh/model/tests/test_toposort.py index 66a8ee1..174368f 100644 --- a/swh/model/tests/test_toposort.py +++ b/swh/model/tests/test_toposort.py @@ -1,99 +1,100 @@ # Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest + from swh.model.toposort import toposort def is_toposorted_slow(revision_log): """Check (inefficiently) that the given revision log is in any topological order. Complexity: O(n^2). (Note: It's totally possible to write a O(n) is_toposorted function, but it requires computing the transitive closure of the input DAG, which requires computing a topological ordering of that DAG, which kind of defeats the purpose of writing unit tests for toposort().) Args: revision_log: Revision log as returned by swh.storage.Storage.revision_log(). Returns: True if the revision log is topologically sorted. 
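Example (illustrative): is_toposorted_slow([{'id': 1, 'parents': ()}, {'id': 2, 'parents': (1,)}]) returns True, while the same two entries in the opposite order return False, since revision 2 would then appear before its parent.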
""" rev_by_id = {r['id']: r for r in revision_log} def all_parents(revision): for parent in revision['parents']: yield parent yield from all_parents(rev_by_id[parent]) visited = set() for rev in revision_log: visited.add(rev['id']) if not all(parent in visited for parent in all_parents(rev)): return False return True class TestToposort(unittest.TestCase): def generate_log(self, graph): for node_id, parents in graph.items(): yield {'id': node_id, 'parents': tuple(parents)} def unordered_log(self, log): return {(d['id'], tuple(d['parents'])) for d in log} def check(self, graph): log = list(self.generate_log(graph)) topolog = list(toposort(log)) self.assertEqual(len(topolog), len(graph)) self.assertEqual(self.unordered_log(topolog), self.unordered_log(log)) self.assertTrue(is_toposorted_slow(toposort(log))) def test_linked_list(self): self.check({3: [2], 2: [1], 1: []}) def test_fork(self): self.check({7: [6], 6: [4], 5: [3], 4: [2], 3: [2], 2: [1], 1: []}) def test_fork_merge(self): self.check({8: [7, 5], 7: [6], 6: [4], 5: [3], 4: [2], 3: [2], 2: [1], 1: []}) def test_two_origins(self): self.check({9: [8], 8: [7, 5], 7: [6], 6: [4], 5: [3], 4: [], 3: []}) def test_three_way(self): self.check({9: [8, 4, 2], 8: [7, 5], 7: [6], 6: [4], 5: [3], 4: [2], 3: [2], 2: [1], 1: []}) diff --git a/swh/model/tests/test_validators.py b/swh/model/tests/test_validators.py index 60a1de4..8c8512e 100644 --- a/swh/model/tests/test_validators.py +++ b/swh/model/tests/test_validators.py @@ -1,75 +1,71 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import unittest -from nose.tools import istest - -from swh.model import validators, hashutil, exceptions +from swh.model import exceptions, hashutil, validators class TestValidators(unittest.TestCase): def setUp(self): self.valid_visible_content = { 'status': 'visible', 'length': 5, 'data': b'1984\n', 'ctime': datetime.datetime(2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc), } self.valid_visible_content.update( hashutil.hash_data(self.valid_visible_content['data'])) self.valid_absent_content = { 'status': 'absent', 'length': 5, 'ctime': datetime.datetime(2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc), 'reason': 'Content too large', 'sha1_git': self.valid_visible_content['sha1_git'], 'origin': 42, } self.invalid_content_hash_mismatch = self.valid_visible_content.copy() self.invalid_content_hash_mismatch.update( hashutil.hash_data(b"this is not the data you're looking for")) - @istest - def validate_content(self): + def test_validate_content(self): self.assertTrue( validators.validate_content(self.valid_visible_content)) self.assertTrue( validators.validate_content(self.valid_absent_content)) - @istest - def validate_content_hash_mismatch(self): + def test_validate_content_hash_mismatch(self): with self.assertRaises(exceptions.ValidationError) as cm: validators.validate_content(self.invalid_content_hash_mismatch) # All the hashes are wrong. 
The exception should be of the form: # ValidationError({ # NON_FIELD_ERRORS: [ # ValidationError('content-hash-mismatch', 'sha1'), # ValidationError('content-hash-mismatch', 'sha1_git'), # ValidationError('content-hash-mismatch', 'sha256'), # ] # }) exc = cm.exception self.assertIsInstance(str(exc), str) - self.assertEquals(set(exc.error_dict.keys()), - {exceptions.NON_FIELD_ERRORS}) + self.assertEqual(set(exc.error_dict.keys()), + {exceptions.NON_FIELD_ERRORS}) hash_mismatches = exc.error_dict[exceptions.NON_FIELD_ERRORS] self.assertIsInstance(hash_mismatches, list) self.assertEqual(len(hash_mismatches), 4) self.assertTrue(all(mismatch.code == 'content-hash-mismatch' for mismatch in hash_mismatches)) self.assertEqual(set(mismatch.params['hash'] for mismatch in hash_mismatches), {'sha1', 'sha1_git', 'sha256', 'blake2s256'}) diff --git a/swh/model/validators.py b/swh/model/validators.py index ea64b40..6d2c370 100644 --- a/swh/model/validators.py +++ b/swh/model/validators.py @@ -1,76 +1,77 @@ -# Copyright (C) 2015 The Software Heritage developers +# Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from .exceptions import ValidationError, NON_FIELD_ERRORS -from . import fields, hashutil +from . import fields +from .hashutil import MultiHash, hash_to_bytes def validate_content(content): """Validate that a content has the correct schema. Args: a content (dictionary) to validate.""" def validate_content_status(status): return fields.validate_enum(status, {'absent', 'visible', 'hidden'}) def validate_keys(content): hashes = {'sha1', 'sha1_git', 'sha256'} errors = [] out = True if content['status'] == 'absent': try: out = out and fields.validate_all_keys(content, {'reason', 'origin'}) except ValidationError as e: errors.append(e) try: out = out and fields.validate_any_key(content, hashes) except ValidationError as e: errors.append(e) else: try: out = out and fields.validate_all_keys(content, hashes) except ValidationError as e: errors.append(e) if errors: raise ValidationError(errors) return out def validate_hashes(content): errors = [] if 'data' in content: - hashes = hashutil.hash_data(content['data']) + hashes = MultiHash.from_data(content['data']).digest() for hash_type, computed_hash in hashes.items(): if hash_type not in content: continue - content_hash = hashutil.hash_to_bytes(content[hash_type]) + content_hash = hash_to_bytes(content[hash_type]) if content_hash != computed_hash: errors.append(ValidationError( 'hash mismatch in content for hash %(hash)s', params={'hash': hash_type}, code='content-hash-mismatch', )) if errors: raise ValidationError(errors) return True content_schema = { 'sha1': (False, fields.validate_sha1), 'sha1_git': (False, fields.validate_sha1_git), 'sha256': (False, fields.validate_sha256), 'status': (True, validate_content_status), 'length': (True, fields.validate_int), 'ctime': (True, fields.validate_datetime), 'reason': (False, fields.validate_str), 'origin': (False, fields.validate_int), 'data': (False, fields.validate_bytes), NON_FIELD_ERRORS: [validate_keys, validate_hashes], } return fields.validate_against_schema('content', content_schema, content) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..0fb07c6 --- /dev/null +++ b/tox.ini @@ -0,0 +1,16 @@ +[tox] +envlist=flake8,py3 + +[testenv:py3] +deps = + .[testing] + pytest-cov +commands = + pytest 
--cov=swh --cov-branch {posargs} + +[testenv:flake8] +skip_install = true +deps = + flake8 +commands = + {envpython} -m flake8 diff --git a/version.txt b/version.txt index 04ecf50..b5af979 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -v0.0.27-0-geb338cd \ No newline at end of file +v0.0.28-0-g4e6bce9 \ No newline at end of file
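With the tox configuration added above, the test suite and the linter can be run locally along these lines (a sketch, assuming tox is installed separately and invoked from the top of the checkout):

```sh
$ pip install tox           # tox itself is assumed available, not a declared dependency
$ tox -e py3                # runs pytest with branch coverage, per [testenv:py3]
$ tox -e flake8             # runs the linter in its own environment
$ tox -e py3 -- -k merkle   # arguments after "--" reach pytest via {posargs}
```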