diff --git a/.gitignore b/.gitignore index 0bafd00..6870872 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,15 @@ *~ build /.coverage /.coverage.* dist *.egg-info/ .eggs/ .hypothesis *.pyc __pycache__ .pytest_cache *.sw? .tox version.txt +.mypy_cache/ diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..2c020e0 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,18 @@ +[mypy] +namespace_packages = True +warn_unused_ignores = True + + +# 3rd party libraries without stubs (yet) + +[mypy-pkg_resources.*] +ignore_missing_imports = True + +[mypy-pyblake2.*] +ignore_missing_imports = True + +[mypy-pytest.*] +ignore_missing_imports = True + +# [mypy-add_your_lib_here.*] +# ignore_missing_imports = True diff --git a/swh/__init__.py b/swh/__init__.py index 69e3be5..de9df06 100644 --- a/swh/__init__.py +++ b/swh/__init__.py @@ -1 +1,4 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) +from typing import Iterable + +__path__ = __import__('pkgutil').extend_path(__path__, + __name__) # type: Iterable[str] diff --git a/swh/model/from_disk.py b/swh/model/from_disk.py index bfd7c7c..64a6ef7 100644 --- a/swh/model/from_disk.py +++ b/swh/model/from_disk.py @@ -1,349 +1,351 @@ # Copyright (C) 2017-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import enum import os import stat +from typing import List + from .hashutil import MultiHash, HASH_BLOCK_SIZE from .merkle import MerkleLeaf, MerkleNode from .identifiers import ( directory_identifier, identifier_to_bytes as id_to_bytes, identifier_to_str as id_to_str, ) class DentryPerms(enum.IntEnum): """Admissible permissions for directory entries.""" content = 0o100644 """Content""" executable_content = 0o100755 """Executable content (e.g. executable script)""" symlink = 0o120000 """Symbolic link""" directory = 0o040000 """Directory""" revision = 0o160000 """Revision (e.g. submodule)""" def mode_to_perms(mode): """Convert a file mode to a permission compatible with Software Heritage directory entries Args: mode (int): a file mode as returned by :func:`os.stat` in :attr:`os.stat_result.st_mode` Returns: DentryPerms: one of the following values: :const:`DentryPerms.content`: plain file :const:`DentryPerms.executable_content`: executable file :const:`DentryPerms.symlink`: symbolic link :const:`DentryPerms.directory`: directory """ if stat.S_ISLNK(mode): return DentryPerms.symlink if stat.S_ISDIR(mode): return DentryPerms.directory else: # file is executable in any way if mode & (0o111): return DentryPerms.executable_content else: return DentryPerms.content class Content(MerkleLeaf): """Representation of a Software Heritage content as a node in a Merkle tree. The current Merkle hash for the Content nodes is the `sha1_git`, which makes it consistent with what :class:`Directory` uses for its own hash computation. 
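For illustration, a minimal usage sketch (the payload here is arbitrary, not taken from the archive):

    .. code-block:: python

        content = Content.from_bytes(mode=0o100644, data=b'hello world\n')
        content.hash                # sha1_git digest of the data, as bytes
        content.data['length']      # 12
        content.data['perms']       # DentryPerms.content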
""" - __slots__ = [] + __slots__ = [] # type: List[str] type = 'content' @classmethod def from_bytes(cls, *, mode, data): """Convert data (raw :class:`bytes`) to a Software Heritage content entry Args: mode (int): a file mode (passed to :func:`mode_to_perms`) data (bytes): raw contents of the file """ ret = MultiHash.from_data(data).digest() ret['length'] = len(data) ret['perms'] = mode_to_perms(mode) ret['data'] = data return cls(ret) @classmethod def from_symlink(cls, *, path, mode): """Convert a symbolic link to a Software Heritage content entry""" return cls.from_bytes(mode=mode, data=os.readlink(path)) @classmethod def from_file(cls, *, path, data=False, save_path=False): """Compute the Software Heritage content entry corresponding to an on-disk file. The returned dictionary contains keys useful for both: - loading the content in the archive (hashes, `length`) - using the content as a directory entry in a directory Args: path (bytes): path to the file for which we're computing the content entry data (bool): add the file data to the entry save_path (bool): add the file path to the entry """ file_stat = os.lstat(path) mode = file_stat.st_mode if stat.S_ISLNK(mode): # Symbolic link: return a file whose contents are the link target return cls.from_symlink(path=path, mode=mode) elif not stat.S_ISREG(mode): # not a regular file: return the empty file instead return cls.from_bytes(mode=mode, data=b'') length = file_stat.st_size if not data: ret = MultiHash.from_path(path).digest() else: h = MultiHash(length=length) chunks = [] with open(path, 'rb') as fobj: while True: chunk = fobj.read(HASH_BLOCK_SIZE) if not chunk: break h.update(chunk) chunks.append(chunk) ret = h.digest() ret['data'] = b''.join(chunks) if save_path: ret['path'] = path ret['perms'] = mode_to_perms(mode) ret['length'] = length obj = cls(ret) return obj def __repr__(self): return 'Content(id=%s)' % id_to_str(self.hash) def compute_hash(self): return self.data['sha1_git'] def accept_all_directories(dirname, entries): """Default filter for :func:`Directory.from_disk` accepting all directories Args: dirname (bytes): directory name entries (list): directory entries """ return True def ignore_empty_directories(dirname, entries): """Filter for :func:`directory_to_objects` ignoring empty directories Args: dirname (bytes): directory name entries (list): directory entries Returns: True if the directory is not empty, false if the directory is empty """ return bool(entries) def ignore_named_directories(names, *, case_sensitive=True): """Filter for :func:`directory_to_objects` to ignore directories named one of names. Args: names (list of bytes): names to ignore case_sensitive (bool): whether to do the filtering in a case sensitive way Returns: a directory filter for :func:`directory_to_objects` """ if not case_sensitive: names = [name.lower() for name in names] def named_filter(dirname, entries, names=names, case_sensitive=case_sensitive): if case_sensitive: return dirname not in names else: return dirname.lower() not in names return named_filter class Directory(MerkleNode): """Representation of a Software Heritage directory as a node in a Merkle Tree. This class can be used to generate, from an on-disk directory, all the objects that need to be sent to the Software Heritage archive. The :func:`from_disk` constructor allows you to generate the data structure from a directory on disk. The resulting :class:`Directory` can then be manipulated as a dictionary, using the path as key. 
The :func:`collect` method is used to retrieve all the objects that need to be added to the Software Heritage archive since the last collection, by class (contents and directories). When using the dict-like methods to update the contents of the directory, the affected levels of hierarchy are reset and can be collected again using the same method. This enables the efficient collection of updated nodes, for instance when the client is applying diffs. """ __slots__ = ['__entries'] type = 'directory' @classmethod def from_disk(cls, *, path, data=False, save_path=False, dir_filter=accept_all_directories): """Compute the Software Heritage objects for a given directory tree Args: path (bytes): the directory to traverse data (bool): whether to add the data to the content objects save_path (bool): whether to add the path to the content objects dir_filter (function): a filter to ignore some directories by name or contents. Takes two arguments: dirname and entries, and returns True if the directory should be added, False if the directory should be ignored. """ top_path = path dirs = {} for root, dentries, fentries in os.walk(top_path, topdown=False): entries = {} # Join fentries and dentries in the same processing, as symbolic # links to directories appear in dentries... for name in fentries + dentries: path = os.path.join(root, name) if not os.path.isdir(path) or os.path.islink(path): content = Content.from_file(path=path, data=data, save_path=save_path) entries[name] = content else: if dir_filter(name, dirs[path].entries): entries[name] = dirs[path] dirs[root] = cls({'name': os.path.basename(root)}) dirs[root].update(entries) return dirs[top_path] def __init__(self, data=None): super().__init__(data=data) self.__entries = None def invalidate_hash(self): self.__entries = None super().invalidate_hash() @staticmethod def child_to_directory_entry(name, child): if isinstance(child, Directory): return { 'type': 'dir', 'perms': DentryPerms.directory, 'target': child.hash, 'name': name, } elif isinstance(child, Content): return { 'type': 'file', 'perms': child.data['perms'], 'target': child.hash, 'name': name, } else: raise ValueError('unknown child') def get_data(self, **kwargs): return { 'id': self.hash, 'entries': self.entries, } @property def entries(self): if self.__entries is None: self.__entries = [ self.child_to_directory_entry(name, child) for name, child in self.items() ] return self.__entries def compute_hash(self): return id_to_bytes(directory_identifier({'entries': self.entries})) def __getitem__(self, key): if not isinstance(key, bytes): raise ValueError('Can only get a bytes from Directory') # Convenience shortcut if key == b'': return self if b'/' not in key: return super().__getitem__(key) else: key1, key2 = key.split(b'/', 1) return self.__getitem__(key1)[key2] def __setitem__(self, key, value): if not isinstance(key, bytes): raise ValueError('Can only set a bytes Directory entry') if not isinstance(value, (Content, Directory)): raise ValueError('Can only set a Directory entry to a Content or ' 'Directory') if key == b'': raise ValueError('Directory entry must have a name') if b'\x00' in key: raise ValueError('Directory entry name must not contain nul bytes') if b'/' not in key: return super().__setitem__(key, value) else: key1, key2 = key.rsplit(b'/', 1) self[key1].__setitem__(key2, value) def __delitem__(self, key): if not isinstance(key, bytes): raise ValueError('Can only delete a bytes Directory entry') if b'/' not in key: super().__delitem__(key) else: key1, key2 = key.rsplit(b'/', 
1) del self[key1][key2] def __repr__(self): return 'Directory(id=%s, entries=[%s])' % ( id_to_str(self.hash), ', '.join(str(entry) for entry in self), ) diff --git a/swh/model/hashutil.py b/swh/model/hashutil.py index de85857..f045fb0 100644 --- a/swh/model/hashutil.py +++ b/swh/model/hashutil.py @@ -1,360 +1,361 @@ # Copyright (C) 2015-2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Module in charge of hashing function definitions. This is the base module use to compute swh's hashes. Only a subset of hashing algorithms is supported as defined in the ALGORITHMS set. Any provided algorithms not in that list will result in a ValueError explaining the error. This module defines a MultiHash class to ease the softwareheritage hashing algorithms computation. This allows to compute hashes from file object, path, data using a similar interface as what the standard hashlib module provides. Basic usage examples: - file object: MultiHash.from_file( file_object, hash_names=DEFAULT_ALGORITHMS).digest() - path (filepath): MultiHash.from_path(b'foo').hexdigest() - data (bytes): MultiHash.from_data(b'foo').bytehexdigest() "Complex" usage, defining a swh hashlib instance first: - To compute length, integrate the length to the set of algorithms to compute, for example: .. code-block:: python h = MultiHash(hash_names=set({'length'}).union(DEFAULT_ALGORITHMS)) with open(filepath, 'rb') as f: h.update(f.read(HASH_BLOCK_SIZE)) hashes = h.digest() # returns a dict of {hash_algo_name: hash_in_bytes} - Write alongside computing hashing algorithms (from a stream), example: .. code-block:: python h = MultiHash(length=length) with open(filepath, 'wb') as f: for chunk in r.iter_content(): # r a stream of sort h.update(chunk) f.write(chunk) hashes = h.hexdigest() # returns a dict of {hash_algo_name: hash_in_hex} """ import binascii import functools import hashlib import os from io import BytesIO +from typing import Callable, Dict ALGORITHMS = set(['sha1', 'sha256', 'sha1_git', 'blake2s256', 'blake2b512']) """Hashing algorithms supported by this module""" DEFAULT_ALGORITHMS = set(['sha1', 'sha256', 'sha1_git', 'blake2s256']) """Algorithms computed by default when calling the functions from this module. Subset of :const:`ALGORITHMS`. """ HASH_BLOCK_SIZE = 32768 """Block size for streaming hash computations made in this module""" -_blake2_hash_cache = {} +_blake2_hash_cache = {} # type: Dict[str, Callable] class MultiHash: """Hashutil class to support multiple hashes computation. Args: hash_names (set): Set of hash algorithms (+ optionally length) to compute hashes (cf. DEFAULT_ALGORITHMS) length (int): Length of the total sum of chunks to read If the length is provided as algorithm, the length is also computed and returned. 
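For example, a minimal sketch combining one hash with length tracking:

    .. code-block:: python

        h = MultiHash(hash_names={'sha1', 'length'})
        h.update(b'foo')
        h.digest()      # {'sha1': <20 raw bytes>, 'length': 3}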
""" def __init__(self, hash_names=DEFAULT_ALGORITHMS, length=None): self.state = {} self.track_length = False for name in hash_names: if name == 'length': self.state['length'] = 0 self.track_length = True else: self.state[name] = _new_hash(name, length) @classmethod def from_state(cls, state, track_length): ret = cls([]) ret.state = state ret.track_length = track_length @classmethod def from_file(cls, fobj, hash_names=DEFAULT_ALGORITHMS, length=None): ret = cls(length=length, hash_names=hash_names) while True: chunk = fobj.read(HASH_BLOCK_SIZE) if not chunk: break ret.update(chunk) return ret @classmethod def from_path(cls, path, hash_names=DEFAULT_ALGORITHMS): length = os.path.getsize(path) with open(path, 'rb') as f: ret = cls.from_file(f, hash_names=hash_names, length=length) return ret @classmethod def from_data(cls, data, hash_names=DEFAULT_ALGORITHMS): length = len(data) fobj = BytesIO(data) return cls.from_file(fobj, hash_names=hash_names, length=length) def update(self, chunk): for name, h in self.state.items(): if name == 'length': continue h.update(chunk) if self.track_length: self.state['length'] += len(chunk) def digest(self): return { name: h.digest() if name != 'length' else h for name, h in self.state.items() } def hexdigest(self): return { name: h.hexdigest() if name != 'length' else h for name, h in self.state.items() } def bytehexdigest(self): return { name: hash_to_bytehex(h.digest()) if name != 'length' else h for name, h in self.state.items() } def copy(self): copied_state = { name: h.copy() if name != 'length' else h for name, h in self.state.items() } return self.from_state(copied_state, self.track_length) def _new_blake2_hash(algo): """Return a function that initializes a blake2 hash. """ if algo in _blake2_hash_cache: return _blake2_hash_cache[algo]() lalgo = algo.lower() if not lalgo.startswith('blake2'): raise ValueError('Algorithm %s is not a blake2 hash' % algo) blake_family = lalgo[:7] digest_size = None if lalgo[7:]: try: digest_size, remainder = divmod(int(lalgo[7:]), 8) except ValueError: raise ValueError( 'Unknown digest size for algo %s' % algo ) from None if remainder: raise ValueError( 'Digest size for algorithm %s must be a multiple of 8' % algo ) if lalgo in hashlib.algorithms_available: # Handle the case where OpenSSL ships the given algorithm # (e.g. Python 3.5 on Debian 9 stretch) _blake2_hash_cache[algo] = lambda: hashlib.new(lalgo) else: # Try using the built-in implementation for Python 3.6+ if blake_family in hashlib.algorithms_available: blake2 = getattr(hashlib, blake_family) else: import pyblake2 blake2 = getattr(pyblake2, blake_family) _blake2_hash_cache[algo] = lambda: blake2(digest_size=digest_size) return _blake2_hash_cache[algo]() def _new_hashlib_hash(algo): """Initialize a digest object from hashlib. Handle the swh-specific names for the blake2-related algorithms """ if algo.startswith('blake2'): return _new_blake2_hash(algo) else: return hashlib.new(algo) def _new_git_hash(base_algo, git_type, length): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm, and feed it with the header for a git object of the given type and length. 
The header for hashing a git object consists of: - The type of the object (encoded in ASCII) - One ASCII space (\x20) - The length of the object (decimal encoded in ASCII) - One NUL byte Args: base_algo (str from :const:`ALGORITHMS`): a hashlib-supported algorithm git_type: the type of the git object (supposedly one of 'blob', 'commit', 'tag', 'tree') length: the length of the git object you're encoding Returns: a hashutil.hash object """ h = _new_hashlib_hash(base_algo) git_header = '%s %d\0' % (git_type, length) h.update(git_header.encode('ascii')) return h def _new_hash(algo, length=None): """Initialize a digest object (as returned by python's hashlib) for the requested algorithm. See the constant ALGORITHMS for the list of supported algorithms. If a git-specific hashing algorithm is requested (e.g., "sha1_git"), the hashing object will be pre-fed with the needed header; for this to work, length must be given. Args: algo (str): a hashing algorithm (one of ALGORITHMS) length (int): the length of the hashed payload (needed for git-specific algorithms) Returns: a hashutil.hash object Raises: ValueError if algo is unknown, or length is missing for a git-specific hash. """ if algo not in ALGORITHMS: raise ValueError( 'Unexpected hashing algorithm %s, expected one of %s' % (algo, ', '.join(sorted(ALGORITHMS)))) if algo.endswith('_git'): if length is None: raise ValueError('Missing length for git hashing algorithm') base_algo = algo[:-4] return _new_git_hash(base_algo, 'blob', length) return _new_hashlib_hash(algo) def hash_git_data(data, git_type, base_algo='sha1'): """Hash the given data as a git object of type git_type. Args: data: a bytes object git_type: the git object type base_algo: the base hashing algorithm used (default: sha1) Returns: a dict mapping each algorithm to a bytes digest Raises: ValueError if the git_type is unexpected. 
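For instance, the well-known identifiers of the empty blob and the empty tree can be recomputed this way (hex forms shown in the comments):

    .. code-block:: python

        hash_git_data(b'', 'blob')   # e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
        hash_git_data(b'', 'tree')   # 4b825dc642cb6eb9a060e54bf8d69288fbee4904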
""" git_object_types = {'blob', 'tree', 'commit', 'tag', 'snapshot'} if git_type not in git_object_types: raise ValueError('Unexpected git object type %s, expected one of %s' % (git_type, ', '.join(sorted(git_object_types)))) h = _new_git_hash(base_algo, git_type, len(data)) h.update(data) return h.digest() @functools.lru_cache() def hash_to_hex(hash): """Converts a hash (in hex or bytes form) to its hexadecimal ascii form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: str: the hexadecimal form of the hash """ if isinstance(hash, str): return hash return binascii.hexlify(hash).decode('ascii') @functools.lru_cache() def hash_to_bytehex(hash): """Converts a hash to its hexadecimal bytes representation Args: hash (bytes): a :class:`bytes` hash Returns: bytes: the hexadecimal form of the hash, as :class:`bytes` """ return binascii.hexlify(hash) @functools.lru_cache() def hash_to_bytes(hash): """Converts a hash (in hex or bytes form) to its raw bytes form Args: hash (str or bytes): a :class:`bytes` hash or a :class:`str` containing the hexadecimal form of the hash Returns: bytes: the :class:`bytes` form of the hash """ if isinstance(hash, bytes): return hash return bytes.fromhex(hash) @functools.lru_cache() def bytehex_to_hash(hex): """Converts a hexadecimal bytes representation of a hash to that hash Args: hash (bytes): a :class:`bytes` containing the hexadecimal form of the hash encoded in ascii Returns: bytes: the :class:`bytes` form of the hash """ return hash_to_bytes(hex.decode()) diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py index 62e031b..0254a89 100644 --- a/swh/model/identifiers.py +++ b/swh/model/identifiers.py @@ -1,808 +1,813 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import binascii import datetime import hashlib -from collections import namedtuple from functools import lru_cache +from typing import Any, Dict, NamedTuple from .exceptions import ValidationError from .fields.hashes import validate_sha1 from .hashutil import hash_git_data, hash_to_hex, MultiHash ORIGIN = 'origin' SNAPSHOT = 'snapshot' REVISION = 'revision' RELEASE = 'release' DIRECTORY = 'directory' CONTENT = 'content' PID_NAMESPACE = 'swh' PID_VERSION = 1 PID_TYPES = ['ori', 'snp', 'rel', 'rev', 'dir', 'cnt'] -PID_KEYS = ['namespace', 'scheme_version', 'object_type', 'object_id', - 'metadata'] PID_SEP = ':' PID_CTXT_SEP = ';' @lru_cache() def identifier_to_bytes(identifier): """Convert a text identifier to bytes. Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 20 bytestring corresponding to the given identifier Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( 'Wrong length for bytes identifier %s, expected 20' % len(identifier)) return identifier if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( 'Wrong length for str identifier %s, expected 40' % len(identifier)) return bytes.fromhex(identifier) raise ValueError('Wrong type for identifier %s, expected bytes or str' % identifier.__class__.__name__) @lru_cache() def identifier_to_str(identifier): """Convert an identifier to an hexadecimal string. 
Args: identifier: an identifier, either a 40-char hexadecimal string or a bytes object of length 20 Returns: The length 40 string corresponding to the given identifier, hex encoded Raises: ValueError: if the identifier is of an unexpected type or length. """ if isinstance(identifier, str): if len(identifier) != 40: raise ValueError( 'Wrong length for str identifier %s, expected 40' % len(identifier)) return identifier if isinstance(identifier, bytes): if len(identifier) != 20: raise ValueError( 'Wrong length for bytes identifier %s, expected 20' % len(identifier)) return binascii.hexlify(identifier).decode() raise ValueError('Wrong type for identifier %s, expected bytes or str' % identifier.__class__.__name__) def content_identifier(content): """Return the intrinsic identifier for a content. A content's identifier is the sha1, sha1_git and sha256 checksums of its data. Args: content: a content conforming to the Software Heritage schema Returns: A dictionary with all the hashes for the data Raises: KeyError: if the content doesn't have a data member. """ return MultiHash.from_data(content['data']).digest() def _sort_key(entry): """The sorting key for tree entries""" if entry['type'] == 'dir': return entry['name'] + b'/' else: return entry['name'] @lru_cache() def _perms_to_bytes(perms): """Convert the perms value to its bytes representation""" oc = oct(perms)[2:] return oc.encode('ascii') def escape_newlines(snippet): """Escape the newlines present in snippet according to git rules. New lines in git manifests are escaped by indenting the next line by one space. """ if b'\n' in snippet: return b'\n '.join(snippet.split(b'\n')) else: return snippet def directory_identifier(directory): """Return the intrinsic identifier for a directory. A directory's identifier is the tree sha1 à la git of a directory listing, using the following algorithm, which is equivalent to the git algorithm for trees: 1. Entries of the directory are sorted using the name (or the name with '/' appended for directory entries) as key, in bytes order. 2. For each entry of the directory, the following bytes are output: - the octal representation of the permissions for the entry (stored in the 'perms' member), which is a representation of the entry type: - b'100644' (int 33188) for files - b'100755' (int 33261) for executable files - b'120000' (int 40960) for symbolic links - b'40000' (int 16384) for directories - b'160000' (int 57344) for references to revisions - an ascii space (b'\x20') - the entry's name (as raw bytes), stored in the 'name' member - a null byte (b'\x00') - the 20 byte long identifier of the object pointed at by the entry, stored in the 'target' member: - for files or executable files: their blob sha1_git - for symbolic links: the blob sha1_git of a file containing the link destination - for directories: their intrinsic identifier - for revisions: their intrinsic identifier (Note that there is no separator between entries) """ components = [] for entry in sorted(directory['entries'], key=_sort_key): components.extend([ _perms_to_bytes(entry['perms']), b'\x20', entry['name'], b'\x00', identifier_to_bytes(entry['target']), ]) return identifier_to_str(hash_git_data(b''.join(components), 'tree')) def format_date(date): """Convert a date object into an UTC timestamp encoded as ascii bytes. Git stores timestamps as an integer number of seconds since the UNIX epoch. However, Software Heritage stores timestamps as an integer number of microseconds (postgres type "datetime with timezone"). 
Therefore, we print timestamps with no microseconds as integers, and timestamps with microseconds as floating point values. We elide the trailing zeroes from microsecond values, to "future-proof" our representation if we ever need more precision in timestamps. """ if not isinstance(date, dict): raise ValueError('format_date only supports dicts, %r received' % date) seconds = date.get('seconds', 0) microseconds = date.get('microseconds', 0) if not microseconds: return str(seconds).encode() else: float_value = ('%d.%06d' % (seconds, microseconds)) return float_value.rstrip('0').encode() @lru_cache() def format_offset(offset, negative_utc=None): """Convert an integer number of minutes into an offset representation. The offset representation is [+-]hhmm where: - hh is the number of hours; - mm is the number of minutes. A null offset is represented as +0000. """ if offset < 0 or offset == 0 and negative_utc: sign = '-' else: sign = '+' hours = abs(offset) // 60 minutes = abs(offset) % 60 t = '%s%02d%02d' % (sign, hours, minutes) return t.encode() def normalize_timestamp(time_representation): """Normalize a time representation for processing by Software Heritage This function supports a numeric timestamp (representing a number of seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a :obj:`datetime.datetime` object (with timezone information), or a normalized Software Heritage time representation (idempotency). Args: time_representation: the representation of a timestamp Returns: dict: a normalized dictionary with three keys: - timestamp: a dict with two optional keys: - seconds: the integral number of seconds since the UNIX epoch - microseconds: the integral number of microseconds - offset: the timezone offset as a number of minutes relative to UTC - negative_utc: a boolean representing whether the offset is -0000 when offset = 0. """ if time_representation is None: return None negative_utc = False if isinstance(time_representation, dict): ts = time_representation['timestamp'] if isinstance(ts, dict): seconds = ts.get('seconds', 0) microseconds = ts.get('microseconds', 0) elif isinstance(ts, int): seconds = ts microseconds = 0 else: raise ValueError( 'normalize_timestamp received non-integer timestamp member:' ' %r' % ts) offset = time_representation['offset'] if 'negative_utc' in time_representation: negative_utc = time_representation['negative_utc'] elif isinstance(time_representation, datetime.datetime): seconds = int(time_representation.timestamp()) microseconds = time_representation.microsecond utcoffset = time_representation.utcoffset() if utcoffset is None: raise ValueError( 'normalize_timestamp received datetime without timezone: %s' % time_representation) # utcoffset is an integer number of minutes seconds_offset = utcoffset.total_seconds() offset = int(seconds_offset) // 60 elif isinstance(time_representation, int): seconds = time_representation microseconds = 0 offset = 0 else: raise ValueError( 'normalize_timestamp received non-integer timestamp:' ' %r' % time_representation) return { 'timestamp': { 'seconds': seconds, 'microseconds': microseconds, }, 'offset': offset, 'negative_utc': negative_utc, } def format_author(author): """Format the specification of an author. An author is either a byte string (passed unchanged), or a dict with three keys, fullname, name and email. 
If the fullname exists, return it; if it doesn't, we construct a fullname using the following heuristics: if the name value is None, we return the email in angle brackets, else, we return the name, a space, and the email in angle brackets. """ if isinstance(author, bytes) or author is None: return author if 'fullname' in author: return author['fullname'] ret = [] if author['name'] is not None: ret.append(author['name']) if author['email'] is not None: ret.append(b''.join([b'<', author['email'], b'>'])) return b' '.join(ret) def format_author_line(header, author, date_offset): """Format a an author line according to git standards. An author line has three components: - a header, describing the type of author (author, committer, tagger) - a name and email, which is an arbitrary bytestring - optionally, a timestamp with UTC offset specification The author line is formatted thus:: `header` `name and email`[ `timestamp` `utc_offset`] The timestamp is encoded as a (decimal) number of seconds since the UNIX epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we support fractional timestamps, using a dot as the separator for the decimal part. The utc offset is a number of minutes encoded as '[+-]HHMM'. Note some tools can pass a negative offset corresponding to the UTC timezone ('-0000'), which is valid and is encoded as such. For convenience, this function returns the whole line with its trailing newline. Args: header: the header of the author line (one of 'author', 'committer', 'tagger') author: an author specification (dict with two bytes values: name and email, or byte value) date_offset: a normalized date/time representation as returned by :func:`normalize_timestamp`. Returns: the newline-terminated byte string containing the author line """ ret = [header.encode(), b' ', escape_newlines(format_author(author))] date_offset = normalize_timestamp(date_offset) if date_offset is not None: date_f = format_date(date_offset['timestamp']) offset_f = format_offset(date_offset['offset'], date_offset['negative_utc']) ret.extend([b' ', date_f, b' ', offset_f]) ret.append(b'\n') return b''.join(ret) def revision_identifier(revision): """Return the intrinsic identifier for a revision. The fields used for the revision identifier computation are: - directory - parents - author - author_date - committer - committer_date - metadata -> extra_headers - message A revision's identifier is the 'git'-checksum of a commit manifest constructed as follows (newlines are a single ASCII newline character):: tree [for each parent in parents] parent [end for each parents] author committer [for each key, value in extra_headers] [end for each extra_headers] The directory identifier is the ascii representation of its hexadecimal encoding. Author and committer are formatted with the :func:`format_author` function. Dates are formatted with the :func:`format_offset` function. Extra headers are an ordered list of [key, value] pairs. Keys are strings and get encoded to utf-8 for identifier computation. Values are either byte strings, unicode strings (that get encoded to utf-8), or integers (that get encoded to their utf-8 decimal representation). Multiline extra header values are escaped by indenting the continuation lines with one ascii space. If the message is None, the manifest ends with the last header. Else, the message is appended to the headers after an empty line. The checksum of the full manifest is computed using the 'commit' git object type. 
""" components = [ b'tree ', identifier_to_str(revision['directory']).encode(), b'\n', ] for parent in revision['parents']: if parent: components.extend([ b'parent ', identifier_to_str(parent).encode(), b'\n', ]) components.extend([ format_author_line('author', revision['author'], revision['date']), format_author_line('committer', revision['committer'], revision['committer_date']), ]) # Handle extra headers metadata = revision.get('metadata') if not metadata: metadata = {} for key, value in metadata.get('extra_headers', []): # Integer values: decimal representation if isinstance(value, int): value = str(value).encode('utf-8') # Unicode string values: utf-8 encoding if isinstance(value, str): value = value.encode('utf-8') # encode the key to utf-8 components.extend([key.encode('utf-8'), b' ', escape_newlines(value), b'\n']) if revision['message'] is not None: components.extend([b'\n', revision['message']]) commit_raw = b''.join(components) return identifier_to_str(hash_git_data(commit_raw, 'commit')) def target_type_to_git(target_type): """Convert a software heritage target type to a git object type""" return { 'content': b'blob', 'directory': b'tree', 'revision': b'commit', 'release': b'tag', }[target_type] def release_identifier(release): """Return the intrinsic identifier for a release.""" components = [ b'object ', identifier_to_str(release['target']).encode(), b'\n', b'type ', target_type_to_git(release['target_type']), b'\n', b'tag ', release['name'], b'\n', ] if 'author' in release and release['author']: components.append( format_author_line('tagger', release['author'], release['date']) ) if release['message'] is not None: components.extend([b'\n', release['message']]) return identifier_to_str(hash_git_data(b''.join(components), 'tag')) def snapshot_identifier(snapshot, *, ignore_unresolved=False): """Return the intrinsic identifier for a snapshot. Snapshots are a set of named branches, which are pointers to objects at any level of the Software Heritage DAG. As well as pointing to other objects in the Software Heritage DAG, branches can also be *alias*es, in which case their target is the name of another branch in the same snapshot, or *dangling*, in which case the target is unknown (and represented by the ``None`` value). A snapshot identifier is a salted sha1 (using the git hashing algorithm with the ``snapshot`` object type) of a manifest following the algorithm: 1. Branches are sorted using the name as key, in bytes order. 2. For each branch, the following bytes are output: - the type of the branch target: - ``content``, ``directory``, ``revision``, ``release`` or ``snapshot`` for the corresponding entries in the DAG; - ``alias`` for branches referencing another branch; - ``dangling`` for dangling branches - an ascii space (``\\x20``) - the branch name (as raw bytes) - a null byte (``\\x00``) - the length of the target identifier, as an ascii-encoded decimal number (``20`` for current intrinsic identifiers, ``0`` for dangling branches, the length of the target branch name for branch aliases) - a colon (``:``) - the identifier of the target object pointed at by the branch, stored in the 'target' member: - for contents: their *sha1_git* - for directories, revisions, releases or snapshots: their intrinsic identifier - for branch aliases, the name of the target branch (as raw bytes) - for dangling branches, the empty string Note that, akin to directory manifests, there is no separator between entries. 
Because of symbolic branches, identifiers are of arbitrary length but are length-encoded to avoid ambiguity. Args: snapshot (dict): the snapshot of which to compute the identifier. A single entry is needed, ``'branches'``, which is itself a :class:`dict` mapping each branch to its target ignore_unresolved (bool): if `True`, ignore unresolved branch aliases. Returns: str: the intrinsic identifier for `snapshot` """ unresolved = [] lines = [] for name, target in sorted(snapshot['branches'].items()): if not target: target_type = b'dangling' target_id = b'' elif target['target_type'] == 'alias': target_type = b'alias' target_id = target['target'] if target_id not in snapshot['branches'] or target_id == name: unresolved.append((name, target_id)) else: target_type = target['target_type'].encode() target_id = identifier_to_bytes(target['target']) lines.extend([ target_type, b'\x20', name, b'\x00', ('%d:' % len(target_id)).encode(), target_id, ]) if unresolved and not ignore_unresolved: raise ValueError('Branch aliases unresolved: %s' % ', '.join('%s -> %s' % x for x in unresolved), unresolved) return identifier_to_str(hash_git_data(b''.join(lines), 'snapshot')) def origin_identifier(origin): """Return the intrinsic identifier for an origin.""" return hashlib.sha1(origin['url'].encode('ascii')).hexdigest() _object_type_map = { ORIGIN: { 'short_name': 'ori', 'key_id': 'id' }, SNAPSHOT: { 'short_name': 'snp', 'key_id': 'id' }, RELEASE: { 'short_name': 'rel', 'key_id': 'id' }, REVISION: { 'short_name': 'rev', 'key_id': 'id' }, DIRECTORY: { 'short_name': 'dir', 'key_id': 'id' }, CONTENT: { 'short_name': 'cnt', 'key_id': 'sha1_git' } } -class PersistentId(namedtuple('PersistentId', PID_KEYS)): +class PersistentId(NamedTuple( + 'PersistentId', [ + ('namespace', str), + ('scheme_version', int), + ('object_type', str), + ('object_id', str), + ('metadata', Dict[str, Any]), + ])): """ Named tuple holding the relevant info associated to a Software Heritage persistent identifier. 
Args: namespace (str): the namespace of the identifier, defaults to 'swh' scheme_version (int): the scheme version of the identifier, defaults to 1 object_type (str): the type of object the identifier points to, either 'content', 'directory', 'release', 'revision' or 'snapshot' object_id (dict/bytes/str): object's dict representation or object identifier metadata (dict): optional dict filled with metadata related to pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Once created, it contains the following attributes: Attributes: namespace (str): the namespace of the identifier scheme_version (int): the scheme version of the identifier object_type (str): the type of object the identifier points to object_id (str): hexadecimal representation of the object hash metadata (dict): metadata related to the pointed object To get the raw persistent identifier string from an instance of this named tuple, use the :func:`str` function:: pid = PersistentId( object_type='content', object_id='8ff44f081d43176474b267de5451f2c2e88089d0' ) pid_str = str(pid) # 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0' """ __slots__ = () def __new__(cls, namespace=PID_NAMESPACE, scheme_version=PID_VERSION, object_type='', object_id='', metadata={}): o = _object_type_map.get(object_type) if not o: raise ValidationError('Wrong input: Supported types are %s' % ( list(_object_type_map.keys()))) # internal swh representation resolution if isinstance(object_id, dict): object_id = object_id[o['key_id']] validate_sha1(object_id) # can raise if invalid hash object_id = hash_to_hex(object_id) return super(cls, PersistentId).__new__( cls, namespace, scheme_version, object_type, object_id, metadata) def __str__(self): o = _object_type_map.get(self.object_type) pid = PID_SEP.join([self.namespace, str(self.scheme_version), o['short_name'], self.object_id]) if self.metadata: for k, v in self.metadata.items(): pid += '%s%s=%s' % (PID_CTXT_SEP, k, v) return pid def persistent_identifier(object_type, object_id, scheme_version=1, metadata={}): """Compute persistent identifier (stable over time) as per documentation. Documentation: https://docs.softwareheritage.org/devel/swh-model/persistent-identifiers.html # noqa Args: object_type (str): object's type, either 'content', 'directory', 'release', 'revision' or 'snapshot' object_id (dict/bytes/str): object's dict representation or object identifier scheme_version (int): persistent identifier scheme version, defaults to 1 metadata (dict): metadata related to the pointed object Raises: swh.model.exceptions.ValidationError: In case of invalid object type or id Returns: str: the persistent identifier """ pid = PersistentId(scheme_version=scheme_version, object_type=object_type, object_id=object_id, metadata=metadata) return str(pid) def parse_persistent_identifier(persistent_id): """Parse swh's :ref:`persistent-identifiers` scheme. 
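For example (a sketch reusing the content hash from the :class:`PersistentId` docstring above):

    .. code-block:: python

        parse_persistent_identifier(
            'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0')
        # PersistentId(namespace='swh', scheme_version=1, object_type='content',
        #              object_id='8ff44f081d43176474b267de5451f2c2e88089d0',
        #              metadata={})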
Args: persistent_id (str): A persistent identifier Raises: swh.model.exceptions.ValidationError: in case of: * missing mandatory values (4) * invalid namespace supplied * invalid version supplied * invalid type supplied * missing hash * invalid hash identifier supplied Returns: PersistentId: a named tuple holding the parsing result """ # ; persistent_id_parts = persistent_id.split(PID_CTXT_SEP) pid_data = persistent_id_parts.pop(0).split(':') if len(pid_data) != 4: raise ValidationError( 'Wrong format: There should be 4 mandatory values') # Checking for parsing errors _ns, _version, _type, _id = pid_data if _ns != PID_NAMESPACE: raise ValidationError( "Wrong format: only supported namespace is '%s'" % PID_NAMESPACE) if _version != str(PID_VERSION): raise ValidationError( 'Wrong format: only supported version is %d' % PID_VERSION) pid_data[1] = int(pid_data[1]) expected_types = PID_TYPES if _type not in expected_types: raise ValidationError( 'Wrong format: Supported types are %s' % ( ', '.join(expected_types))) for otype, data in _object_type_map.items(): if _type == data['short_name']: pid_data[2] = otype break if not _id: raise ValidationError( 'Wrong format: Identifier should be present') try: validate_sha1(_id) except ValidationError: raise ValidationError( 'Wrong format: Identifier should be a valid hash') persistent_id_metadata = {} for part in persistent_id_parts: try: key, val = part.split('=') persistent_id_metadata[key] = val except Exception: msg = 'Contextual data is badly formatted, form key=val expected' raise ValidationError(msg) pid_data.append(persistent_id_metadata) return PersistentId._make(pid_data) diff --git a/swh/model/merkle.py b/swh/model/merkle.py index c75cc2c..15c5db6 100644 --- a/swh/model/merkle.py +++ b/swh/model/merkle.py @@ -1,286 +1,288 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Merkle tree data structure""" import abc import collections +from typing import List, Optional + def deep_update(left, right): """Recursively update the left mapping with deeply nested values from the right mapping. This function is useful to merge the results of several calls to :func:`MerkleNode.collect`. Arguments: left: a mapping (modified by the update operation) right: a mapping Returns: the left mapping, updated with nested values from the right mapping Example: >>> a = { ... 'key1': { ... 'key2': { ... 'key3': 'value1/2/3', ... }, ... }, ... } >>> deep_update(a, { ... 'key1': { ... 'key2': { ... 'key4': 'value1/2/4', ... }, ... }, ... }) == { ... 'key1': { ... 'key2': { ... 'key3': 'value1/2/3', ... 'key4': 'value1/2/4', ... }, ... }, ... } True >>> deep_update(a, { ... 'key1': { ... 'key2': { ... 'key3': 'newvalue1/2/3', ... }, ... }, ... }) == { ... 'key1': { ... 'key2': { ... 'key3': 'newvalue1/2/3', ... 'key4': 'value1/2/4', ... }, ... }, ... } True """ for key, rvalue in right.items(): if isinstance(rvalue, collections.Mapping): new_lvalue = deep_update(left.get(key, {}), rvalue) left[key] = new_lvalue else: left[key] = rvalue return left class MerkleNode(dict, metaclass=abc.ABCMeta): """Representation of a node in a Merkle Tree. A (generalized) `Merkle Tree`_ is a tree in which every node is labeled with a hash of its own data and the hash of its children. .. 
_Merkle Tree: https://en.wikipedia.org/wiki/Merkle_tree In pseudocode:: node.hash = hash(node.data + sum(child.hash for child in node.children)) This class efficiently implements the Merkle Tree data structure on top of a Python :class:`dict`, minimizing hash computations and new data collections when updating nodes. Node data is stored in the :attr:`data` attribute, while (named) children are stored as items of the underlying dictionary. Addition, update and removal of objects are instrumented to automatically invalidate the hashes of the current node as well as its registered parents; It also resets the collection status of the objects so the updated objects can be collected. The collection of updated data from the tree is implemented through the :func:`collect` function and associated helpers. Attributes: data (dict): data associated to the current node parents (list): known parents of the current node collected (bool): whether the current node has been collected """ __slots__ = ['parents', 'data', '__hash', 'collected'] - type = None + type = None # type: Optional[str] """Type of the current node (used as a classifier for :func:`collect`)""" def __init__(self, data=None): super().__init__() self.parents = [] self.data = data self.__hash = None self.collected = False def invalidate_hash(self): """Invalidate the cached hash of the current node.""" if not self.__hash: return self.__hash = None self.collected = False for parent in self.parents: parent.invalidate_hash() def update_hash(self, *, force=False): """Recursively compute the hash of the current node. Args: force (bool): invalidate the cache and force the computation for this node and all children. """ if self.__hash and not force: return self.__hash if force: self.invalidate_hash() for child in self.values(): child.update_hash(force=force) self.__hash = self.compute_hash() return self.__hash @property def hash(self): """The hash of the current node, as calculated by :func:`compute_hash`. """ return self.update_hash() @abc.abstractmethod def compute_hash(self): """Compute the hash of the current node. The hash should depend on the data of the node, as well as on hashes of the children nodes. """ raise NotImplementedError('Must implement compute_hash method') def __setitem__(self, name, new_child): """Add a child, invalidating the current hash""" self.invalidate_hash() super().__setitem__(name, new_child) new_child.parents.append(self) def __delitem__(self, name): """Remove a child, invalidating the current hash""" if name in self: self.invalidate_hash() self[name].parents.remove(self) super().__delitem__(name) else: raise KeyError(name) def update(self, new_children): """Add several named children from a dictionary""" if not new_children: return self.invalidate_hash() for name, new_child in new_children.items(): new_child.parents.append(self) if name in self: self[name].parents.remove(self) super().update(new_children) def get_data(self, **kwargs): """Retrieve and format the collected data for the current node, for use by :func:`collect`. Can be overridden, for instance when you want the collected data to contain information about the child nodes. Arguments: kwargs: allow subclasses to alter behaviour depending on how :func:`collect` is called. Returns: data formatted for :func:`collect` """ return self.data def collect_node(self, **kwargs): """Collect the data for the current node, for use by :func:`collect`. Arguments: kwargs: passed as-is to :func:`get_data`. Returns: A :class:`dict` compatible with :func:`collect`. 
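For instance, on a node that has not been collected yet, this is roughly:

    .. code-block:: python

        node.collect_node()   # {node.type: {node.hash: node.get_data()}}
        node.collect_node()   # {} -- the node is now marked as collected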
""" if not self.collected: self.collected = True return {self.type: {self.hash: self.get_data(**kwargs)}} else: return {} def collect(self, **kwargs): """Collect the data for all nodes in the subtree rooted at `self`. The data is deduplicated by type and by hash. Arguments: kwargs: passed as-is to :func:`get_data`. Returns: A :class:`dict` with the following structure:: { 'typeA': { node1.hash: node1.get_data(), node2.hash: node2.get_data(), }, 'typeB': { node3.hash: node3.get_data(), ... }, ... } """ ret = self.collect_node(**kwargs) for child in self.values(): deep_update(ret, child.collect(**kwargs)) return ret def reset_collect(self): """Recursively unmark collected nodes in the subtree rooted at `self`. This lets the caller use :func:`collect` again. """ self.collected = False for child in self.values(): child.reset_collect() class MerkleLeaf(MerkleNode): """A leaf to a Merkle tree. A Merkle leaf is simply a Merkle node with children disabled. """ - __slots__ = [] + __slots__ = [] # type: List[str] def __setitem__(self, name, child): raise ValueError('%s is a leaf' % self.__class__.__name__) def __getitem__(self, name): raise ValueError('%s is a leaf' % self.__class__.__name__) def __delitem__(self, name): raise ValueError('%s is a leaf' % self.__class__.__name__) def update(self, new_children): """Children update operation. Disabled for leaves.""" raise ValueError('%s is a leaf' % self.__class__.__name__) diff --git a/swh/model/py.typed b/swh/model/py.typed new file mode 100644 index 0000000..1242d43 --- /dev/null +++ b/swh/model/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. diff --git a/swh/model/tests/test_from_disk.py b/swh/model/tests/test_from_disk.py index 30f543d..7b21d20 100644 --- a/swh/model/tests/test_from_disk.py +++ b/swh/model/tests/test_from_disk.py @@ -1,787 +1,788 @@ # Copyright (C) 2017 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os +import pytest import tarfile import tempfile import unittest -import pytest +from typing import ClassVar, Optional from swh.model import from_disk from swh.model.from_disk import Content, DentryPerms, Directory from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex TEST_DATA = os.path.join(os.path.dirname(__file__), 'data') class ModeToPerms(unittest.TestCase): def setUp(self): super().setUp() # Generate a full permissions map self.perms_map = {} # Symlinks for i in range(0o120000, 0o127777 + 1): self.perms_map[i] = DentryPerms.symlink # Directories for i in range(0o040000, 0o047777 + 1): self.perms_map[i] = DentryPerms.directory # Other file types: socket, regular file, block device, character # device, fifo all map to regular files for ft in [0o140000, 0o100000, 0o060000, 0o020000, 0o010000]: for i in range(ft, ft + 0o7777 + 1): if i & 0o111: # executable bits are set self.perms_map[i] = DentryPerms.executable_content else: self.perms_map[i] = DentryPerms.content def test_exhaustive_mode_to_perms(self): for fmode, perm in self.perms_map.items(): self.assertEqual(perm, from_disk.mode_to_perms(fmode)) class DataMixin: - maxDiff = None + maxDiff = None # type: ClassVar[Optional[int]] def setUp(self): self.tmpdir = tempfile.TemporaryDirectory( prefix='swh.model.from_disk' ) self.tmpdir_name = os.fsencode(self.tmpdir.name) self.contents = { b'file': { 'data': b'42\n', 'sha1': hash_to_bytes( 
'34973274ccef6ab4dfaaf86599792fa9c3fe4689' ), 'sha256': hash_to_bytes( '084c799cd551dd1d8d5c5f9a5d593b2e' '931f5e36122ee5c793c1d08a19839cc0' ), 'sha1_git': hash_to_bytes( 'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'), 'blake2s256': hash_to_bytes( 'd5fe1939576527e42cfd76a9455a2432' 'fe7f56669564577dd93c4280e76d661d' ), 'length': 3, 'mode': 0o100644 }, } self.symlinks = { b'symlink': { 'data': b'target', 'blake2s256': hash_to_bytes( '595d221b30fdd8e10e2fdf18376e688e' '9f18d56fd9b6d1eb6a822f8c146c6da6' ), 'sha1': hash_to_bytes( '0e8a3ad980ec179856012b7eecf4327e99cd44cd' ), 'sha1_git': hash_to_bytes( '1de565933b05f74c75ff9a6520af5f9f8a5a2f1d' ), 'sha256': hash_to_bytes( '34a04005bcaf206eec990bd9637d9fdb' '6725e0a0c0d4aebf003f17f4c956eb5c' ), 'length': 6, 'perms': DentryPerms.symlink, } } self.specials = { b'fifo': os.mkfifo, b'devnull': lambda path: os.mknod(path, device=os.makedev(1, 3)), } self.empty_content = { 'data': b'', 'length': 0, 'blake2s256': hash_to_bytes( '69217a3079908094e11121d042354a7c' '1f55b6482ca1a51e1b250dfd1ed0eef9' ), 'sha1': hash_to_bytes( 'da39a3ee5e6b4b0d3255bfef95601890afd80709' ), 'sha1_git': hash_to_bytes( 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' ), 'sha256': hash_to_bytes( 'e3b0c44298fc1c149afbf4c8996fb924' '27ae41e4649b934ca495991b7852b855' ), 'perms': DentryPerms.content, } self.empty_directory = { 'id': hash_to_bytes( '4b825dc642cb6eb9a060e54bf8d69288fbee4904' ), 'entries': [], } # Generated with generate_testdata_from_disk self.tarball_contents = { b'': { 'entries': [{ 'name': b'bar', 'perms': DentryPerms.directory, 'target': hash_to_bytes( '3c1f578394f4623f74a0ba7fe761729f59fc6ec4' ), 'type': 'dir', }, { 'name': b'empty-folder', 'perms': DentryPerms.directory, 'target': hash_to_bytes( '4b825dc642cb6eb9a060e54bf8d69288fbee4904' ), 'type': 'dir', }, { 'name': b'foo', 'perms': DentryPerms.directory, 'target': hash_to_bytes( '2b41c40f0d1fbffcba12497db71fba83fcca96e5' ), 'type': 'dir', }, { 'name': b'link-to-another-quote', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( '7d5c08111e21c8a9f71540939998551683375fad' ), 'type': 'file', }, { 'name': b'link-to-binary', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( 'e86b45e538d9b6888c969c89fbd22a85aa0e0366' ), 'type': 'file', }, { 'name': b'link-to-foo', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( '19102815663d23f8b75a47e7a01965dcdc96468c' ), 'type': 'file', }, { 'name': b'some-binary', 'perms': DentryPerms.executable_content, 'target': hash_to_bytes( '68769579c3eaadbe555379b9c3538e6628bae1eb' ), 'type': 'file', }], 'id': hash_to_bytes( 'e8b0f1466af8608c8a3fb9879db172b887e80759' ), }, b'bar': { 'entries': [{ 'name': b'barfoo', 'perms': DentryPerms.directory, 'target': hash_to_bytes( 'c3020f6bf135a38c6df3afeb5fb38232c5e07087' ), 'type': 'dir', }], 'id': hash_to_bytes( '3c1f578394f4623f74a0ba7fe761729f59fc6ec4' ), }, b'bar/barfoo': { 'entries': [{ 'name': b'another-quote.org', 'perms': DentryPerms.content, 'target': hash_to_bytes( '133693b125bad2b4ac318535b84901ebb1f6b638' ), 'type': 'file', }], 'id': hash_to_bytes( 'c3020f6bf135a38c6df3afeb5fb38232c5e07087' ), }, b'bar/barfoo/another-quote.org': { 'blake2s256': hash_to_bytes( 'd26c1cad82d43df0bffa5e7be11a60e3' '4adb85a218b433cbce5278b10b954fe8' ), 'length': 72, 'perms': DentryPerms.content, 'sha1': hash_to_bytes( '90a6138ba59915261e179948386aa1cc2aa9220a' ), 'sha1_git': hash_to_bytes( '133693b125bad2b4ac318535b84901ebb1f6b638' ), 'sha256': hash_to_bytes( '3db5ae168055bcd93a4d08285dc99ffe' 'e2883303b23fac5eab850273a8ea5546' ), }, b'empty-folder': 
{ 'entries': [], 'id': hash_to_bytes( '4b825dc642cb6eb9a060e54bf8d69288fbee4904' ), }, b'foo': { 'entries': [{ 'name': b'barfoo', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( '8185dfb2c0c2c597d16f75a8a0c37668567c3d7e' ), 'type': 'file', }, { 'name': b'quotes.md', 'perms': DentryPerms.content, 'target': hash_to_bytes( '7c4c57ba9ff496ad179b8f65b1d286edbda34c9a' ), 'type': 'file', }, { 'name': b'rel-link-to-barfoo', 'perms': DentryPerms.symlink, 'target': hash_to_bytes( 'acac326ddd63b0bc70840659d4ac43619484e69f' ), 'type': 'file', }], 'id': hash_to_bytes( '2b41c40f0d1fbffcba12497db71fba83fcca96e5' ), }, b'foo/barfoo': { 'blake2s256': hash_to_bytes( 'e1252f2caa4a72653c4efd9af871b62b' 'f2abb7bb2f1b0e95969204bd8a70d4cd' ), 'data': b'bar/barfoo', 'length': 10, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( '9057ee6d0162506e01c4d9d5459a7add1fedac37' ), 'sha1_git': hash_to_bytes( '8185dfb2c0c2c597d16f75a8a0c37668567c3d7e' ), 'sha256': hash_to_bytes( '29ad3f5725321b940332c78e403601af' 'ff61daea85e9c80b4a7063b6887ead68' ), }, b'foo/quotes.md': { 'blake2s256': hash_to_bytes( 'bf7ce4fe304378651ee6348d3e9336ed' '5ad603d33e83c83ba4e14b46f9b8a80b' ), 'length': 66, 'perms': DentryPerms.content, 'sha1': hash_to_bytes( '1bf0bb721ac92c18a19b13c0eb3d741cbfadebfc' ), 'sha1_git': hash_to_bytes( '7c4c57ba9ff496ad179b8f65b1d286edbda34c9a' ), 'sha256': hash_to_bytes( 'caca942aeda7b308859eb56f909ec96d' '07a499491690c453f73b9800a93b1659' ), }, b'foo/rel-link-to-barfoo': { 'blake2s256': hash_to_bytes( 'd9c327421588a1cf61f316615005a2e9' 'c13ac3a4e96d43a24138d718fa0e30db' ), 'data': b'../bar/barfoo', 'length': 13, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( 'dc51221d308f3aeb2754db48391b85687c2869f4' ), 'sha1_git': hash_to_bytes( 'acac326ddd63b0bc70840659d4ac43619484e69f' ), 'sha256': hash_to_bytes( '8007d20db2af40435f42ddef4b8ad76b' '80adbec26b249fdf0473353f8d99df08' ), }, b'link-to-another-quote': { 'blake2s256': hash_to_bytes( '2d0e73cea01ba949c1022dc10c8a43e6' '6180639662e5dc2737b843382f7b1910' ), 'data': b'bar/barfoo/another-quote.org', 'length': 28, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( 'cbeed15e79599c90de7383f420fed7acb48ea171' ), 'sha1_git': hash_to_bytes( '7d5c08111e21c8a9f71540939998551683375fad' ), 'sha256': hash_to_bytes( 'e6e17d0793aa750a0440eb9ad5b80b25' '8076637ef0fb68f3ac2e59e4b9ac3ba6' ), }, b'link-to-binary': { 'blake2s256': hash_to_bytes( '9ce18b1adecb33f891ca36664da676e1' '2c772cc193778aac9a137b8dc5834b9b' ), 'data': b'some-binary', 'length': 11, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( 'd0248714948b3a48a25438232a6f99f0318f59f1' ), 'sha1_git': hash_to_bytes( 'e86b45e538d9b6888c969c89fbd22a85aa0e0366' ), 'sha256': hash_to_bytes( '14126e97d83f7d261c5a6889cee73619' '770ff09e40c5498685aba745be882eff' ), }, b'link-to-foo': { 'blake2s256': hash_to_bytes( '08d6cad88075de8f192db097573d0e82' '9411cd91eb6ec65e8fc16c017edfdb74' ), 'data': b'foo', 'length': 3, 'perms': DentryPerms.symlink, 'sha1': hash_to_bytes( '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33' ), 'sha1_git': hash_to_bytes( '19102815663d23f8b75a47e7a01965dcdc96468c' ), 'sha256': hash_to_bytes( '2c26b46b68ffc68ff99b453c1d304134' '13422d706483bfa0f98a5e886266e7ae' ), }, b'some-binary': { 'blake2s256': hash_to_bytes( '922e0f7015035212495b090c27577357' 'a740ddd77b0b9e0cd23b5480c07a18c6' ), 'length': 5, 'perms': DentryPerms.executable_content, 'sha1': hash_to_bytes( '0bbc12d7f4a2a15b143da84617d95cb223c9b23c' ), 'sha1_git': hash_to_bytes( '68769579c3eaadbe555379b9c3538e6628bae1eb' ), 'sha256': hash_to_bytes( 
                'sha256': hash_to_bytes(
                    'bac650d34a7638bb0aeb5342646d24e3'
                    'b9ad6b44c9b383621faa482b990a367d'),
            },
        }

    def tearDown(self):
        self.tmpdir.cleanup()

    def assertContentEqual(self, left, right, *, check_data=False,  # noqa
                           check_path=False):
        if not isinstance(left, Content):
            raise ValueError('%s is not a Content' % left)
        if isinstance(right, Content):
            right = right.get_data()

        keys = DEFAULT_ALGORITHMS | {
            'length',
            'perms',
        }
        if check_data:
            keys |= {'data'}
        if check_path:
            keys |= {'path'}

        failed = []
        for key in keys:
            try:
                lvalue = left.data[key]
                if key == 'perms' and 'perms' not in right:
                    rvalue = from_disk.mode_to_perms(right['mode'])
                else:
                    rvalue = right[key]
            except KeyError:
                failed.append(key)
                continue

            if lvalue != rvalue:
                failed.append(key)

        if failed:
            raise self.failureException(
                'Content mismatched:\n' +
                '\n'.join(
                    'content[%s] = %r != %r' % (
                        key, left.data.get(key), right.get(key))
                    for key in failed
                )
            )

    def assertDirectoryEqual(self, left, right):  # NoQA
        if not isinstance(left, Directory):
            raise ValueError('%s is not a Directory' % left)
        if isinstance(right, Directory):
            right = right.get_data()

        return self.assertCountEqual(left.entries, right['entries'])

    def make_contents(self, directory):
        for filename, content in self.contents.items():
            path = os.path.join(directory, filename)
            with open(path, 'wb') as f:
                f.write(content['data'])
            os.chmod(path, content['mode'])

    def make_symlinks(self, directory):
        for filename, symlink in self.symlinks.items():
            path = os.path.join(directory, filename)
            os.symlink(symlink['data'], path)

    def make_specials(self, directory):
        for filename, fn in self.specials.items():
            path = os.path.join(directory, filename)
            fn(path)

    def make_from_tarball(self, directory):
        tarball = os.path.join(TEST_DATA, 'dir-folders', 'sample-folder.tgz')

        with tarfile.open(tarball, 'r:gz') as f:
            f.extractall(os.fsdecode(directory))


class TestContent(DataMixin, unittest.TestCase):
    def setUp(self):
        super().setUp()

    def test_data_to_content(self):
        for filename, content in self.contents.items():
            conv_content = Content.from_bytes(mode=content['mode'],
                                              data=content['data'])
            self.assertContentEqual(conv_content, content)
            self.assertIn(hash_to_hex(conv_content.hash), repr(conv_content))


class SymlinkToContent(DataMixin, unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.make_symlinks(self.tmpdir_name)

    def test_symlink_to_content(self):
        for filename, symlink in self.symlinks.items():
            path = os.path.join(self.tmpdir_name, filename)
            perms = 0o120000
            conv_content = Content.from_symlink(path=path, mode=perms)
            self.assertContentEqual(conv_content, symlink)


class FileToContent(DataMixin, unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.make_contents(self.tmpdir_name)
        self.make_symlinks(self.tmpdir_name)
        self.make_specials(self.tmpdir_name)

    def test_file_to_content(self):
        # Check whether loading the data works
        for data in [True, False]:
            for filename, symlink in self.symlinks.items():
                path = os.path.join(self.tmpdir_name, filename)
                conv_content = Content.from_file(path=path, data=data)
                self.assertContentEqual(conv_content, symlink,
                                        check_data=data)

            for filename, content in self.contents.items():
                path = os.path.join(self.tmpdir_name, filename)
                conv_content = Content.from_file(path=path, data=data)
                self.assertContentEqual(conv_content, content,
                                        check_data=data)

            for filename in self.specials:
                path = os.path.join(self.tmpdir_name, filename)
                conv_content = Content.from_file(path=path, data=data)
                self.assertContentEqual(conv_content, self.empty_content)

    def test_file_to_content_with_path(self):
        for filename, content in self.contents.items():
            content_w_path = content.copy()
            path = os.path.join(self.tmpdir_name, filename)
            content_w_path['path'] = path
            conv_content = Content.from_file(path=path, save_path=True)
            self.assertContentEqual(conv_content, content_w_path,
                                    check_path=True)


class DirectoryToObjects(DataMixin, unittest.TestCase):
    def setUp(self):
        super().setUp()
        contents = os.path.join(self.tmpdir_name, b'contents')
        os.mkdir(contents)
        self.make_contents(contents)
        symlinks = os.path.join(self.tmpdir_name, b'symlinks')
        os.mkdir(symlinks)
        self.make_symlinks(symlinks)
        specials = os.path.join(self.tmpdir_name, b'specials')
        os.mkdir(specials)
        self.make_specials(specials)
        empties = os.path.join(self.tmpdir_name, b'empty1', b'empty2')
        os.makedirs(empties)

    def test_directory_to_objects(self):
        directory = Directory.from_disk(path=self.tmpdir_name)

        for name, value in self.contents.items():
            self.assertContentEqual(directory[b'contents/' + name], value)

        for name, value in self.symlinks.items():
            self.assertContentEqual(directory[b'symlinks/' + name], value)

        for name in self.specials:
            self.assertContentEqual(
                directory[b'specials/' + name],
                self.empty_content,
            )

        self.assertEqual(
            directory[b'empty1/empty2'].get_data(),
            self.empty_directory,
        )

        # Raise on non existent file
        with self.assertRaisesRegex(KeyError, "b'nonexistent'"):
            directory[b'empty1/nonexistent']

        # Raise on non existent directory
        with self.assertRaisesRegex(KeyError, "b'nonexistentdir'"):
            directory[b'nonexistentdir/file']

        objs = directory.collect()

        self.assertCountEqual(['content', 'directory'], objs)

        self.assertEqual(len(objs['directory']), 6)
        self.assertEqual(len(objs['content']),
                         len(self.contents) + len(self.symlinks) + 1)

    def test_directory_to_objects_ignore_empty(self):
        directory = Directory.from_disk(
            path=self.tmpdir_name,
            dir_filter=from_disk.ignore_empty_directories
        )

        for name, value in self.contents.items():
            self.assertContentEqual(directory[b'contents/' + name], value)

        for name, value in self.symlinks.items():
            self.assertContentEqual(directory[b'symlinks/' + name], value)

        for name in self.specials:
            self.assertContentEqual(
                directory[b'specials/' + name],
                self.empty_content,
            )

        # empty directories have been ignored recursively
        with self.assertRaisesRegex(KeyError, "b'empty1'"):
            directory[b'empty1']
        with self.assertRaisesRegex(KeyError, "b'empty1'"):
            directory[b'empty1/empty2']

        objs = directory.collect()

        self.assertCountEqual(['content', 'directory'], objs)

        self.assertEqual(len(objs['directory']), 4)
        self.assertEqual(len(objs['content']),
                         len(self.contents) + len(self.symlinks) + 1)

    def test_directory_to_objects_ignore_name(self):
        directory = Directory.from_disk(
            path=self.tmpdir_name,
            dir_filter=from_disk.ignore_named_directories([b'symlinks'])
        )
        for name, value in self.contents.items():
            self.assertContentEqual(directory[b'contents/' + name], value)

        for name in self.specials:
            self.assertContentEqual(
                directory[b'specials/' + name],
                self.empty_content,
            )

        self.assertEqual(
            directory[b'empty1/empty2'].get_data(),
            self.empty_directory,
        )

        with self.assertRaisesRegex(KeyError, "b'symlinks'"):
            directory[b'symlinks']

        objs = directory.collect()

        self.assertCountEqual(['content', 'directory'], objs)

        self.assertEqual(len(objs['directory']), 5)
        self.assertEqual(len(objs['content']),
                         len(self.contents) + 1)

    def test_directory_to_objects_ignore_name_case(self):
        directory = Directory.from_disk(
            path=self.tmpdir_name,
            dir_filter=from_disk.ignore_named_directories([b'symLiNks'],
                                                          case_sensitive=False)
        )
        for name, value in self.contents.items():
            self.assertContentEqual(directory[b'contents/' + name], value)

        for name in self.specials:
            self.assertContentEqual(
                directory[b'specials/' + name],
                self.empty_content,
            )

        self.assertEqual(
            directory[b'empty1/empty2'].get_data(),
            self.empty_directory,
        )

        with self.assertRaisesRegex(KeyError, "b'symlinks'"):
            directory[b'symlinks']

        objs = directory.collect()

        self.assertCountEqual(['content', 'directory'], objs)

        self.assertEqual(len(objs['directory']), 5)
        self.assertEqual(len(objs['content']),
                         len(self.contents) + 1)


@pytest.mark.fs
class TarballTest(DataMixin, unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.make_from_tarball(self.tmpdir_name)

    def test_contents_match(self):
        directory = Directory.from_disk(
            path=os.path.join(self.tmpdir_name, b'sample-folder')
        )

        for name, data in self.tarball_contents.items():
            obj = directory[name]
            if isinstance(obj, Content):
                self.assertContentEqual(obj, data)
            elif isinstance(obj, Directory):
                self.assertDirectoryEqual(obj, data)
            else:
                raise self.failureException('Unknown type for %s' % obj)


class DirectoryManipulation(DataMixin, unittest.TestCase):
    def test_directory_access_nested(self):
        d = Directory()
        d[b'a'] = Directory()
        d[b'a/b'] = Directory()

        self.assertEqual(d[b'a/b'].get_data(), self.empty_directory)

    def test_directory_del_nested(self):
        d = Directory()
        d[b'a'] = Directory()
        d[b'a/b'] = Directory()

        with self.assertRaisesRegex(KeyError, "b'c'"):
            del d[b'a/b/c']

        with self.assertRaisesRegex(KeyError, "b'level2'"):
            del d[b'a/level2/c']

        del d[b'a/b']
        self.assertEqual(d[b'a'].get_data(), self.empty_directory)

    def test_directory_access_self(self):
        d = Directory()
        self.assertIs(d, d[b''])
        self.assertIs(d, d[b'/'])
        self.assertIs(d, d[b'//'])

    def test_directory_access_wrong_type(self):
        d = Directory()
        with self.assertRaisesRegex(ValueError, 'bytes from Directory'):
            d['foo']
        with self.assertRaisesRegex(ValueError, 'bytes from Directory'):
            d[42]

    def test_directory_repr(self):
        entries = [b'a', b'b', b'c']
        d = Directory()
        for entry in entries:
            d[entry] = Directory()

        r = repr(d)
        self.assertIn(hash_to_hex(d.hash), r)

        for entry in entries:
            self.assertIn(str(entry), r)

    def test_directory_set_wrong_type_name(self):
        d = Directory()
        with self.assertRaisesRegex(ValueError, 'bytes Directory entry'):
            d['foo'] = Directory()
        with self.assertRaisesRegex(ValueError, 'bytes Directory entry'):
            d[42] = Directory()

    def test_directory_set_nul_in_name(self):
        d = Directory()
        with self.assertRaisesRegex(ValueError, 'nul bytes'):
            d[b'\x00\x01'] = Directory()

    def test_directory_set_empty_name(self):
        d = Directory()
        with self.assertRaisesRegex(ValueError, 'must have a name'):
            d[b''] = Directory()
        with self.assertRaisesRegex(ValueError, 'must have a name'):
            d[b'/'] = Directory()

    def test_directory_set_wrong_type(self):
        d = Directory()
        with self.assertRaisesRegex(ValueError, 'Content or Directory'):
            d[b'entry'] = object()

    def test_directory_del_wrong_type(self):
        d = Directory()
        with self.assertRaisesRegex(ValueError, 'bytes Directory entry'):
            del d['foo']
        with self.assertRaisesRegex(ValueError, 'bytes Directory entry'):
            del d[42]
diff --git a/tox.ini b/tox.ini
index 0fb07c6..39200ed 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,16 +1,25 @@
[tox]
-envlist=flake8,py3
+envlist=flake8,py3,mypy

[testenv:py3]
deps =
  .[testing]
  pytest-cov
commands =
  pytest --cov=swh --cov-branch {posargs}

[testenv:flake8]
skip_install = true
deps =
  flake8
commands =
  {envpython} -m flake8
+
+[testenv:mypy]
+skip_install = true
+deps =
+  .[testing]
+  mypy
+  django-stubs
+commands =
+  mypy swh
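
Illustrative usage sketch (not part of the diff): how the from_disk API exercised by the tests above is typically driven, assuming swh.model is installed; the directory and file paths below are hypothetical.

from swh.model import from_disk
from swh.model.from_disk import Content, Directory
from swh.model.hashutil import hash_to_hex

# Walk a (hypothetical) local directory and build the Merkle tree, skipping
# empty directories like test_directory_to_objects_ignore_empty does.
directory = Directory.from_disk(
    path=b'/tmp/sample-folder',
    dir_filter=from_disk.ignore_empty_directories,
)
print('root directory id:', hash_to_hex(directory.hash))

# collect() groups every node by object type ('content', 'directory'),
# ready to be loaded into the archive.
objs = directory.collect()
print(len(objs['directory']), 'directories,', len(objs['content']), 'contents')

# A single file can also be hashed directly, as the FileToContent tests do.
content = Content.from_file(path=b'/tmp/sample-folder/README', data=True)
print('sha1_git:', hash_to_hex(content.data['sha1_git']))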