diff --git a/swh/model/cli.py b/swh/model/cli.py
index 091f8ab..52f98aa 100644
--- a/swh/model/cli.py
+++ b/swh/model/cli.py
@@ -1,293 +1,335 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import os
import sys
from typing import Dict, List, Optional

# WARNING: do not import unnecessary things here to keep cli startup time under
# control

try:
    import click
except ImportError:
    print(
        "Cannot run swh-identify; the Click package is not installed. "
        "Please install 'swh.model[cli]' for full functionality.",
        file=sys.stderr,
    )
    sys.exit(1)

try:
    from swh.core.cli import swh as swh_cli_group
except ImportError:
    # stub so that swh-identify can be used when swh-core isn't installed
    swh_cli_group = click  # type: ignore

+from swh.model.from_disk import Directory
from swh.model.identifiers import CoreSWHID, ObjectType

CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])

# Mapping between dulwich types and Software Heritage ones. Used by snapshot ID
# computation.
_DULWICH_TYPES = {
    b"blob": "content",
    b"tree": "directory",
    b"commit": "revision",
    b"tag": "release",
}


class CoreSWHIDParamType(click.ParamType):
    """Click argument that accepts a core SWHID and returns it as a
    :class:`swh.model.identifiers.CoreSWHID` instance

    """

    name = "SWHID"

    def convert(self, value, param, ctx) -> CoreSWHID:
        from swh.model.exceptions import ValidationError

        try:
            return CoreSWHID.from_string(value)
        except ValidationError as e:
            self.fail(f'"{value}" is not a valid core SWHID: {e}', param, ctx)


def swhid_of_file(path) -> CoreSWHID:
    from swh.model.from_disk import Content
    from swh.model.hashutil import hash_to_bytes

    object = Content.from_file(path=path).get_data()
    return CoreSWHID(
        object_type=ObjectType.CONTENT, object_id=hash_to_bytes(object["sha1_git"])
    )


def swhid_of_file_content(data) -> CoreSWHID:
    from swh.model.from_disk import Content
    from swh.model.hashutil import hash_to_bytes

    object = Content.from_bytes(mode=644, data=data).get_data()
    return CoreSWHID(
        object_type=ObjectType.CONTENT, object_id=hash_to_bytes(object["sha1_git"])
    )


-def swhid_of_dir(path: bytes, exclude_patterns: List[bytes] = None) -> CoreSWHID:
-    from swh.model.from_disk import (
-        Directory,
-        accept_all_directories,
-        ignore_directories_patterns,
-    )
-    from swh.model.hashutil import hash_to_bytes
+def model_of_dir(
+    path: bytes, exclude_patterns: Optional[List[bytes]] = None
+) -> Directory:
+    from swh.model.from_disk import accept_all_directories, ignore_directories_patterns

    dir_filter = (
        ignore_directories_patterns(path, exclude_patterns)
        if exclude_patterns
        else accept_all_directories
    )

-    object = Directory.from_disk(path=path, dir_filter=dir_filter).get_data()
+    return Directory.from_disk(path=path, dir_filter=dir_filter)
+
+
+def swhid_of_dir(
+    path: bytes, exclude_patterns: Optional[List[bytes]] = None
+) -> CoreSWHID:
+    from swh.model.hashutil import hash_to_bytes
+
+    obj = model_of_dir(path, exclude_patterns)
+
    return CoreSWHID(
-        object_type=ObjectType.DIRECTORY, object_id=hash_to_bytes(object["id"])
+        object_type=ObjectType.DIRECTORY, object_id=hash_to_bytes(obj.get_data()["id"])
    )


def swhid_of_origin(url):
    from swh.model.hashutil import hash_to_bytes
    from swh.model.identifiers import (
        ExtendedObjectType,
        ExtendedSWHID,
        origin_identifier,
    )

    return ExtendedSWHID(
        object_type=ExtendedObjectType.ORIGIN,
        object_id=hash_to_bytes(origin_identifier({"url": url})),
    )
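
To illustrate the split above: `model_of_dir` exposes the full in-memory Merkle tree, while `swhid_of_dir` still returns only the root identifier. A minimal sketch, with a hypothetical checkout path and exclude pattern:

    import os

    from swh.model.cli import model_of_dir, swhid_of_dir

    path = os.fsencode("/tmp/sample-project")  # hypothetical checkout
    print(swhid_of_dir(path, [b"*.git"]))      # root SWHID only, VCS dirs pruned
    tree = model_of_dir(path, [b"*.git"])      # full tree, ready for traversal
    print(sum(1 for _ in tree.iter_tree()))    # total number of nodes
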
def swhid_of_git_repo(path) -> CoreSWHID:
    try:
        import dulwich.repo
    except ImportError:
        raise click.ClickException(
            "Cannot compute snapshot identifier; the Dulwich package is not installed. "
            "Please install 'swh.model[cli]' for full functionality.",
        )

    from swh.model import hashutil
    from swh.model.identifiers import snapshot_identifier

    repo = dulwich.repo.Repo(path)

    branches: Dict[bytes, Optional[Dict]] = {}
    for ref, target in repo.refs.as_dict().items():
        obj = repo[target]
        if obj:
            branches[ref] = {
                "target": hashutil.bytehex_to_hash(target),
                "target_type": _DULWICH_TYPES[obj.type_name],
            }
        else:
            branches[ref] = None

    for ref, target in repo.refs.get_symrefs().items():
        branches[ref] = {
            "target": target,
            "target_type": "alias",
        }

    snapshot = {"branches": branches}

    return CoreSWHID(
        object_type=ObjectType.SNAPSHOT,
        object_id=hashutil.hash_to_bytes(snapshot_identifier(snapshot)),
    )


def identify_object(obj_type, follow_symlinks, exclude_patterns, obj) -> str:
    from urllib.parse import urlparse

    if obj_type == "auto":
        if obj == "-" or os.path.isfile(obj):
            obj_type = "content"
        elif os.path.isdir(obj):
            obj_type = "directory"
        else:
            try:  # URL parsing
                if urlparse(obj).scheme:
                    obj_type = "origin"
                else:
                    raise ValueError
            except ValueError:
                raise click.BadParameter("cannot detect object type for %s" % obj)

    if obj == "-":
        content = sys.stdin.buffer.read()
        swhid = str(swhid_of_file_content(content))
    elif obj_type in ["content", "directory"]:
        path = obj.encode(sys.getfilesystemencoding())
        if follow_symlinks and os.path.islink(obj):
            path = os.path.realpath(obj)
        if obj_type == "content":
            swhid = str(swhid_of_file(path))
        elif obj_type == "directory":
            swhid = str(
                swhid_of_dir(path, [pattern.encode() for pattern in exclude_patterns])
            )
    elif obj_type == "origin":
        swhid = str(swhid_of_origin(obj))
    elif obj_type == "snapshot":
        swhid = str(swhid_of_git_repo(obj))
    else:  # shouldn't happen, due to option validation
        raise click.BadParameter("invalid object type: " + obj_type)

    # note: only the SWHID is returned; callers pair it with the original obj
    # (not the dereferenced path), so the user-given file name is preserved in
    # the output
    return swhid

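
The `identify` command below gains a `--recursive`/`-r` flag; in that mode it prints one `<SWHID> TAB <path>` line per node of the directory tree. A sketch of the expected shape of a session (the directory name is the test fixture used below; only the root line's hash is taken from the test data, the rest is abridged):

    $ swh identify --recursive sample-folder/
    swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759    sample-folder
    swh:1:cnt:...                                         sample-folder/...
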
@swh_cli_group.command(context_settings=CONTEXT_SETTINGS)
@click.option(
    "--dereference/--no-dereference",
    "follow_symlinks",
    default=True,
    help="follow (or not) symlinks for OBJECTS passed as arguments "
    + "(default: follow)",
)
@click.option(
    "--filename/--no-filename",
    "show_filename",
    default=True,
    help="show/hide file name (default: show)",
)
@click.option(
    "--type",
    "-t",
    "obj_type",
    default="auto",
    type=click.Choice(["auto", "content", "directory", "origin", "snapshot"]),
    help="type of object to identify (default: auto)",
)
@click.option(
    "--exclude",
    "-x",
    "exclude_patterns",
    metavar="PATTERN",
    multiple=True,
    help="Exclude directories using glob patterns "
    "(e.g., ``*.git`` to exclude all .git directories)",
)
@click.option(
    "--verify",
    "-v",
    metavar="SWHID",
    type=CoreSWHIDParamType(),
    help="reference identifier to be compared with the computed one",
)
+@click.option(
+    "-r", "--recursive", is_flag=True, help="compute SWHID recursively",
+)
@click.argument("objects", nargs=-1, required=True)
def identify(
-    obj_type, verify, show_filename, follow_symlinks, objects, exclude_patterns,
+    obj_type,
+    verify,
+    show_filename,
+    follow_symlinks,
+    objects,
+    exclude_patterns,
+    recursive,
):
    """Compute the Software Heritage persistent identifier (SWHID) for the given
    source code object(s).

    For more details about SWHIDs see:

    \b
    https://docs.softwareheritage.org/devel/swh-model/persistent-identifiers.html

    Tip: you can pass "-" to identify the content of standard input.

    \b
    Examples::

    \b
      $ swh identify fork.c kmod.c sched/deadline.c
      swh:1:cnt:2e391c754ae730bd2d8520c2ab497c403220c6e3    fork.c
      swh:1:cnt:0277d1216f80ae1adeed84a686ed34c9b2931fc2    kmod.c
      swh:1:cnt:57b939c81bce5d06fa587df8915f05affbe22b82    sched/deadline.c

    \b
      $ swh identify --no-filename /usr/src/linux/kernel/
      swh:1:dir:f9f858a48d663b3809c9e2f336412717496202ab

    \b
      $ git clone --mirror https://forge.softwareheritage.org/source/helloworld.git
      $ swh identify --type snapshot helloworld.git/
      swh:1:snp:510aa88bdc517345d258c1fc2babcd0e1f905e93

    """  # NoQA  # overlong lines in shell examples are fine
    from functools import partial
+    import logging

    if verify and len(objects) != 1:
        raise click.BadParameter("verification requires a single object")

-    results = zip(
-        objects,
-        map(
-            partial(identify_object, obj_type, follow_symlinks, exclude_patterns),
+    if recursive and not os.path.isdir(objects[0]):
+        recursive = False
+        logging.warning("recursive option disabled, input is not a directory")
+
+    if recursive:
+        if verify:
+            raise click.BadParameter(
+                "verification of recursive object identification is not supported"
+            )
+
+        if obj_type not in ("auto", "directory"):
+            raise click.BadParameter(
+                "recursive identification is supported only for directories"
+            )
+
+        path = os.fsencode(objects[0])
+        dir_obj = model_of_dir(
+            path, [pattern.encode() for pattern in exclude_patterns]
+        )
+        for sub_obj in dir_obj.iter_tree():
+            # directories and regular files carry their on-disk location in
+            # "path"; symlink nodes only carry their target, in "data"
+            path_name = "path" if "path" in sub_obj.data else "data"
+            path = os.fsdecode(sub_obj.data[path_name])
+            swhid = str(
+                CoreSWHID(
+                    object_type=ObjectType[sub_obj.object_type.upper()],
+                    object_id=sub_obj.hash,
+                )
+            )
+            msg = f"{swhid}\t{path}" if show_filename else f"{swhid}"
+            click.echo(msg)
+    else:
+        results = zip(
            objects,
-        ),
-    )
+            map(
+                partial(identify_object, obj_type, follow_symlinks, exclude_patterns),
+                objects,
+            ),
+        )

-    if verify:
-        swhid = next(results)[1]
-        if str(verify) == swhid:
-            click.echo("SWHID match: %s" % swhid)
-            sys.exit(0)
+        if verify:
+            swhid = next(results)[1]
+            if str(verify) == swhid:
+                click.echo("SWHID match: %s" % swhid)
+                sys.exit(0)
+            else:
+                click.echo("SWHID mismatch: %s != %s" % (verify, swhid))
+                sys.exit(1)
        else:
-            click.echo("SWHID mismatch: %s != %s" % (verify, swhid))
-            sys.exit(1)
-    else:
-        for (obj, swhid) in results:
-            msg = swhid
-            if show_filename:
-                msg = "%s\t%s" % (swhid, os.fsdecode(obj))
-            click.echo(msg)
+            for (obj, swhid) in results:
+                msg = swhid
+                if show_filename:
+                    msg = "%s\t%s" % (swhid, os.fsdecode(obj))
+                click.echo(msg)


if __name__ == "__main__":
    identify()
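
The recursive branch above can also be reproduced programmatically. A minimal sketch mirroring it, using only the helpers from this patch (the directory path is hypothetical):

    import os

    from swh.model.cli import model_of_dir
    from swh.model.identifiers import CoreSWHID, ObjectType

    tree = model_of_dir(os.fsencode("sample-folder"))  # hypothetical directory
    for node in tree.iter_tree():
        swhid = CoreSWHID(
            object_type=ObjectType[node.object_type.upper()],
            object_id=node.hash,
        )
        # symlink nodes have no "path" key, so fall back to an empty name here
        print(swhid, os.fsdecode(node.data.get("path", b"")))
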
diff --git a/swh/model/from_disk.py b/swh/model/from_disk.py
index ce17079..78dc174 100644
--- a/swh/model/from_disk.py
+++ b/swh/model/from_disk.py
@@ -1,540 +1,540 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import datetime
import enum
import fnmatch
import glob
import os
import re
import stat
from typing import Any, Iterable, Iterator, List, Optional, Pattern, Tuple

import attr
from attrs_strict import type_validator
from typing_extensions import Final

from . import model
from .exceptions import InvalidDirectoryPath
from .hashutil import MultiHash
from .identifiers import directory_entry_sort_key, directory_identifier
from .identifiers import identifier_to_bytes as id_to_bytes
from .identifiers import identifier_to_str as id_to_str
from .merkle import MerkleLeaf, MerkleNode


@attr.s(frozen=True, slots=True)
class DiskBackedContent(model.BaseContent):
    """Content-like class, which allows lazy-loading data from the disk."""

    object_type: Final = "content_file"

    sha1 = attr.ib(type=bytes, validator=type_validator())
    sha1_git = attr.ib(type=model.Sha1Git, validator=type_validator())
    sha256 = attr.ib(type=bytes, validator=type_validator())
    blake2s256 = attr.ib(type=bytes, validator=type_validator())

    length = attr.ib(type=int, validator=type_validator())

    status = attr.ib(
        type=str,
        validator=attr.validators.in_(["visible", "hidden"]),
        default="visible",
    )

    ctime = attr.ib(
        type=Optional[datetime.datetime],
        validator=type_validator(),
        default=None,
        eq=False,
    )

    path = attr.ib(type=Optional[bytes], default=None)

    @classmethod
    def from_dict(cls, d):
        return cls(**d)

    def __attrs_post_init__(self):
        if self.path is None:
            raise TypeError("path must not be None.")

    def with_data(self) -> model.Content:
        args = self.to_dict()
        del args["path"]
        assert self.path is not None
        with open(self.path, "rb") as fd:
            return model.Content.from_dict({**args, "data": fd.read()})


class DentryPerms(enum.IntEnum):
    """Admissible permissions for directory entries."""

    content = 0o100644
    """Content"""
    executable_content = 0o100755
    """Executable content (e.g. executable script)"""
    symlink = 0o120000
    """Symbolic link"""
    directory = 0o040000
    """Directory"""
    revision = 0o160000
    """Revision (e.g. submodule)"""


def mode_to_perms(mode):
    """Convert a file mode to a permission compatible with Software Heritage
    directory entries

    Args:
      mode (int): a file mode as returned by :func:`os.stat` in
                  :attr:`os.stat_result.st_mode`

    Returns:
      DentryPerms: one of the following values:
        :const:`DentryPerms.content`: plain file
        :const:`DentryPerms.executable_content`: executable file
        :const:`DentryPerms.symlink`: symbolic link
        :const:`DentryPerms.directory`: directory

    """
    if stat.S_ISLNK(mode):
        return DentryPerms.symlink
    if stat.S_ISDIR(mode):
        return DentryPerms.directory
    else:
        # file is executable in any way
        if mode & (0o111):
            return DentryPerms.executable_content
        else:
            return DentryPerms.content


class Content(MerkleLeaf):
    """Representation of a Software Heritage content as a node in a Merkle tree.

    The current Merkle hash for the Content nodes is the `sha1_git`, which
    makes it consistent with what :class:`Directory` uses for its own hash
    computation.

    """

    __slots__ = []  # type: List[str]

    object_type: Final = "content"

    @classmethod
    def from_bytes(cls, *, mode, data):
        """Convert data (raw :class:`bytes`) to a Software Heritage content entry

        Args:
          mode (int): a file mode (passed to :func:`mode_to_perms`)
          data (bytes): raw contents of the file

        """
        ret = MultiHash.from_data(data).digest()
        ret["length"] = len(data)
        ret["perms"] = mode_to_perms(mode)
        ret["data"] = data
        ret["status"] = "visible"

        return cls(ret)

    @classmethod
    def from_symlink(cls, *, path, mode):
        """Convert a symbolic link to a Software Heritage content entry"""
        return cls.from_bytes(mode=mode, data=os.readlink(path))
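
Before the on-disk constructor that follows, a minimal sketch of the in-memory one. The node's Merkle hash is the `sha1_git`, so for `b"hello\n"` it should match the well-known git blob hash:

    from swh.model.from_disk import Content
    from swh.model.hashutil import hash_to_hex

    c = Content.from_bytes(mode=0o100644, data=b"hello\n")
    # should print ce013625030ba8dba906f756967f9e9ca394464a (git blob id)
    print(hash_to_hex(c.hash))
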
    @classmethod
    def from_file(cls, *, path, max_content_length=None):
        """Compute the Software Heritage content entry corresponding to an
        on-disk file.

        The returned dictionary contains keys useful for both:

        - loading the content in the archive (hashes, `length`)
        - using the content as a directory entry in a directory

        Args:
          path (bytes): path to the file on disk
          max_content_length (Optional[int]): if given, all contents larger
                                              than this will be skipped.

        """
        file_stat = os.lstat(path)
        mode = file_stat.st_mode
        length = file_stat.st_size
        too_large = max_content_length is not None and length > max_content_length

        if stat.S_ISLNK(mode):
            # Symbolic link: return a file whose contents are the link target

            if too_large:
                # Unlike large contents, we can't stream symlinks to
                # MultiHash, and we don't want to fit them in memory if
                # they exceed max_content_length either.
                # Thankfully, this should not happen for reasonable values of
                # max_content_length because of OS/filesystem limitations,
                # so let's just raise an error.
                raise Exception(f"Symlink too large ({length} bytes)")

            return cls.from_symlink(path=path, mode=mode)
        elif not stat.S_ISREG(mode):
            # not a regular file: return the empty file instead
            return cls.from_bytes(mode=mode, data=b"")

        if too_large:
            skip_reason = "Content too large"
        else:
            skip_reason = None

        hashes = MultiHash.from_path(path).digest()
        if skip_reason:
            ret = {
                **hashes,
                "status": "absent",
                "reason": skip_reason,
            }
        else:
            ret = {
                **hashes,
                "status": "visible",
            }

        ret["path"] = path
        ret["perms"] = mode_to_perms(mode)
        ret["length"] = length

        obj = cls(ret)
        return obj

    def __repr__(self):
        return "Content(id=%s)" % id_to_str(self.hash)

    def compute_hash(self):
        return self.data["sha1_git"]

    def to_model(self) -> model.BaseContent:
        """Builds a `model.BaseContent` object based on this leaf."""
        data = self.get_data().copy()
        data.pop("perms", None)
        if data["status"] == "absent":
            data.pop("path", None)
            return model.SkippedContent.from_dict(data)
        elif "data" in data:
            return model.Content.from_dict(data)
        else:
            return DiskBackedContent.from_dict(data)


def accept_all_directories(dirpath: str, dirname: str, entries: Iterable[Any]) -> bool:
    """Default filter for :func:`Directory.from_disk` accepting all directories

    Args:
      dirpath (bytes): directory path
      dirname (bytes): directory name
      entries (list): directory entries

    """
    return True


def ignore_empty_directories(
    dirpath: str, dirname: str, entries: Iterable[Any]
) -> bool:
    """Filter for :func:`directory_to_objects` ignoring empty directories

    Args:
      dirpath (bytes): directory path
      dirname (bytes): directory name
      entries (list): directory entries

    Returns:
      True if the directory is not empty, false if the directory is empty

    """
    return bool(entries)


def ignore_named_directories(names, *, case_sensitive=True):
    """Filter for :func:`directory_to_objects` to ignore directories named one
    of names.

    Args:
      names (list of bytes): names to ignore
      case_sensitive (bool): whether to do the filtering in a case sensitive
        way

    Returns:
      a directory filter for :func:`directory_to_objects`

    """
    if not case_sensitive:
        names = [name.lower() for name in names]

    def named_filter(
        dirpath: str,
        dirname: str,
        entries: Iterable[Any],
        names: Iterable[Any] = names,
        case_sensitive: bool = case_sensitive,
    ):
        if case_sensitive:
            return dirname not in names
        else:
            return dirname.lower() not in names

    return named_filter


# TODO: `extract_regex_objs` has been copied and adapted from `swh.scanner`.
# In the future `swh.scanner` should use the `swh.model` version and remove
# its own.
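
A short sketch of how the named filter above plugs into `Directory.from_disk` (the checkout path is hypothetical; note that names are compared as bytes, since `os.walk` is given a bytes path):

    from swh.model.from_disk import Directory, ignore_named_directories

    tree = Directory.from_disk(
        path=b"/tmp/checkout",  # hypothetical checkout
        dir_filter=ignore_named_directories([b".git", b".hg"]),
    )
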
def extract_regex_objs(
    root_path: bytes, patterns: Iterable[bytes]
) -> Iterator[Pattern[bytes]]:
    """Generates a regex object for each pattern given in input and checks if the
    path is a subdirectory or relative to the root path.

    Args:
      root_path (bytes): path to the root directory
      patterns (list of bytes): patterns to match

    Yields:
      a compiled regex object per pattern

    """
    absolute_root_path = os.path.abspath(root_path)
    for pattern in patterns:
        for path in glob.glob(pattern):
            absolute_path = os.path.abspath(path)
            if not absolute_path.startswith(absolute_root_path):
                error_msg = (
                    b'The path "' + path + b'" is not a subdirectory or relative '
                    b'to the root directory path: "' + root_path + b'"'
                )
                raise InvalidDirectoryPath(error_msg)

        regex = fnmatch.translate(pattern.decode())
        yield re.compile(regex.encode())


def ignore_directories_patterns(root_path: bytes, patterns: Iterable[bytes]):
    """Filter for :func:`directory_to_objects` to ignore directories
    matching certain patterns.

    Args:
      root_path (bytes): path of the root directory
      patterns (list of bytes): patterns to ignore

    Returns:
      a directory filter for :func:`directory_to_objects`

    """
    sre_patterns = set(extract_regex_objs(root_path, patterns))

    def pattern_filter(
        dirpath: bytes,
        dirname: bytes,
        entries: Iterable[Any],
        patterns: Iterable[Any] = sre_patterns,
        root_path: bytes = os.path.abspath(root_path),
    ):
        full_path = os.path.abspath(dirpath)
        relative_path = os.path.relpath(full_path, root_path)
        return not any([pattern.match(relative_path) for pattern in patterns])

    return pattern_filter


def iter_directory(
    directory,
) -> Tuple[List[model.Content], List[model.SkippedContent], List[model.Directory]]:
    """Return the directory listing from a disk-memory directory instance.

    Raises:
      TypeError in case an unexpected object type is listed.

    Returns:
      Tuple of respectively iterable of content, skipped content and directories.

    """
    contents: List[model.Content] = []
    skipped_contents: List[model.SkippedContent] = []
    directories: List[model.Directory] = []

    for obj in directory.iter_tree():
        obj = obj.to_model()
        obj_type = obj.object_type
        if obj_type in (model.Content.object_type, DiskBackedContent.object_type):
            # FIXME: read the data from disk later (when the
            # storage buffer is flushed).
            obj = obj.with_data()
            contents.append(obj)
        elif obj_type == model.SkippedContent.object_type:
            skipped_contents.append(obj)
        elif obj_type == model.Directory.object_type:
            directories.append(obj)
        else:
            raise TypeError(f"Unexpected object type from disk: {obj}")

    return contents, skipped_contents, directories
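
For illustration, a minimal sketch of `iter_directory` splitting a tree into the three object kinds a loader would send to the archive (the path is hypothetical):

    from swh.model.from_disk import Directory, iter_directory

    tree = Directory.from_disk(path=b"/tmp/checkout")  # hypothetical checkout
    contents, skipped, directories = iter_directory(tree)
    print(len(contents), len(skipped), len(directories))
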
""" __slots__ = ["__entries"] object_type: Final = "directory" @classmethod def from_disk( cls, *, path, dir_filter=accept_all_directories, max_content_length=None ): """Compute the Software Heritage objects for a given directory tree Args: path (bytes): the directory to traverse data (bool): whether to add the data to the content objects save_path (bool): whether to add the path to the content objects dir_filter (function): a filter to ignore some directories by name or contents. Takes two arguments: dirname and entries, and returns True if the directory should be added, False if the directory should be ignored. max_content_length (Optional[int]): if given, all contents larger than this will be skipped. """ top_path = path dirs = {} for root, dentries, fentries in os.walk(top_path, topdown=False): entries = {} # Join fentries and dentries in the same processing, as symbolic # links to directories appear in dentries... for name in fentries + dentries: path = os.path.join(root, name) if not os.path.isdir(path) or os.path.islink(path): content = Content.from_file( path=path, max_content_length=max_content_length ) entries[name] = content else: if dir_filter(path, name, dirs[path].entries): entries[name] = dirs[path] - dirs[root] = cls({"name": os.path.basename(root)}) + dirs[root] = cls({"name": os.path.basename(root), "path": root}) dirs[root].update(entries) return dirs[top_path] def __init__(self, data=None): super().__init__(data=data) self.__entries = None def invalidate_hash(self): self.__entries = None super().invalidate_hash() @staticmethod def child_to_directory_entry(name, child): if child.object_type == "directory": return { "type": "dir", "perms": DentryPerms.directory, "target": child.hash, "name": name, } elif child.object_type == "content": return { "type": "file", "perms": child.data["perms"], "target": child.hash, "name": name, } else: raise ValueError(f"unknown child {child}") def get_data(self, **kwargs): return { "id": self.hash, "entries": self.entries, } @property def entries(self): """Child nodes, sorted by name in the same way `directory_identifier` does.""" if self.__entries is None: self.__entries = sorted( ( self.child_to_directory_entry(name, child) for name, child in self.items() ), key=directory_entry_sort_key, ) return self.__entries def compute_hash(self): return id_to_bytes(directory_identifier({"entries": self.entries})) def to_model(self) -> model.Directory: """Builds a `model.Directory` object based on this node; ignoring its children.""" return model.Directory.from_dict(self.get_data()) def __getitem__(self, key): if not isinstance(key, bytes): raise ValueError("Can only get a bytes from Directory") # Convenience shortcut if key == b"": return self if b"/" not in key: return super().__getitem__(key) else: key1, key2 = key.split(b"/", 1) return self.__getitem__(key1)[key2] def __setitem__(self, key, value): if not isinstance(key, bytes): raise ValueError("Can only set a bytes Directory entry") if not isinstance(value, (Content, Directory)): raise ValueError( "Can only set a Directory entry to a Content or " "Directory" ) if key == b"": raise ValueError("Directory entry must have a name") if b"\x00" in key: raise ValueError("Directory entry name must not contain nul bytes") if b"/" not in key: return super().__setitem__(key, value) else: key1, key2 = key.rsplit(b"/", 1) self[key1].__setitem__(key2, value) def __delitem__(self, key): if not isinstance(key, bytes): raise ValueError("Can only delete a bytes Directory entry") if b"/" not in key: 
            super().__delitem__(key)
        else:
            key1, key2 = key.rsplit(b"/", 1)
            del self[key1][key2]

    def __repr__(self):
        return "Directory(id=%s, entries=[%s])" % (
            id_to_str(self.hash),
            ", ".join(str(entry) for entry in self),
        )
diff --git a/swh/model/tests/swh_model_data.py b/swh/model/tests/swh_model_data.py
index 732cff5..8db2a28 100644
--- a/swh/model/tests/swh_model_data.py
+++ b/swh/model/tests/swh_model_data.py
@@ -1,363 +1,379 @@
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import datetime
from typing import Dict, Sequence

import attr

from swh.model.hashutil import MultiHash, hash_to_bytes
from swh.model.identifiers import ExtendedSWHID
from swh.model.model import (
    BaseModel,
    Content,
    Directory,
    DirectoryEntry,
    ExtID,
    MetadataAuthority,
    MetadataAuthorityType,
    MetadataFetcher,
    ObjectType,
    Origin,
    OriginVisit,
    OriginVisitStatus,
    Person,
    RawExtrinsicMetadata,
    Release,
    Revision,
    RevisionType,
    SkippedContent,
    Snapshot,
    SnapshotBranch,
    TargetType,
    Timestamp,
    TimestampWithTimezone,
)

UTC = datetime.timezone.utc

CONTENTS = [
    Content(
        length=4,
        data=f"foo{i}".encode(),
        status="visible",
        **MultiHash.from_data(f"foo{i}".encode()).digest(),
    )
    for i in range(10)
] + [
    Content(
        length=14,
        data=f"forbidden foo{i}".encode(),
        status="hidden",
        **MultiHash.from_data(f"forbidden foo{i}".encode()).digest(),
    )
    for i in range(10)
]

SKIPPED_CONTENTS = [
    SkippedContent(
        length=4,
        status="absent",
        reason=f"because chr({i}) != '*'",
        **MultiHash.from_data(f"bar{i}".encode()).digest(),
    )
    for i in range(2)
]

duplicate_content1 = Content(
    length=4,
    sha1=hash_to_bytes("44973274ccef6ab4dfaaf86599792fa9c3fe4689"),
    sha1_git=b"another-foo",
    blake2s256=b"another-bar",
    sha256=b"another-baz",
    status="visible",
)

# Craft a sha1 collision
sha1_array = bytearray(duplicate_content1.sha1_git)
sha1_array[0] += 1
duplicate_content2 = attr.evolve(duplicate_content1, sha1_git=bytes(sha1_array))

DUPLICATE_CONTENTS = [duplicate_content1, duplicate_content2]

COMMITTERS = [
    Person(fullname=b"foo", name=b"foo", email=b""),
    Person(fullname=b"bar", name=b"bar", email=b""),
]

DATES = [
    TimestampWithTimezone(
        timestamp=Timestamp(seconds=1234567891, microseconds=0,),
        offset=120,
        negative_utc=False,
    ),
    TimestampWithTimezone(
        timestamp=Timestamp(seconds=1234567892, microseconds=0,),
        offset=120,
        negative_utc=False,
    ),
]

REVISIONS = [
    Revision(
        id=hash_to_bytes("66c7c1cd9673275037140f2abff7b7b11fc9439c"),
        message=b"hello",
        date=DATES[0],
        committer=COMMITTERS[0],
        author=COMMITTERS[0],
        committer_date=DATES[0],
        type=RevisionType.GIT,
        directory=b"\x01" * 20,
        synthetic=False,
        metadata=None,
        parents=(
            hash_to_bytes("9b918dd063cec85c2bc63cc7f167e29f5894dcbc"),
            hash_to_bytes("757f38bdcd8473aaa12df55357f5e2f1a318e672"),
        ),
    ),
    Revision(
        id=hash_to_bytes("c7f96242d73c267adc77c2908e64e0c1cb6a4431"),
        message=b"hello again",
        date=DATES[1],
        committer=COMMITTERS[1],
        author=COMMITTERS[1],
        committer_date=DATES[1],
        type=RevisionType.MERCURIAL,
        directory=b"\x02" * 20,
        synthetic=False,
        metadata=None,
        parents=(),
        extra_headers=((b"foo", b"bar"),),
    ),
]

EXTIDS = [
    ExtID(extid_type="git256", extid=b"\x03" * 32, target=REVISIONS[0].swhid(),),
    ExtID(extid_type="hg", extid=b"\x04" * 20, target=REVISIONS[1].swhid(),),
]

RELEASES = [
    Release(
        id=hash_to_bytes("8059dc4e17fcd0e51ca3bcd6b80f4577d281fd08"),
        name=b"v0.0.1",
        date=TimestampWithTimezone(
            timestamp=Timestamp(seconds=1234567890, microseconds=0,),
            offset=120,
            negative_utc=False,
        ),
        author=COMMITTERS[0],
        target_type=ObjectType.REVISION,
        target=b"\x04" * 20,
        message=b"foo",
        synthetic=False,
    ),
    Release(
        id=hash_to_bytes("ee4d20e80af850cc0f417d25dc5073792c5010d2"),
        name=b"this-is-a/tag/1.0",
        date=None,
        author=None,
        target_type=ObjectType.DIRECTORY,
        target=b"\x05" * 20,
        message=b"bar",
        synthetic=False,
    ),
]

ORIGINS = [
    Origin(url="https://somewhere.org/den/fox",),
    Origin(url="https://overtherainbow.org/fox/den",),
]

ORIGIN_VISITS = [
    OriginVisit(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2013, 5, 7, 4, 20, 39, 369271, tzinfo=UTC),
        visit=1,
        type="git",
    ),
    OriginVisit(
        origin=ORIGINS[1].url,
        date=datetime.datetime(2014, 11, 27, 17, 20, 39, tzinfo=UTC),
        visit=1,
        type="hg",
    ),
    OriginVisit(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2018, 11, 27, 17, 20, 39, tzinfo=UTC),
        visit=2,
        type="git",
    ),
    OriginVisit(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2018, 11, 27, 17, 20, 39, tzinfo=UTC),
        visit=3,
        type="git",
    ),
    OriginVisit(
        origin=ORIGINS[1].url,
        date=datetime.datetime(2015, 11, 27, 17, 20, 39, tzinfo=UTC),
        visit=2,
        type="hg",
    ),
]

# The origin-visit-status dates need to be shifted slightly in the future from their
# visit dates counterpart. Otherwise, we are hitting storage-wise the "on conflict"
# ignore policy (because origin-visit-add creates an origin-visit-status with the same
# parameters from the origin-visit {origin, visit, date}...
ORIGIN_VISIT_STATUSES = [
    OriginVisitStatus(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2013, 5, 7, 4, 20, 39, 432222, tzinfo=UTC),
        visit=1,
        type="git",
        status="ongoing",
        snapshot=None,
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[1].url,
        date=datetime.datetime(2014, 11, 27, 17, 21, 12, tzinfo=UTC),
        visit=1,
        type="hg",
        status="ongoing",
        snapshot=None,
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2018, 11, 27, 17, 20, 59, tzinfo=UTC),
        visit=2,
        type="git",
        status="ongoing",
        snapshot=None,
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[0].url,
        date=datetime.datetime(2018, 11, 27, 17, 20, 49, tzinfo=UTC),
        visit=3,
        type="git",
        status="full",
        snapshot=hash_to_bytes("9e78d7105c5e0f886487511e2a92377b4ee4c32a"),
        metadata=None,
    ),
    OriginVisitStatus(
        origin=ORIGINS[1].url,
        date=datetime.datetime(2015, 11, 27, 17, 22, 18, tzinfo=UTC),
        visit=2,
        type="hg",
        status="partial",
        snapshot=hash_to_bytes("0e7f84ede9a254f2cd55649ad5240783f557e65f"),
        metadata=None,
    ),
]

DIRECTORIES = [
    Directory(id=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), entries=()),
    Directory(
        id=hash_to_bytes("87b339104f7dc2a8163dec988445e3987995545f"),
        entries=(
            DirectoryEntry(
                name=b"file1.ext",
                perms=0o644,
                type="file",
                target=CONTENTS[0].sha1_git,
            ),
            DirectoryEntry(
                name=b"dir1",
                perms=0o755,
                type="dir",
                target=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"),
            ),
            DirectoryEntry(
                name=b"subprepo1", perms=0o160000, type="rev", target=REVISIONS[1].id,
            ),
        ),
    ),
]
b"target/release": SnapshotBranch( target_type=TargetType.RELEASE, target=RELEASES[0].id ), b"target/snapshot": SnapshotBranch( target_type=TargetType.SNAPSHOT, target=hash_to_bytes("9e78d7105c5e0f886487511e2a92377b4ee4c32a"), ), }, ), ] METADATA_AUTHORITIES = [ MetadataAuthority( type=MetadataAuthorityType.FORGE, url="http://example.org/", metadata={}, ), ] METADATA_FETCHERS = [ MetadataFetcher(name="test-fetcher", version="1.0.0", metadata={},) ] RAW_EXTRINSIC_METADATA = [ RawExtrinsicMetadata( target=Origin("http://example.org/foo.git").swhid(), discovery_date=datetime.datetime(2020, 7, 30, 17, 8, 20, tzinfo=UTC), authority=attr.evolve(METADATA_AUTHORITIES[0], metadata=None), fetcher=attr.evolve(METADATA_FETCHERS[0], metadata=None), format="json", metadata=b'{"foo": "bar"}', ), RawExtrinsicMetadata( target=ExtendedSWHID.from_string(str(CONTENTS[0].swhid())), discovery_date=datetime.datetime(2020, 7, 30, 17, 8, 20, tzinfo=UTC), authority=attr.evolve(METADATA_AUTHORITIES[0], metadata=None), fetcher=attr.evolve(METADATA_FETCHERS[0], metadata=None), format="json", metadata=b'{"foo": "bar"}', ), ] TEST_OBJECTS: Dict[str, Sequence[BaseModel]] = { "content": CONTENTS, "directory": DIRECTORIES, "extid": EXTIDS, "metadata_authority": METADATA_AUTHORITIES, "metadata_fetcher": METADATA_FETCHERS, "origin": ORIGINS, "origin_visit": ORIGIN_VISITS, "origin_visit_status": ORIGIN_VISIT_STATUSES, "raw_extrinsic_metadata": RAW_EXTRINSIC_METADATA, "release": RELEASES, "revision": REVISIONS, "snapshot": SNAPSHOTS, "skipped_content": SKIPPED_CONTENTS, } + +SAMPLE_FOLDER_SWHIDS = [ + "swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759", + "swh:1:cnt:7d5c08111e21c8a9f71540939998551683375fad", + "swh:1:cnt:68769579c3eaadbe555379b9c3538e6628bae1eb", + "swh:1:cnt:e86b45e538d9b6888c969c89fbd22a85aa0e0366", + "swh:1:dir:3c1f578394f4623f74a0ba7fe761729f59fc6ec4", + "swh:1:dir:c3020f6bf135a38c6df3afeb5fb38232c5e07087", + "swh:1:cnt:133693b125bad2b4ac318535b84901ebb1f6b638", + "swh:1:dir:4b825dc642cb6eb9a060e54bf8d69288fbee4904", + "swh:1:cnt:19102815663d23f8b75a47e7a01965dcdc96468c", + "swh:1:dir:2b41c40f0d1fbffcba12497db71fba83fcca96e5", + "swh:1:cnt:8185dfb2c0c2c597d16f75a8a0c37668567c3d7e", + "swh:1:cnt:7c4c57ba9ff496ad179b8f65b1d286edbda34c9a", + "swh:1:cnt:acac326ddd63b0bc70840659d4ac43619484e69f", +] diff --git a/swh/model/tests/test_cli.py b/swh/model/tests/test_cli.py index de0de48..eeb5a63 100644 --- a/swh/model/tests/test_cli.py +++ b/swh/model/tests/test_cli.py @@ -1,179 +1,211 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import sys import tarfile import tempfile import unittest import unittest.mock from click.testing import CliRunner import pytest from swh.model import cli from swh.model.hashutil import hash_to_hex +from swh.model.tests.swh_model_data import SAMPLE_FOLDER_SWHIDS from swh.model.tests.test_from_disk import DataMixin @pytest.mark.fs class TestIdentify(DataMixin, unittest.TestCase): def setUp(self): super().setUp() self.runner = CliRunner() def assertSWHID(self, result, swhid): self.assertEqual(result.exit_code, 0, result.output) self.assertEqual(result.output.split()[0], swhid) def test_no_args(self): result = self.runner.invoke(cli.identify) self.assertNotEqual(result.exit_code, 0) def test_content_id(self): """identify file content""" self.make_contents(self.tmpdir_name) for 
diff --git a/swh/model/tests/test_cli.py b/swh/model/tests/test_cli.py
index de0de48..eeb5a63 100644
--- a/swh/model/tests/test_cli.py
+++ b/swh/model/tests/test_cli.py
@@ -1,179 +1,211 @@
# Copyright (C) 2018-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import os
import sys
import tarfile
import tempfile
import unittest
import unittest.mock

from click.testing import CliRunner
import pytest

from swh.model import cli
from swh.model.hashutil import hash_to_hex
+from swh.model.tests.swh_model_data import SAMPLE_FOLDER_SWHIDS
from swh.model.tests.test_from_disk import DataMixin


@pytest.mark.fs
class TestIdentify(DataMixin, unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.runner = CliRunner()

    def assertSWHID(self, result, swhid):
        self.assertEqual(result.exit_code, 0, result.output)
        self.assertEqual(result.output.split()[0], swhid)

    def test_no_args(self):
        result = self.runner.invoke(cli.identify)
        self.assertNotEqual(result.exit_code, 0)

    def test_content_id(self):
        """identify file content"""
        self.make_contents(self.tmpdir_name)
        for filename, content in self.contents.items():
            path = os.path.join(self.tmpdir_name, filename)
            result = self.runner.invoke(cli.identify, ["--type", "content", path])
            self.assertSWHID(result, "swh:1:cnt:" + hash_to_hex(content["sha1_git"]))

    def test_content_id_from_stdin(self):
        """identify file content"""
        self.make_contents(self.tmpdir_name)
        for _, content in self.contents.items():
            result = self.runner.invoke(cli.identify, ["-"], input=content["data"])
            self.assertSWHID(result, "swh:1:cnt:" + hash_to_hex(content["sha1_git"]))

    def test_directory_id(self):
        """identify an entire directory"""
        self.make_from_tarball(self.tmpdir_name)
        path = os.path.join(self.tmpdir_name, b"sample-folder")
        result = self.runner.invoke(cli.identify, ["--type", "directory", path])
        self.assertSWHID(result, "swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759")

    @pytest.mark.requires_optional_deps
    def test_snapshot_id(self):
        """identify a snapshot"""
        tarball = os.path.join(
            os.path.dirname(__file__), "data", "repos", "sample-repo.tgz"
        )
        with tempfile.TemporaryDirectory(prefix="swh.model.cli") as d:
            with tarfile.open(tarball, "r:gz") as t:
                t.extractall(d)
                repo_dir = os.path.join(d, "sample-repo")
                result = self.runner.invoke(
                    cli.identify, ["--type", "snapshot", repo_dir]
                )
                self.assertSWHID(
                    result, "swh:1:snp:abc888898124270905a0ef3c67e872ce08e7e0c1"
                )

    def test_snapshot_without_dulwich(self):
        """checks swh-identify returns a 'nice' message instead of a traceback
        when dulwich is not installed"""
        with unittest.mock.patch.dict(sys.modules, {"dulwich": None}):
            with tempfile.TemporaryDirectory(prefix="swh.model.cli") as d:
                result = self.runner.invoke(
                    cli.identify, ["--type", "snapshot", d], catch_exceptions=False,
                )
        assert result.exit_code == 1
        assert "'swh.model[cli]'" in result.output

    def test_origin_id(self):
        """identify an origin URL"""
        url = "https://github.com/torvalds/linux"
        result = self.runner.invoke(cli.identify, ["--type", "origin", url])
        self.assertSWHID(result, "swh:1:ori:b63a575fe3faab7692c9f38fb09d4bb45651bb0f")

    def test_symlink(self):
        """identify symlink --- both itself and target"""
        regular = os.path.join(self.tmpdir_name, b"foo.txt")
        link = os.path.join(self.tmpdir_name, b"bar.txt")
        open(regular, "w").write("foo\n")
        os.symlink(os.path.basename(regular), link)

        result = self.runner.invoke(cli.identify, [link])
        self.assertSWHID(result, "swh:1:cnt:257cc5642cb1a054f08cc83f2d943e56fd3ebe99")

        result = self.runner.invoke(cli.identify, ["--no-dereference", link])
        self.assertSWHID(result, "swh:1:cnt:996f1789ff67c0e3f69ef5933a55d54c5d0e9954")

    def test_show_filename(self):
        """filename is shown by default"""
        self.make_contents(self.tmpdir_name)
        for filename, content in self.contents.items():
            path = os.path.join(self.tmpdir_name, filename)
            result = self.runner.invoke(cli.identify, ["--type", "content", path])

            self.assertEqual(result.exit_code, 0)
            self.assertEqual(
                result.output.rstrip(),
                "swh:1:cnt:%s\t%s" % (hash_to_hex(content["sha1_git"]), path.decode()),
            )

    def test_hide_filename(self):
        """filename is hidden upon request"""
        self.make_contents(self.tmpdir_name)
        for filename, content in self.contents.items():
            path = os.path.join(self.tmpdir_name, filename)
            result = self.runner.invoke(
                cli.identify, ["--type", "content", "--no-filename", path]
            )
            self.assertSWHID(result, "swh:1:cnt:" + hash_to_hex(content["sha1_git"]))
    def test_auto_content(self):
        """automatic object type detection: content"""
        with tempfile.NamedTemporaryFile(prefix="swh.model.cli") as f:
            result = self.runner.invoke(cli.identify, [f.name])
            self.assertEqual(result.exit_code, 0)
            self.assertRegex(result.output, r"^swh:\d+:cnt:")

    def test_auto_directory(self):
        """automatic object type detection: directory"""
        with tempfile.TemporaryDirectory(prefix="swh.model.cli") as dirname:
            result = self.runner.invoke(cli.identify, [dirname])
            self.assertEqual(result.exit_code, 0)
            self.assertRegex(result.output, r"^swh:\d+:dir:")

    def test_auto_origin(self):
        """automatic object type detection: origin"""
        result = self.runner.invoke(cli.identify, ["https://github.com/torvalds/linux"])
        self.assertEqual(result.exit_code, 0, result.output)
        self.assertRegex(result.output, r"^swh:\d+:ori:")

    def test_verify_content(self):
        """identifier verification"""
        self.make_contents(self.tmpdir_name)
        for filename, content in self.contents.items():
            expected_id = "swh:1:cnt:" + hash_to_hex(content["sha1_git"])

            # match
            path = os.path.join(self.tmpdir_name, filename)
            result = self.runner.invoke(cli.identify, ["--verify", expected_id, path])
            self.assertEqual(result.exit_code, 0, result.output)

            # mismatch
            with open(path, "a") as f:
                f.write("trailing garbage to make verification fail")
            result = self.runner.invoke(cli.identify, ["--verify", expected_id, path])
            self.assertEqual(result.exit_code, 1)

    def test_exclude(self):
        """exclude patterns"""
        self.make_from_tarball(self.tmpdir_name)
        path = os.path.join(self.tmpdir_name, b"sample-folder")

        excluded_dir = os.path.join(path, b"excluded_dir\x96")
        os.mkdir(excluded_dir)
        with open(os.path.join(excluded_dir, b"some_file"), "w") as f:
            f.write("content")

        result = self.runner.invoke(
            cli.identify, ["--type", "directory", "--exclude", "excluded_*", path]
        )

        self.assertSWHID(result, "swh:1:dir:e8b0f1466af8608c8a3fb9879db172b887e80759")
+
+    def test_recursive_directory(self):
+        """identify a directory and all of its contents, recursively"""
+        self.make_from_tarball(self.tmpdir_name)
+        path = os.path.join(self.tmpdir_name, b"sample-folder")
+        result = self.runner.invoke(cli.identify, ["--recursive", path])
+        self.assertEqual(result.exit_code, 0, result.output)
+
+        # output is one "<swhid>\t<path>" line per object, so split() yields
+        # alternating SWHID and path tokens; keep only the SWHIDs
+        result_swhids = result.output.split()[::2]
+
+        assert len(result_swhids) == len(SAMPLE_FOLDER_SWHIDS)
+        for swhid in SAMPLE_FOLDER_SWHIDS:
+            assert swhid in result_swhids
+
+    def test_recursive_directory_no_filename(self):
+        """identify a directory recursively, without printing file names"""
+        self.make_from_tarball(self.tmpdir_name)
+        path = os.path.join(self.tmpdir_name, b"sample-folder")
+        result = self.runner.invoke(
+            cli.identify, ["--recursive", "--no-filename", path]
+        )
+        self.assertEqual(result.exit_code, 0, result.output)
+
+        result_swhids = result.output.split()
+
+        assert len(result_swhids) == len(SAMPLE_FOLDER_SWHIDS)
+        for swhid in SAMPLE_FOLDER_SWHIDS:
+            assert swhid in result_swhids
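
Finally, the `--verify` path exercised above simply compares a user-supplied SWHID with the computed one; the same check can be done in a few lines. A sketch using a hypothetical file whose content is "foo\n" (the expected hash is the one the symlink test above also relies on):

    from swh.model.cli import swhid_of_file
    from swh.model.identifiers import CoreSWHID

    expected = CoreSWHID.from_string(
        "swh:1:cnt:257cc5642cb1a054f08cc83f2d943e56fd3ebe99"
    )
    computed = swhid_of_file(b"foo.txt")  # hypothetical file containing "foo\n"
    assert str(expected) == str(computed)
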