diff --git a/swh/storage/__init__.py b/swh/storage/__init__.py index b248e7f0..1e192790 100644 --- a/swh/storage/__init__.py +++ b/swh/storage/__init__.py @@ -1,96 +1,96 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import importlib from typing import Any, Dict, List import warnings from .interface import StorageInterface STORAGE_IMPLEMENTATIONS = { - "local": ".storage.Storage", + "local": ".postgresql.storage.Storage", "remote": ".api.client.RemoteStorage", "memory": ".in_memory.InMemoryStorage", "filter": ".filter.FilteringProxyStorage", "buffer": ".buffer.BufferingProxyStorage", "retry": ".retry.RetryingProxyStorage", "cassandra": ".cassandra.CassandraStorage", } def get_storage(cls: str, **kwargs) -> StorageInterface: """Get a storage object of class `storage_class` with arguments `storage_args`. Args: storage (dict): dictionary with keys: - cls (str): storage's class, either local, remote, memory, filter, buffer - args (dict): dictionary with keys Returns: an instance of swh.storage.Storage or compatible class Raises: ValueError if passed an unknown storage class. """ if "args" in kwargs: warnings.warn( 'Explicit "args" key is deprecated, use keys directly instead.', DeprecationWarning, ) kwargs = kwargs["args"] if cls == "pipeline": return get_storage_pipeline(**kwargs) class_path = STORAGE_IMPLEMENTATIONS.get(cls) if class_path is None: raise ValueError( "Unknown storage class `%s`. Supported: %s" % (cls, ", ".join(STORAGE_IMPLEMENTATIONS)) ) (module_path, class_name) = class_path.rsplit(".", 1) module = importlib.import_module(module_path, package=__package__) Storage = getattr(module, class_name) return Storage(**kwargs) def get_storage_pipeline(steps: List[Dict[str, Any]]) -> StorageInterface: """Recursively get a storage object that may use other storage objects as backends. Args: steps (List[dict]): List of dicts that may be used as kwargs for `get_storage`. Returns: an instance of swh.storage.Storage or compatible class Raises: ValueError if passed an unknown storage class. """ storage_config = None for step in reversed(steps): if "args" in step: warnings.warn( 'Explicit "args" key is deprecated, use keys directly ' "instead.", DeprecationWarning, ) step = { "cls": step["cls"], **step["args"], } if storage_config: step["storage"] = storage_config storage_config = step if storage_config is None: raise ValueError("'pipeline' has no steps.") return get_storage(**storage_config) diff --git a/swh/storage/algos/diff.py b/swh/storage/algos/diff.py index 3f67718a..a22d47b9 100644 --- a/swh/storage/algos/diff.py +++ b/swh/storage/algos/diff.py @@ -1,413 +1,413 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # Utility module to efficiently compute the list of changed files # between two directory trees. # The implementation is inspired from the work of Alberto Cortés # for the go-git project. 
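A minimal sketch of how the factory above might be called, assuming the in-memory backend needs no extra arguments; the pipeline form chains proxy storages around a single backend::

    from swh.storage import get_storage

    # plain backend: extra keyword arguments are forwarded to the class constructor
    storage = get_storage("memory")

    # proxy pipeline: each step wraps the next one, the innermost dict is the
    # actual backend, and every key of a step other than "cls" becomes a kwarg
    pipelined = get_storage(
        "pipeline",
        steps=[
            {"cls": "filter"},
            {"cls": "memory"},
        ],
    )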
For more details, you can refer to: # - this blog post: https://blog.sourced.tech/post/difftree/ # - the reference implementation in go: # https://github.com/src-d/go-git/tree/master/utils/merkletrie import collections from swh.model.hashutil import hash_to_bytes from swh.model.identifiers import directory_identifier from .dir_iterators import DirectoryIterator, DoubleDirectoryIterator, Remaining # get the hash identifier for an empty directory _empty_dir_hash = hash_to_bytes(directory_identifier({"entries": []})) def _get_rev(storage, rev_id): """ Return revision data from swh storage. """ return list(storage.revision_get([rev_id]))[0] class _RevisionChangesList(object): """ Helper class to track the changes between two revision directories. """ def __init__(self, storage, track_renaming): """ Args: storage: instance of swh storage track_renaming (bool): whether to track or not files renaming """ self.storage = storage self.track_renaming = track_renaming self.result = [] # dicts used to track file renaming based on hash value # we use a list instead of a single entry to handle the corner # case when a repository contains multiple instance of # the same file in different directories and a commit # renames all of them self.inserted_hash_idx = collections.defaultdict(list) self.deleted_hash_idx = collections.defaultdict(list) def add_insert(self, it_to): """ Add a file insertion in the to directory. Args: it_to (swh.storage.algos.dir_iterators.DirectoryIterator): iterator on the to directory """ to_hash = it_to.current_hash() # if the current file hash has been previously marked as deleted, # the file has been renamed if self.track_renaming and self.deleted_hash_idx[to_hash]: # pop the delete change index in the same order it was inserted change = self.result[self.deleted_hash_idx[to_hash].pop(0)] # change the delete change as a rename one change["type"] = "rename" change["to"] = it_to.current() change["to_path"] = it_to.current_path() else: # add the insert change in the list self.result.append( { "type": "insert", "from": None, "from_path": None, "to": it_to.current(), "to_path": it_to.current_path(), } ) # if rename tracking is activated, add the change index in # the inserted_hash_idx dict if self.track_renaming: self.inserted_hash_idx[to_hash].append(len(self.result) - 1) def add_delete(self, it_from): """ Add a file deletion in the from directory. Args: it_from (swh.storage.algos.dir_iterators.DirectoryIterator): iterator on the from directory """ from_hash = it_from.current_hash() # if the current file has been previously marked as inserted, # the file has been renamed if self.track_renaming and self.inserted_hash_idx[from_hash]: # pop the insert change index in the same order it was inserted change = self.result[self.inserted_hash_idx[from_hash].pop(0)] # change the insert change as a rename one change["type"] = "rename" change["from"] = it_from.current() change["from_path"] = it_from.current_path() else: # add the delete change in the list self.result.append( { "type": "delete", "from": it_from.current(), "from_path": it_from.current_path(), "to": None, "to_path": None, } ) # if rename tracking is activated, add the change index in # the deleted_hash_idx dict if self.track_renaming: self.deleted_hash_idx[from_hash].append(len(self.result) - 1) def add_modify(self, it_from, it_to): """ Add a file modification in the to directory. 
Args: it_from (swh.storage.algos.dir_iterators.DirectoryIterator): iterator on the from directory it_to (swh.storage.algos.dir_iterators.DirectoryIterator): iterator on the to directory """ self.result.append( { "type": "modify", "from": it_from.current(), "from_path": it_from.current_path(), "to": it_to.current(), "to_path": it_to.current_path(), } ) def add_recursive(self, it, insert): """ Recursively add changes from a directory. Args: it (swh.storage.algos.dir_iterators.DirectoryIterator): iterator on a directory insert (bool): the type of changes to add (insertion or deletion) """ # current iterated element is a regular file, # simply add adequate change in the list if not it.current_is_dir(): if insert: self.add_insert(it) else: self.add_delete(it) return # current iterated element is a directory, dir_id = it.current_hash() # handle empty dir insertion/deletion as the swh model allow # to have such object compared to git if dir_id == _empty_dir_hash: if insert: self.add_insert(it) else: self.add_delete(it) # iterate on files reachable from it and add # adequate changes in the list else: sub_it = DirectoryIterator(self.storage, dir_id, it.current_path() + b"/") sub_it_current = sub_it.step() while sub_it_current: if not sub_it.current_is_dir(): if insert: self.add_insert(sub_it) else: self.add_delete(sub_it) sub_it_current = sub_it.step() def add_recursive_insert(self, it_to): """ Recursively add files insertion from a to directory. Args: it_to (swh.storage.algos.dir_iterators.DirectoryIterator): iterator on a to directory """ self.add_recursive(it_to, True) def add_recursive_delete(self, it_from): """ Recursively add files deletion from a from directory. Args: it_from (swh.storage.algos.dir_iterators.DirectoryIterator): iterator on a from directory """ self.add_recursive(it_from, False) def _diff_elts_same_name(changes, it): """" Compare two directory entries with the same name and add adequate changes if any. Args: changes (_RevisionChangesList): the list of changes between two revisions it (swh.storage.algos.dir_iterators.DoubleDirectoryIterator): the iterator traversing two revision directories at the same time """ # compare the two current directory elements of the iterator status = it.compare() # elements have same hash and same permissions: # no changes to add and call next on the two iterators if status["same_hash"] and status["same_perms"]: it.next_both() # elements are regular files and have been modified: # insert the modification change in the list and # call next on the two iterators elif status["both_are_files"]: changes.add_modify(it.it_from, it.it_to) it.next_both() # one element is a regular file, the other a directory: # recursively add delete/insert changes and call next # on the two iterators elif status["file_and_dir"]: changes.add_recursive_delete(it.it_from) changes.add_recursive_insert(it.it_to) it.next_both() # both elements are directories: elif status["both_are_dirs"]: # from directory is empty: # recursively add insert changes in the to directory # and call next on the two iterators if status["from_is_empty_dir"]: changes.add_recursive_insert(it.it_to) it.next_both() # to directory is empty: # recursively add delete changes in the from directory # and call next on the two iterators elif status["to_is_empty_dir"]: changes.add_recursive_delete(it.it_from) it.next_both() # both directories are not empty: # call step on the two iterators to descend further in # the directory trees. 
elif not status["from_is_empty_dir"] and not status["to_is_empty_dir"]: it.step_both() def _compare_paths(path1, path2): """ Compare paths in lexicographic depth-first order. For instance, it returns: - "a" < "b" - "b/c/d" < "b" - "c/foo.txt" < "c.txt" """ path1_parts = path1.split(b"/") path2_parts = path2.split(b"/") i = 0 while True: if len(path1_parts) == len(path2_parts) and i == len(path1_parts): return 0 elif len(path2_parts) == i: return 1 elif len(path1_parts) == i: return -1 else: if path2_parts[i] > path1_parts[i]: return -1 elif path2_parts[i] < path1_parts[i]: return 1 i = i + 1 def _diff_elts(changes, it): """ Compare two directory entries. Args: changes (_RevisionChangesList): the list of changes between two revisions it (swh.storage.algos.dir_iterators.DoubleDirectoryIterator): the iterator traversing two revision directories at the same time """ # compare current to and from path in depth-first lexicographic order c = _compare_paths(it.it_from.current_path(), it.it_to.current_path()) # current from path is lower than the current to path: # the from path has been deleted if c < 0: changes.add_recursive_delete(it.it_from) it.next_from() # current from path is greater than the current to path: # the to path has been inserted elif c > 0: changes.add_recursive_insert(it.it_to) it.next_to() # paths are the same and need more processing else: _diff_elts_same_name(changes, it) def diff_directories(storage, from_dir, to_dir, track_renaming=False): """ Compute the differential between two directories, i.e. the list of file changes (insertion / deletion / modification / renaming) between them. Args: - storage (swh.storage.storage.Storage): instance of a swh + storage (swh.storage.interface.StorageInterface): instance of a swh storage (either local or remote, for optimal performance the use of a local storage is recommended) from_dir (bytes): the swh identifier of the directory to compare from to_dir (bytes): the swh identifier of the directory to compare to track_renaming (bool): whether or not to track files renaming Returns: list: A list of dict representing the changes between the two revisions. Each dict contains the following entries: - *type*: a string describing the type of change ('insert' / 'delete' / 'modify' / 'rename') - *from*: a dict containing the directory entry metadata in the from revision (None in case of an insertion) - *from_path*: bytes string corresponding to the absolute path of the from revision entry (None in case of an insertion) - *to*: a dict containing the directory entry metadata in the to revision (None in case of a deletion) - *to_path*: bytes string corresponding to the absolute path of the to revision entry (None in case of a deletion) The returned list is sorted in lexicographic depth-first order according to the value of the *to_path* field. """ changes = _RevisionChangesList(storage, track_renaming) it = DoubleDirectoryIterator(storage, from_dir, to_dir) while True: r = it.remaining() if r == Remaining.NoMoreFiles: break elif r == Remaining.OnlyFromFilesRemain: changes.add_recursive_delete(it.it_from) it.next_from() elif r == Remaining.OnlyToFilesRemain: changes.add_recursive_insert(it.it_to) it.next_to() else: _diff_elts(changes, it) return changes.result def diff_revisions(storage, from_rev, to_rev, track_renaming=False): """ Compute the differential between two revisions, i.e. the list of file changes between the two associated directories. 
Args: - storage (swh.storage.storage.Storage): instance of a swh + storage (swh.storage.interface.StorageInterface): instance of a swh storage (either local or remote, for optimal performance the use of a local storage is recommended) from_rev (bytes): the identifier of the revision to compare from to_rev (bytes): the identifier of the revision to compare to track_renaming (bool): whether or not to track files renaming Returns: list: A list of dict describing the introduced file changes (see :func:`swh.storage.algos.diff.diff_directories`). """ from_dir = None if from_rev: from_dir = _get_rev(storage, from_rev)["directory"] to_dir = _get_rev(storage, to_rev)["directory"] return diff_directories(storage, from_dir, to_dir, track_renaming) def diff_revision(storage, revision, track_renaming=False): """ Computes the differential between a revision and its first parent. If the revision has no parents, the directory to compare from is considered as empty. In other words, it computes the file changes introduced in a specific revision. Args: - storage (swh.storage.storage.Storage): instance of a swh + storage (swh.storage.interface.StorageInterface): instance of a swh storage (either local or remote, for optimal performance the use of a local storage is recommended) revision (bytes): the identifier of the revision from which to compute the introduced changes. track_renaming (bool): whether or not to track files renaming Returns: list: A list of dict describing the introduced file changes (see :func:`swh.storage.algos.diff.diff_directories`). """ rev_data = _get_rev(storage, revision) parent = None if rev_data["parents"]: parent = rev_data["parents"][0] return diff_revisions(storage, parent, revision, track_renaming) diff --git a/swh/storage/algos/dir_iterators.py b/swh/storage/algos/dir_iterators.py index 5616ab7a..05788080 100644 --- a/swh/storage/algos/dir_iterators.py +++ b/swh/storage/algos/dir_iterators.py @@ -1,375 +1,375 @@ # Copyright (C) 2018 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information # Utility module to iterate on directory trees. # The implementation is inspired from the work of Alberto Cortés # for the go-git project. For more details, you can refer to: # - this blog post: https://blog.sourced.tech/post/difftree/ # - the reference implementation in go: # https://github.com/src-d/go-git/tree/master/utils/merkletrie from enum import Enum from swh.model.hashutil import hash_to_bytes from swh.model.identifiers import directory_identifier # get the hash identifier for an empty directory _empty_dir_hash = hash_to_bytes(directory_identifier({"entries": []})) def _get_dir(storage, dir_id): """ Return directory data from swh storage. 
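Taken together, the diff helpers above can be driven from any storage instance; a hedged sketch, where the revision identifier is a placeholder assumed to already exist in the storage::

    from swh.storage import get_storage
    from swh.storage.algos.diff import diff_revision

    storage = get_storage("memory")
    # hypothetical revision identifier (20 raw bytes)
    rev_id = bytes.fromhex("0000000000000000000000000000000000000000")
    for change in diff_revision(storage, rev_id, track_renaming=True):
        # each change is a dict with 'type' ('insert', 'delete', 'modify' or
        # 'rename') plus 'from', 'from_path', 'to' and 'to_path' entries
        print(change["type"], change["to_path"] or change["from_path"])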
""" return storage.directory_ls(dir_id) if dir_id else [] class DirectoryIterator(object): """ Helper class used to iterate on a directory tree in a depth-first search way with some additional features: - sibling nodes are iterated in lexicographic order by name - it is possible to skip the visit of sub-directories nodes for efficiency reasons when comparing two trees (no need to go deeper if two directories have the same hash) """ def __init__(self, storage, dir_id, base_path=b""): """ Args: - storage (swh.storage.storage.Storage): instance of swh storage - (either local or remote) + storage (swh.storage.interface.StorageInterface): instance of + swh storage (either local or remote) dir_id (bytes): identifier of a root directory base_path (bytes): optional base path used when traversing a sub-directory """ self.storage = storage self.root_dir_id = dir_id self.base_path = base_path self.restart() def restart(self): """ Restart the iteration at the beginning. """ # stack of frames representing currently visited directories: # the root directory is at the bottom while the current one # is at the top self.frames = [] self._push_dir_frame(self.root_dir_id) self.has_started = False def _push_dir_frame(self, dir_id): """ Visit a sub-directory by pushing a new frame to the stack. Each frame is itself a stack of directory entries. Args: dir_id (bytes): identifier of a root directory """ # get directory entries dir_data = _get_dir(self.storage, dir_id) # sort them in lexicographical order and reverse the ordering # in order to unstack the "smallest" entry each time the # iterator advances dir_data = sorted(dir_data, key=lambda e: e["name"], reverse=True) # push the directory frame to the main stack self.frames.append(dir_data) def top(self): """ Returns: list: The top frame of the main directories stack """ if not self.frames: return None return self.frames[-1] def current(self): """ Returns: dict: The current visited directory entry, i.e. the top element from the top frame """ top_frame = self.top() if not top_frame: return None return top_frame[-1] def current_hash(self): """ Returns: bytes: The hash value of the currently visited directory entry """ return self.current()["target"] def current_perms(self): """ Returns: int: The permissions value of the currently visited directory entry """ return self.current()["perms"] def current_path(self): """ Returns: str: The absolute path from the root directory of the currently visited directory entry """ top_frame = self.top() if not top_frame: return None path = [] for frame in self.frames: path.append(frame[-1]["name"]) return self.base_path + b"/".join(path) def current_is_dir(self): """ Returns: bool: If the currently visited directory entry is a directory """ return self.current()["type"] == "dir" def _advance(self, descend): """ Advance in the tree iteration. Args: descend (bool): whether or not to push a new frame if the currently visited element is a sub-directory Returns: dict: The description of the newly visited directory entry """ current = self.current() if not self.has_started or not current: self.has_started = True return current if descend and self.current_is_dir() and current["target"] != _empty_dir_hash: self._push_dir_frame(current["target"]) else: self.drop() return self.current() def next(self): """ Advance the tree iteration by dropping the current visited directory entry from the top frame. If the top frame ends up empty, the operation is recursively applied to remove all empty frames as the tree is climbed up towards its root. 
Returns: dict: The description of the newly visited directory entry """ return self._advance(False) def step(self): """ Advance the tree iteration like the next operation with the difference that if the current visited element is a sub-directory a new frame representing its content is pushed to the main stack. Returns: dict: The description of the newly visited directory entry """ return self._advance(True) def drop(self): """ Drop the current visited element from the top frame. If the frame ends up empty, the operation is recursively applied. """ frame = self.top() if not frame: return frame.pop() if not frame: self.frames.pop() self.drop() def __next__(self): entry = self.step() if not entry: raise StopIteration entry["path"] = self.current_path() return entry def __iter__(self): return DirectoryIterator(self.storage, self.root_dir_id, self.base_path) def dir_iterator(storage, dir_id): """ Return an iterator for recursively visiting a directory and its sub-directories. The associated paths are visited in lexicographic depth-first search order. Args: storage (swh.storage.Storage): an instance of a swh storage dir_id (bytes): a directory identifier Returns: swh.storage.algos.dir_iterators.DirectoryIterator: an iterator returning a dict at each iteration step describing a directory entry. A 'path' field is added in that dict to store the absolute path of the entry. """ return DirectoryIterator(storage, dir_id) class Remaining(Enum): """ Enum to represent the current state when iterating on both directory trees at the same time. """ NoMoreFiles = 0 OnlyToFilesRemain = 1 OnlyFromFilesRemain = 2 BothHaveFiles = 3 class DoubleDirectoryIterator(object): """ Helper class to traverse two directory trees at the same time and compare their contents to detect changes between them. """ def __init__(self, storage, dir_from, dir_to): """ Args: storage: instance of swh storage dir_from (bytes): hash identifier of the from directory dir_to (bytes): hash identifier of the to directory """ self.storage = storage self.dir_from = dir_from self.dir_to = dir_to self.restart() def restart(self): """ Restart the double iteration at the beginning. """ # initialize custom dfs iterators for the two directories self.it_from = DirectoryIterator(self.storage, self.dir_from) self.it_to = DirectoryIterator(self.storage, self.dir_to) # grab the first element of each iterator self.it_from.next() self.it_to.next() def next_from(self): """ Apply the next operation on the from iterator. """ self.it_from.next() def next_to(self): """ Apply the next operation on the to iterator. """ self.it_to.next() def next_both(self): """ Apply the next operation on both iterators. """ self.next_from() self.next_to() def step_from(self): """ Apply the step operation on the from iterator. """ self.it_from.step() def step_to(self): """ Apply the step operation on the from iterator. """ self.it_to.step() def step_both(self): """ Apply the step operation on the both iterators. 
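``dir_iterator`` wraps the iterator class above for plain ``for`` loops; a short sketch, assuming ``storage`` and ``dir_id`` reference an existing directory::

    from swh.storage.algos.dir_iterators import dir_iterator

    for entry in dir_iterator(storage, dir_id):
        # entries are yielded in lexicographic depth-first order and carry an
        # extra 'path' key holding the absolute path of the entry
        print(entry["path"], entry["type"], entry["target"].hex())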
""" self.step_from() self.step_to() def remaining(self): """ Returns: Remaining: the current state of the double iteration """ from_current = self.it_from.current() to_current = self.it_to.current() # no more files to iterate in both iterators if not from_current and not to_current: return Remaining.NoMoreFiles # still some files to iterate in the to iterator elif not from_current and to_current: return Remaining.OnlyToFilesRemain # still some files to iterate in the from iterator elif from_current and not to_current: return Remaining.OnlyFromFilesRemain # still files to iterate in the both iterators else: return Remaining.BothHaveFiles def compare(self): """ Compare the current iterated directory entries in both iterators and return the comparison status. Returns: dict: The status of the comparison with the following bool values: * *same_hash*: indicates if the two entries have the same hash * *same_perms*: indicates if the two entries have the same permissions * *both_are_dirs*: indicates if the two entries are directories * *both_are_files*: indicates if the two entries are regular files * *file_and_dir*: indicates if one of the entry is a directory and the other a regular file * *from_is_empty_dir*: indicates if the from entry is the empty directory * *from_is_empty_dir*: indicates if the to entry is the empty directory """ from_current_hash = self.it_from.current_hash() to_current_hash = self.it_to.current_hash() from_current_perms = self.it_from.current_perms() to_current_perms = self.it_to.current_perms() from_is_dir = self.it_from.current_is_dir() to_is_dir = self.it_to.current_is_dir() status = {} # compare hash status["same_hash"] = from_current_hash == to_current_hash # compare permissions status["same_perms"] = from_current_perms == to_current_perms # check if both elements are directories status["both_are_dirs"] = from_is_dir and to_is_dir # check if both elements are regular files status["both_are_files"] = not from_is_dir and not to_is_dir # check if one element is a directory, the other a regular file status["file_and_dir"] = ( not status["both_are_dirs"] and not status["both_are_files"] ) # check if the from element is the empty directory status["from_is_empty_dir"] = ( from_is_dir and from_current_hash == _empty_dir_hash ) # check if the to element is the empty directory status["to_is_empty_dir"] = to_is_dir and to_current_hash == _empty_dir_hash return status diff --git a/swh/storage/algos/revisions_walker.py b/swh/storage/algos/revisions_walker.py index a49e9cac..20a52a28 100644 --- a/swh/storage/algos/revisions_walker.py +++ b/swh/storage/algos/revisions_walker.py @@ -1,548 +1,548 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import heapq from abc import ABCMeta, abstractmethod from collections import deque _revs_walker_classes = {} class _RevisionsWalkerMetaClass(ABCMeta): def __new__(cls, clsname, bases, attrs): newclass = super().__new__(cls, clsname, bases, attrs) if "rw_type" in attrs: _revs_walker_classes[attrs["rw_type"]] = newclass return newclass class RevisionsWalker(metaclass=_RevisionsWalkerMetaClass): """ Abstract base class encapsulating the logic to walk across a revisions history starting from a given one. It defines an iterator returning the revisions according to a specific ordering implemented in derived classes. 
The iteration step performs the following operations: 1) Check if the iteration is finished by calling method :meth:`is_finished` and raises :exc:`StopIteration` if it it is the case 2) Get the next unseen revision by calling method :meth:`get_next_rev_id` 3) Process parents of that revision by calling method :meth:`process_parent_revs` for the next iteration steps 4) Check if the revision should be returned by calling method :meth:`should_return` and returns it if it is the case In order to easily instantiate a specific type of revisions walker, it is recommended to use the factory function :func:`get_revisions_walker`. Args: - storage (swh.storage.storage.Storage): instance of swh storage + storage (swh.storage.interface.StorageInterface): instance of swh storage (either local or remote) rev_start (bytes): a revision identifier max_revs (Optional[int]): maximum number of revisions to return state (Optional[dict]): previous state of that revisions walker """ def __init__(self, storage, rev_start, max_revs=None, state=None): self._revs_to_visit = [] self._done = set() self._revs = {} self._last_rev = None self._num_revs = 0 self._max_revs = max_revs self._missing_revs = set() if state: self._revs_to_visit = state["revs_to_visit"] self._done = state["done"] self._last_rev = state["last_rev"] self._num_revs = state["num_revs"] self._missing_revs = state["missing_revs"] self.storage = storage self.process_rev(rev_start) @abstractmethod def process_rev(self, rev_id): """ Abstract method whose purpose is to process a newly visited revision during the walk. Derived classes must implement it according to the desired method to walk across the revisions history (for instance through a dfs on the revisions DAG). Args: rev_id (bytes): the newly visited revision identifier """ pass @abstractmethod def get_next_rev_id(self): """ Abstract method whose purpose is to return the next revision during the iteration. Derived classes must implement it according to the desired method to walk across the revisions history. Returns: dict: A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` """ pass def process_parent_revs(self, rev): """ Process the parents of a revision when it is iterated. The default implementation simply calls :meth:`process_rev` for each parent revision in the order they are declared. Args: rev (dict): A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` """ for parent_id in rev["parents"]: self.process_rev(parent_id) def should_return(self, rev): """ Filter out a revision to return if needed. Default implementation returns all iterated revisions. Args: rev (dict): A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` Returns: bool: Whether to return the revision in the iteration """ return True def is_finished(self): """ Determine if the iteration is finished. This method is called at the beginning of each iteration loop. 
Returns: bool: Whether the iteration is finished """ if self._max_revs is not None and self._num_revs >= self._max_revs: return True if not self._revs_to_visit: return True return False def _get_rev(self, rev_id): rev = self._revs.get(rev_id) if rev is None: # cache some revisions in advance to avoid sending too much # requests to storage and thus speedup the revisions walk for rev in self.storage.revision_log([rev_id], limit=100): # revision data is missing, returned history will be truncated if rev is None: continue self._revs[rev["id"]] = rev return self._revs.get(rev_id) def missing_revisions(self): """ Return a set of revision identifiers whose associated data were found missing into the archive content while walking on the revisions graph. Returns: Set[bytes]: a set of revision identifiers """ return self._missing_revs def is_history_truncated(self): """ Return if the revision history generated so far has been truncated of not. A revision history might end up truncated if some revision data were found missing into the archive content. Returns: bool: Whether the history got truncated or not """ return len(self.missing_revisions()) > 0 def export_state(self): """ Export the internal state of that revision walker to a dict. Its purpose is to continue the iteration in a pagination context. Returns: dict: A dict containing the internal state of that revisions walker """ return { "revs_to_visit": self._revs_to_visit, "done": self._done, "last_rev": self._last_rev, "num_revs": self._num_revs, "missing_revs": self._missing_revs, } def __next__(self): if self.is_finished(): raise StopIteration while self._revs_to_visit: rev_id = self.get_next_rev_id() if rev_id in self._done: continue self._done.add(rev_id) rev = self._get_rev(rev_id) # revision data is missing, returned history will be truncated if rev is None: self._missing_revs.add(rev_id) continue self.process_parent_revs(rev) if self.should_return(rev): self._num_revs += 1 self._last_rev = rev return rev raise StopIteration def __iter__(self): return self class CommitterDateRevisionsWalker(RevisionsWalker): """ Revisions walker that returns revisions in reverse chronological order according to committer date (same behaviour as ``git log``) """ rw_type = "committer_date" def process_rev(self, rev_id): """ Add the revision to a priority queue according to the committer date. Args: rev_id (bytes): the newly visited revision identifier """ if rev_id not in self._done: rev = self._get_rev(rev_id) if rev is not None: commit_time = rev["committer_date"]["timestamp"]["seconds"] heapq.heappush(self._revs_to_visit, (-commit_time, rev_id)) else: self._missing_revs.add(rev_id) def get_next_rev_id(self): """ Return the smallest revision from the priority queue, i.e. the one with highest committer date. Returns: dict: A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` """ _, rev_id = heapq.heappop(self._revs_to_visit) return rev_id class BFSRevisionsWalker(RevisionsWalker): """ Revisions walker that returns revisions in the same order as when performing a breadth-first search on the revisions DAG. """ rw_type = "bfs" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._revs_to_visit = deque(self._revs_to_visit) def process_rev(self, rev_id): """ Append the revision to a queue. 
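Because the metaclass registers every subclass that defines ``rw_type``, new orderings can be plugged into ``get_revisions_walker`` without touching the factory. A toy sketch, assuming ``storage`` and ``rev_id`` exist; it reuses the same internal queue attributes as the built-in walkers::

    import random

    from swh.storage.algos.revisions_walker import (
        RevisionsWalker,
        get_revisions_walker,
    )

    class RandomOrderRevisionsWalker(RevisionsWalker):
        """Toy walker returning unseen revisions in a random order."""

        rw_type = "random"  # registered by the metaclass

        def process_rev(self, rev_id):
            if rev_id not in self._done:
                self._revs_to_visit.append(rev_id)

        def get_next_rev_id(self):
            index = random.randrange(len(self._revs_to_visit))
            return self._revs_to_visit.pop(index)

    walker = get_revisions_walker("random", storage, rev_id, max_revs=10)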
Args: rev_id (bytes): the newly visited revision identifier """ if rev_id not in self._done: self._revs_to_visit.append(rev_id) def get_next_rev_id(self): """ Return the next revision from the queue. Returns: dict: A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` """ return self._revs_to_visit.popleft() class DFSPostRevisionsWalker(RevisionsWalker): """ Revisions walker that returns revisions in the same order as when performing a depth-first search in post-order on the revisions DAG (i.e. after visiting a merge commit, the merged commit will be visited before the base it was merged on). """ rw_type = "dfs_post" def process_rev(self, rev_id): """ Append the revision to a stack. Args: rev_id (bytes): the newly visited revision identifier """ if rev_id not in self._done: self._revs_to_visit.append(rev_id) def get_next_rev_id(self): """ Return the next revision from the stack. Returns: dict: A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` """ return self._revs_to_visit.pop() class DFSRevisionsWalker(DFSPostRevisionsWalker): """ Revisions walker that returns revisions in the same order as when performing a depth-first search in pre-order on the revisions DAG (i.e. after visiting a merge commit, the base commit it was merged on will be visited before the merged commit). """ rw_type = "dfs" def process_parent_revs(self, rev): """ Process the parents of a revision when it is iterated in the reversed order they are declared. Args: rev (dict): A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` """ for parent_id in reversed(rev["parents"]): self.process_rev(parent_id) class PathRevisionsWalker(CommitterDateRevisionsWalker): """ Revisions walker that returns revisions where a specific path in the source tree has been modified, in other terms it allows to get the history for a specific file or directory. It has a behaviour similar to what ``git log`` offers by default, meaning the returned history is simplified in order to only show relevant revisions (see the `History Simplification `_ section of the associated manual for more details). Please note that to avoid walking the entire history, the iteration will stop once a revision where the path has been added is found. .. warning:: Due to client-side implementation, performances are not optimal when the total numbers of revisions to walk is large. This should only be used when the total number of revisions does not exceed a couple of thousands. Args: - storage (swh.storage.storage.Storage): instance of swh storage + storage (swh.storage.interface.StorageInterface): instance of swh storage (either local or remote) rev_start (bytes): a revision identifier path (str): the path in the source tree to retrieve the history max_revs (Optional[int]): maximum number of revisions to return state (Optional[dict]): previous state of that revisions walker """ rw_type = "path" def __init__(self, storage, rev_start, path, **kwargs): super().__init__(storage, rev_start, **kwargs) paths = path.strip("/").split("/") self._path = list(map(lambda p: p.encode("utf-8"), paths)) self._rev_dir_path = {} def _get_path_id(self, rev_id): """ Return the path checksum identifier in the source tree of the provided revision. 
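For the path-filtering walker above, the extra ``path`` argument selects the file or directory whose simplified history is wanted; a hedged sketch with an illustrative path, assuming ``storage`` and ``rev_id`` exist::

    from swh.storage.algos.revisions_walker import get_revisions_walker

    walker = get_revisions_walker(
        "path", storage, rev_id, path="docs/README.md", max_revs=100
    )
    for rev in walker:
        # revisions are ordered by committer date, most recent first
        print(rev["id"].hex())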
If the path corresponds to a directory, the value computed by :func:`swh.model.identifiers.directory_identifier` will be returned. If the path corresponds to a file, its sha1 checksum will be returned. Args: rev_id (bytes): a revision identifier Returns: bytes: the path identifier """ rev = self._get_rev(rev_id) rev_dir_id = rev["directory"] if rev_dir_id not in self._rev_dir_path: try: dir_info = self.storage.directory_entry_get_by_path( rev_dir_id, self._path ) self._rev_dir_path[rev_dir_id] = dir_info["target"] except Exception: self._rev_dir_path[rev_dir_id] = None return self._rev_dir_path[rev_dir_id] def is_finished(self): """ Check if the revisions iteration is finished. This checks for the specified path's existence in the last returned revision's parents' source trees. If not, the iteration is considered finished. Returns: bool: Whether to return the revision in the iteration """ if self._path and self._last_rev: last_rev_parents = self._last_rev["parents"] last_rev_parents_path_ids = [ self._get_path_id(p_rev) for p_rev in last_rev_parents ] no_path = all([path_id is None for path_id in last_rev_parents_path_ids]) if no_path: return True return super().is_finished() def process_parent_revs(self, rev): """ Process parents when a new revision is iterated. It enables to get a simplified revisions history in the same manner as ``git log``. When a revision has multiple parents, the following process is applied. If the revision was a merge, and has the same path identifier to one parent, follow only that parent (even if there are several parents with the same path identifier, follow only one of them.) Otherwise, follow all parents. Args: rev (dict): A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` """ rev_path_id = self._get_path_id(rev["id"]) if rev_path_id: if len(rev["parents"]) == 1: self.process_rev(rev["parents"][0]) else: parent_rev_path_ids = [ self._get_path_id(p_rev) for p_rev in rev["parents"] ] different_trees = all( [path_id != rev_path_id for path_id in parent_rev_path_ids] ) for i, p_rev in enumerate(rev["parents"]): if different_trees or parent_rev_path_ids[i] == rev_path_id: self.process_rev(p_rev) if not different_trees: break else: super().process_parent_revs(rev) def should_return(self, rev): """ Check if a revision should be returned when iterating. It verifies that the specified path has been modified by the revision but also that all parents have a path identifier different from the revision one in order to get a simplified history. Args: rev (dict): A dict describing a revision as returned by - :meth:`swh.storage.storage.Storage.revision_get` + :meth:`swh.storage.interface.StorageInterface.revision_get` Returns: bool: Whether to return the revision in the iteration """ rev_path_id = self._get_path_id(rev["id"]) if not rev["parents"]: return rev_path_id is not None parent_rev_path_ids = [self._get_path_id(p_rev) for p_rev in rev["parents"]] different_trees = all( [path_id != rev_path_id for path_id in parent_rev_path_ids] ) if rev_path_id != parent_rev_path_ids[0] and different_trees: return True return False def get_revisions_walker(rev_walker_type, *args, **kwargs): """ Instantiate a revisions walker of a given type. The following code snippet demonstrates how to use a revisions walker for processing a whole revisions history:: from swh.storage import get_storage storage = get_storage(...) 
revs_walker = get_revisions_walker('committer_date', storage, rev_id) for rev in revs_walker: # process revision rev It is also possible to walk a revisions history in a paginated way as illustrated below:: def get_revs_history_page(rw_type, storage, rev_id, page_num, page_size, rw_state): max_revs = (page_num + 1) * page_size revs_walker = get_revisions_walker(rw_type, storage, rev_id, max_revs=max_revs, state=rw_state) revs = list(revs_walker) rw_state = revs_walker.export_state() return revs rev_start = ... per_page = 50 rw_state = {} for page in range(0, 10): revs_page = get_revs_history_page('dfs', storage, rev_start, page, per_page, rw_state) # process revisions page Args: rev_walker_type (str): the type of revisions walker to return, possible values are: *committer_date*, *dfs*, *dfs_post*, *bfs* and *path* args (list): position arguments to pass to the revisions walker constructor kwargs (dict): keyword arguments to pass to the revisions walker constructor """ if rev_walker_type not in _revs_walker_classes: raise Exception('No revisions walker found for type "%s"' % rev_walker_type) revs_walker_class = _revs_walker_classes[rev_walker_type] return revs_walker_class(*args, **kwargs) diff --git a/swh/storage/algos/snapshot.py b/swh/storage/algos/snapshot.py index c236088b..eee81699 100644 --- a/swh/storage/algos/snapshot.py +++ b/swh/storage/algos/snapshot.py @@ -1,141 +1,141 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from typing import List, Optional from swh.model.hashutil import hash_to_hex from swh.model.model import Sha1Git, Snapshot, TargetType from swh.storage.algos.origin import ( origin_get_latest_visit_status, iter_origin_visits, iter_origin_visit_statuses, ) from swh.storage.interface import ListOrder, StorageInterface def snapshot_get_all_branches( storage: StorageInterface, snapshot_id: Sha1Git ) -> Optional[Snapshot]: """Get all the branches for a given snapshot Args: - storage (swh.storage.storage.Storage): the storage instance + storage (swh.storage.interface.StorageInterface): the storage instance snapshot_id (bytes): the snapshot's identifier Returns: dict: a dict with two keys: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. """ ret = storage.snapshot_get_branches(snapshot_id) if not ret: return None next_branch = ret["next_branch"] while next_branch: data = storage.snapshot_get_branches(snapshot_id, branches_from=next_branch) assert data, f"Snapshot {hash_to_hex(snapshot_id)} ceased to exist" ret["branches"].update(data["branches"]) next_branch = data["next_branch"] return Snapshot(id=ret["id"], branches=ret["branches"]) def snapshot_get_latest( storage: StorageInterface, origin: str, allowed_statuses: Optional[List[str]] = None, branches_count: Optional[int] = None, ) -> Optional[Snapshot]: """Get the latest snapshot for the given origin, optionally only from visits that have one of the given allowed_statuses. The branches of the snapshot are iterated in the lexicographical order of their names. Args: storage: Storage instance origin: the origin's URL allowed_statuses: list of visit statuses considered to find the latest snapshot for the visit. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. 
branches_count: Optional parameter to retrieve snapshot with all branches (default behavior when None) or not. If set to positive number, the snapshot will be partial with only that number of branches. Raises: ValueError if branches_count is not a positive value Returns: The snapshot object if one is found matching the criteria or None. """ visit_and_status = origin_get_latest_visit_status( storage, origin, allowed_statuses=allowed_statuses, require_snapshot=True, ) if not visit_and_status: return None _, visit_status = visit_and_status snapshot_id = visit_status.snapshot if not snapshot_id: return None if branches_count: # partial snapshot if not isinstance(branches_count, int) or branches_count <= 0: raise ValueError( "Parameter branches_count must be a positive integer. " f"Current value is {branches_count}" ) snapshot = storage.snapshot_get_branches( snapshot_id, branches_count=branches_count ) if snapshot is None: return None return Snapshot(id=snapshot["id"], branches=snapshot["branches"]) else: return snapshot_get_all_branches(storage, snapshot_id) def snapshot_id_get_from_revision( storage: StorageInterface, origin: str, revision_id: bytes ) -> Optional[bytes]: """Retrieve the most recent snapshot id targeting the revision_id for the given origin. *Warning* This is a potentially highly costly operation Returns The snapshot id if found. None otherwise. """ revision = storage.revision_get([revision_id]) if not revision: return None for visit in iter_origin_visits(storage, origin, order=ListOrder.DESC): assert visit.visit is not None for visit_status in iter_origin_visit_statuses( storage, origin, visit.visit, order=ListOrder.DESC ): snapshot_id = visit_status.snapshot if snapshot_id is None: continue snapshot = snapshot_get_all_branches(storage, snapshot_id) if not snapshot: continue for branch_name, branch in snapshot.branches.items(): if ( branch is not None and branch.target_type == TargetType.REVISION and branch.target == revision_id ): # snapshot found return snapshot_id return None diff --git a/swh/storage/backfill.py b/swh/storage/backfill.py index f23657ca..90b1ba1e 100644 --- a/swh/storage/backfill.py +++ b/swh/storage/backfill.py @@ -1,548 +1,548 @@ # Copyright (C) 2017-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information """Storage backfiller. The backfiller goal is to produce back part or all of the objects from a storage to the journal topics Current implementation consists in the JournalBackfiller class. It simply reads the objects from the storage and sends every object identifier back to the journal. 
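A short sketch of the snapshot helpers defined above; the origin URL is a placeholder and ``storage`` is assumed to exist::

    from swh.storage.algos.snapshot import snapshot_get_latest

    snapshot = snapshot_get_latest(
        storage, "https://example.org/repo.git", allowed_statuses=["full"]
    )
    if snapshot is not None:
        for name, branch in snapshot.branches.items():
            # a branch is None when dangling, otherwise a SnapshotBranch
            if branch is not None:
                print(name, branch.target_type.value, branch.target.hex())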
""" import logging from typing import Any, Callable, Dict from swh.core.db import BaseDb from swh.journal.writer.kafka import KafkaJournalWriter from swh.model.model import ( BaseModel, Directory, DirectoryEntry, RawExtrinsicMetadata, Release, Revision, Snapshot, SnapshotBranch, TargetType, ) -from swh.storage.converters import ( +from swh.storage.postgresql.converters import ( db_to_raw_extrinsic_metadata, db_to_release, db_to_revision, ) from swh.storage.replay import object_converter_fn logger = logging.getLogger(__name__) PARTITION_KEY = { "content": "sha1", "skipped_content": "sha1", "directory": "id", "metadata_authority": "type, url", "metadata_fetcher": "name, version", "raw_extrinsic_metadata": "id", "revision": "revision.id", "release": "release.id", "snapshot": "id", "origin": "id", "origin_visit": "origin_visit.origin", "origin_visit_status": "origin_visit_status.origin", } COLUMNS = { "content": [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "status", "ctime", ], "skipped_content": [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", "reason", ], "directory": ["id", "dir_entries", "file_entries", "rev_entries"], "metadata_authority": ["type", "url", "metadata",], "metadata_fetcher": ["name", "version", "metadata",], "raw_extrinsic_metadata": [ "raw_extrinsic_metadata.type", "raw_extrinsic_metadata.id", "metadata_authority.type", "metadata_authority.url", "metadata_fetcher.name", "metadata_fetcher.version", "discovery_date", "format", "raw_extrinsic_metadata.metadata", "origin", "visit", "snapshot", "release", "revision", "path", "directory", ], "revision": [ ("revision.id", "id"), "date", "date_offset", "date_neg_utc_offset", "committer_date", "committer_date_offset", "committer_date_neg_utc_offset", "type", "directory", "message", "synthetic", "metadata", "extra_headers", ( "array(select parent_id::bytea from revision_history rh " "where rh.id = revision.id order by rh.parent_rank asc)", "parents", ), ("a.id", "author_id"), ("a.name", "author_name"), ("a.email", "author_email"), ("a.fullname", "author_fullname"), ("c.id", "committer_id"), ("c.name", "committer_name"), ("c.email", "committer_email"), ("c.fullname", "committer_fullname"), ], "release": [ ("release.id", "id"), "date", "date_offset", "date_neg_utc_offset", "comment", ("release.name", "name"), "synthetic", "target", "target_type", ("a.id", "author_id"), ("a.name", "author_name"), ("a.email", "author_email"), ("a.fullname", "author_fullname"), ], "snapshot": ["id", "object_id"], "origin": ["url"], "origin_visit": ["visit", "type", ("origin.url", "origin"), "date",], "origin_visit_status": [ "visit", ("origin.url", "origin"), "date", "snapshot", "status", "metadata", ], } JOINS = { "release": ["person a on release.author=a.id"], "revision": [ "person a on revision.author=a.id", "person c on revision.committer=c.id", ], "origin_visit": ["origin on origin_visit.origin=origin.id"], "origin_visit_status": ["origin on origin_visit_status.origin=origin.id"], "raw_extrinsic_metadata": [ "metadata_authority on " "raw_extrinsic_metadata.authority_id=metadata_authority.id", "metadata_fetcher on raw_extrinsic_metadata.fetcher_id=metadata_fetcher.id", ], } def directory_converter(db: BaseDb, directory_d: Dict[str, Any]) -> Directory: """Convert directory from the flat representation to swh model compatible objects. 
""" columns = ["target", "name", "perms"] query_template = """ select %(columns)s from directory_entry_%(type)s where id in %%s """ types = ["file", "dir", "rev"] entries = [] with db.cursor() as cur: for type in types: ids = directory_d.pop("%s_entries" % type) if not ids: continue query = query_template % { "columns": ",".join(columns), "type": type, } cur.execute(query, (tuple(ids),)) for row in cur: entry_d = dict(zip(columns, row)) entry = DirectoryEntry( name=entry_d["name"], type=type, target=entry_d["target"], perms=entry_d["perms"], ) entries.append(entry) return Directory(id=directory_d["id"], entries=tuple(entries),) def raw_extrinsic_metadata_converter( db: BaseDb, metadata: Dict[str, Any] ) -> RawExtrinsicMetadata: """Convert revision from the flat representation to swh model compatible objects. """ return db_to_raw_extrinsic_metadata(metadata) def revision_converter(db: BaseDb, revision_d: Dict[str, Any]) -> Revision: """Convert revision from the flat representation to swh model compatible objects. """ revision = db_to_revision(revision_d) assert revision is not None, revision_d["id"] return revision def release_converter(db: BaseDb, release_d: Dict[str, Any]) -> Release: """Convert release from the flat representation to swh model compatible objects. """ release = db_to_release(release_d) assert release is not None, release_d["id"] return release def snapshot_converter(db: BaseDb, snapshot_d: Dict[str, Any]) -> Snapshot: """Convert snapshot from the flat representation to swh model compatible objects. """ columns = ["name", "target", "target_type"] query = """ select %s from snapshot_branches sbs inner join snapshot_branch sb on sb.object_id=sbs.branch_id where sbs.snapshot_id=%%s """ % ", ".join( columns ) with db.cursor() as cur: cur.execute(query, (snapshot_d["object_id"],)) branches = {} for name, *row in cur: branch_d = dict(zip(columns[1:], row)) if branch_d["target"] or branch_d["target_type"]: branch = None else: branch = SnapshotBranch( target=branch_d["target"], target_type=TargetType(branch_d["target_type"]), ) branches[name] = branch return Snapshot(id=snapshot_d["id"], branches=branches,) CONVERTERS: Dict[str, Callable[[BaseDb, Dict[str, Any]], BaseModel]] = { "directory": directory_converter, "raw_extrinsic_metadata": raw_extrinsic_metadata_converter, "revision": revision_converter, "release": release_converter, "snapshot": snapshot_converter, } def object_to_offset(object_id, numbits): """Compute the index of the range containing object id, when dividing space into 2^numbits. Args: object_id (str): The hex representation of object_id numbits (int): Number of bits in which we divide input space Returns: The index of the range containing object id """ q, r = divmod(numbits, 8) length = q + (r != 0) shift_bits = 8 - r if r else 0 truncated_id = object_id[: length * 2] if len(truncated_id) < length * 2: truncated_id += "0" * (length * 2 - len(truncated_id)) truncated_id_bytes = bytes.fromhex(truncated_id) return int.from_bytes(truncated_id_bytes, byteorder="big") >> shift_bits def byte_ranges(numbits, start_object=None, end_object=None): """Generate start/end pairs of bytes spanning numbits bits and constrained by optional start_object and end_object. 
Args: numbits (int): Number of bits in which we divide input space start_object (str): Hex object id contained in the first range returned end_object (str): Hex object id contained in the last range returned Yields: 2^numbits pairs of bytes """ q, r = divmod(numbits, 8) length = q + (r != 0) shift_bits = 8 - r if r else 0 def to_bytes(i): return int.to_bytes(i << shift_bits, length=length, byteorder="big") start_offset = 0 end_offset = 1 << numbits if start_object is not None: start_offset = object_to_offset(start_object, numbits) if end_object is not None: end_offset = object_to_offset(end_object, numbits) + 1 for start in range(start_offset, end_offset): end = start + 1 if start == 0: yield None, to_bytes(end) elif end == 1 << numbits: yield to_bytes(start), None else: yield to_bytes(start), to_bytes(end) def integer_ranges(start, end, block_size=1000): for start in range(start, end, block_size): if start == 0: yield None, block_size elif start + block_size > end: yield start, end else: yield start, start + block_size RANGE_GENERATORS = { "content": lambda start, end: byte_ranges(24, start, end), "skipped_content": lambda start, end: [(None, None)], "directory": lambda start, end: byte_ranges(24, start, end), "revision": lambda start, end: byte_ranges(24, start, end), "release": lambda start, end: byte_ranges(16, start, end), "snapshot": lambda start, end: byte_ranges(16, start, end), "origin": integer_ranges, "origin_visit": integer_ranges, "origin_visit_status": integer_ranges, } def compute_query(obj_type, start, end): columns = COLUMNS.get(obj_type) join_specs = JOINS.get(obj_type, []) join_clause = "\n".join("left join %s" % clause for clause in join_specs) where = [] where_args = [] if start: where.append("%(keys)s >= %%s") where_args.append(start) if end: where.append("%(keys)s < %%s") where_args.append(end) where_clause = "" if where: where_clause = ("where " + " and ".join(where)) % { "keys": "(%s)" % PARTITION_KEY[obj_type] } column_specs = [] column_aliases = [] for column in columns: if isinstance(column, str): column_specs.append(column) column_aliases.append(column) else: column_specs.append("%s as %s" % column) column_aliases.append(column[1]) query = """ select %(columns)s from %(table)s %(join)s %(where)s """ % { "columns": ",".join(column_specs), "table": obj_type, "join": join_clause, "where": where_clause, } return query, where_args, column_aliases def fetch(db, obj_type, start, end): """Fetch all obj_type's identifiers from db. This opens one connection, stream objects and when done, close the connection. 
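For a small bit width the generated ranges can be enumerated by hand, which makes the open-ended first and last pairs visible; a hedged check::

    from swh.storage.backfill import byte_ranges

    # 2 bits -> 4 ranges; the first lower bound and the last upper bound are
    # left as None so the whole identifier space is covered
    assert list(byte_ranges(2)) == [
        (None, b"\x40"),
        (b"\x40", b"\x80"),
        (b"\x80", b"\xc0"),
        (b"\xc0", None),
    ]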
Args: db (BaseDb): Db connection object obj_type (str): Object type start (Union[bytes|Tuple]): Range start identifier end (Union[bytes|Tuple]): Range end identifier Raises: ValueError if obj_type is not supported Yields: Objects in the given range """ query, where_args, column_aliases = compute_query(obj_type, start, end) converter = CONVERTERS.get(obj_type) with db.cursor() as cursor: logger.debug("Fetching data for table %s", obj_type) logger.debug("query: %s %s", query, where_args) cursor.execute(query, where_args) for row in cursor: record = dict(zip(column_aliases, row)) if converter: record = converter(db, record) else: record = object_converter_fn[obj_type](record) logger.debug("record: %s" % record) yield record def _format_range_bound(bound): if isinstance(bound, bytes): return bound.hex() else: return str(bound) MANDATORY_KEYS = ["brokers", "storage_dbconn", "prefix", "client_id"] class JournalBackfiller: """Class in charge of reading the storage's objects and sends those back to the journal's topics. This is designed to be run periodically. """ def __init__(self, config=None): self.config = config self.check_config(config) def check_config(self, config): missing_keys = [] for key in MANDATORY_KEYS: if not config.get(key): missing_keys.append(key) if missing_keys: raise ValueError( "Configuration error: The following keys must be" " provided: %s" % (",".join(missing_keys),) ) def parse_arguments(self, object_type, start_object, end_object): """Parse arguments Raises: ValueError for unsupported object type ValueError if object ids are not parseable Returns: Parsed start and end object ids """ if object_type not in COLUMNS: raise ValueError( "Object type %s is not supported. " "The only possible values are %s" % (object_type, ", ".join(COLUMNS.keys())) ) if object_type in ["origin", "origin_visit"]: if start_object: start_object = int(start_object) else: start_object = 0 if end_object: end_object = int(end_object) else: end_object = 100 * 1000 * 1000 # hard-coded limit return start_object, end_object def run(self, object_type, start_object, end_object, dry_run=False): """Reads storage's subscribed object types and send them to the journal's reading topic. 
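A hedged sketch of driving the backfiller; the broker, connection string and topic prefix are placeholders, a reachable database and Kafka cluster are required, and ``dry_run=True`` keeps the run from writing to the journal::

    from swh.storage.backfill import JournalBackfiller

    config = {
        # all four keys are mandatory (see MANDATORY_KEYS above)
        "brokers": ["kafka1.example.org:9092"],
        "storage_dbconn": "service=swh-storage",
        "prefix": "swh.journal.objects",
        "client_id": "swh.storage.backfiller",
    }
    backfiller = JournalBackfiller(config)
    backfiller.run("release", start_object=None, end_object=None, dry_run=True)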
""" start_object, end_object = self.parse_arguments( object_type, start_object, end_object ) db = BaseDb.connect(self.config["storage_dbconn"]) writer = KafkaJournalWriter( brokers=self.config["brokers"], prefix=self.config["prefix"], client_id=self.config["client_id"], ) for range_start, range_end in RANGE_GENERATORS[object_type]( start_object, end_object ): logger.info( "Processing %s range %s to %s", object_type, _format_range_bound(range_start), _format_range_bound(range_end), ) for obj in fetch(db, object_type, start=range_start, end=range_end,): if dry_run: continue writer.write_addition(object_type=object_type, object_=obj) writer.producer.flush() if __name__ == "__main__": print('Please use the "swh-journal backfiller run" command') diff --git a/swh/storage/common.py b/swh/storage/common.py index e32ba824..9a3f345b 100644 --- a/swh/storage/common.py +++ b/swh/storage/common.py @@ -1,6 +1,11 @@ -# Copyright (C) 2015-2016 The Software Heritage developers +# Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information -from swh.core.db.common import * # noqa +from swh.model.hashutil import MultiHash + + +def origin_url_to_sha1(origin_url: str) -> bytes: + """Convert an origin URL to a sha1. Encodes URL to utf-8.""" + return MultiHash.from_data(origin_url.encode("utf-8"), {"sha1"}).digest()["sha1"] diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py index 9eda0424..fc61c763 100644 --- a/swh/storage/in_memory.py +++ b/swh/storage/in_memory.py @@ -1,625 +1,625 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import functools import random from collections import defaultdict from typing import ( Any, Dict, Generic, Iterable, Iterator, List, Optional, Tuple, Type, TypeVar, Union, ) from swh.model.model import ( Content, SkippedContent, Sha1Git, ) from swh.storage.cassandra import CassandraStorage from swh.storage.cassandra.model import ( BaseRow, ContentRow, DirectoryRow, DirectoryEntryRow, MetadataAuthorityRow, MetadataFetcherRow, ObjectCountRow, OriginRow, OriginVisitRow, OriginVisitStatusRow, RawExtrinsicMetadataRow, ReleaseRow, RevisionRow, RevisionParentRow, SkippedContentRow, SnapshotRow, SnapshotBranchRow, ) from swh.storage.interface import ListOrder from swh.storage.objstorage import ObjStorage -from .converters import origin_url_to_sha1 +from .common import origin_url_to_sha1 from .writer import JournalWriter TRow = TypeVar("TRow", bound=BaseRow) class Table(Generic[TRow]): def __init__(self, row_class: Type[TRow]): self.row_class = row_class self.primary_key_cols = row_class.PARTITION_KEY + row_class.CLUSTERING_KEY # Map from tokens to clustering keys to rows # These are not actually partitions (or rather, there is one partition # for each token) and they aren't sorted. # But it is good enough if we don't care about performance; # and makes the code a lot simpler. self.data: Dict[int, Dict[Tuple, TRow]] = defaultdict(dict) def __repr__(self): return f"<__module__.Table[{self.row_class.__name__}] object>" def partition_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple: """Returns the partition key of a row (ie. 
the cells which get hashed into the token.""" if isinstance(row, dict): row_d = row else: row_d = row.to_dict() return tuple(row_d[col] for col in self.row_class.PARTITION_KEY) def clustering_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple: """Returns the clustering key of a row (ie. the cells which are used for sorting rows within a partition.""" if isinstance(row, dict): row_d = row else: row_d = row.to_dict() return tuple(row_d[col] for col in self.row_class.CLUSTERING_KEY) def primary_key(self, row): return self.partition_key(row) + self.clustering_key(row) def primary_key_from_dict(self, d: Dict[str, Any]) -> Tuple: """Returns the primary key (ie. concatenation of partition key and clustering key) of the given dictionary interpreted as a row.""" return tuple(d[col] for col in self.primary_key_cols) def token(self, key: Tuple): """Returns the token of a row (ie. the hash of its partition key).""" return hash(key) def get_partition(self, token: int) -> Dict[Tuple, TRow]: """Returns the partition that contains this token.""" return self.data[token] def insert(self, row: TRow): partition = self.data[self.token(self.partition_key(row))] partition[self.clustering_key(row)] = row def split_primary_key(self, key: Tuple) -> Tuple[Tuple, Tuple]: """Returns (partition_key, clustering_key) from a partition key""" assert len(key) == len(self.primary_key_cols) partition_key = key[0 : len(self.row_class.PARTITION_KEY)] clustering_key = key[len(self.row_class.PARTITION_KEY) :] return (partition_key, clustering_key) def get_from_partition_key(self, partition_key: Tuple) -> Iterable[TRow]: """Returns at most one row, from its partition key.""" token = self.token(partition_key) for row in self.get_from_token(token): if self.partition_key(row) == partition_key: yield row def get_from_primary_key(self, primary_key: Tuple) -> Optional[TRow]: """Returns at most one row, from its primary key.""" (partition_key, clustering_key) = self.split_primary_key(primary_key) token = self.token(partition_key) partition = self.get_partition(token) return partition.get(clustering_key) def get_from_token(self, token: int) -> Iterable[TRow]: """Returns all rows whose token (ie. 
non-cryptographic hash of the partition key) is the one passed as argument.""" return (v for (k, v) in sorted(self.get_partition(token).items())) def iter_all(self) -> Iterator[Tuple[Tuple, TRow]]: return ( (self.primary_key(row), row) for (token, partition) in self.data.items() for (clustering_key, row) in partition.items() ) def get_random(self) -> Optional[TRow]: return random.choice([row for (pk, row) in self.iter_all()]) class InMemoryCqlRunner: def __init__(self): self._contents = Table(ContentRow) self._content_indexes = defaultdict(lambda: defaultdict(set)) self._skipped_contents = Table(ContentRow) self._skipped_content_indexes = defaultdict(lambda: defaultdict(set)) self._directories = Table(DirectoryRow) self._directory_entries = Table(DirectoryEntryRow) self._revisions = Table(RevisionRow) self._revision_parents = Table(RevisionParentRow) self._releases = Table(ReleaseRow) self._snapshots = Table(SnapshotRow) self._snapshot_branches = Table(SnapshotBranchRow) self._origins = Table(OriginRow) self._origin_visits = Table(OriginVisitRow) self._origin_visit_statuses = Table(OriginVisitStatusRow) self._metadata_authorities = Table(MetadataAuthorityRow) self._metadata_fetchers = Table(MetadataFetcherRow) self._raw_extrinsic_metadata = Table(RawExtrinsicMetadataRow) self._stat_counters = defaultdict(int) def increment_counter(self, object_type: str, nb: int): self._stat_counters[object_type] += nb def stat_counters(self) -> Iterable[ObjectCountRow]: for (object_type, count) in self._stat_counters.items(): yield ObjectCountRow(partition_key=0, object_type=object_type, count=count) ########################## # 'content' table ########################## def _content_add_finalize(self, content: ContentRow) -> None: self._contents.insert(content) self.increment_counter("content", 1) def content_add_prepare(self, content: ContentRow): finalizer = functools.partial(self._content_add_finalize, content) return (self._contents.token(self._contents.partition_key(content)), finalizer) def content_get_from_pk( self, content_hashes: Dict[str, bytes] ) -> Optional[ContentRow]: primary_key = self._contents.primary_key_from_dict(content_hashes) return self._contents.get_from_primary_key(primary_key) def content_get_from_token(self, token: int) -> Iterable[ContentRow]: return self._contents.get_from_token(token) def content_get_random(self) -> Optional[ContentRow]: return self._contents.get_random() def content_get_token_range( self, start: int, end: int, limit: int, ) -> Iterable[Tuple[int, ContentRow]]: matches = [ (token, row) for (token, partition) in self._contents.data.items() for (clustering_key, row) in partition.items() if start <= token <= end ] matches.sort() return matches[0:limit] ########################## # 'content_by_*' tables ########################## def content_missing_by_sha1_git(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if id_ not in self._content_indexes["sha1_git"]: missing.append(id_) return missing def content_index_add_one(self, algo: str, content: Content, token: int) -> None: self._content_indexes[algo][content.get_hash(algo)].add(token) def content_get_tokens_from_single_hash( self, algo: str, hash_: bytes ) -> Iterable[int]: return self._content_indexes[algo][hash_] ########################## # 'skipped_content' table ########################## def _skipped_content_add_finalize(self, content: SkippedContentRow) -> None: self._skipped_contents.insert(content) self.increment_counter("skipped_content", 1) def skipped_content_add_prepare(self, 
content: SkippedContentRow): finalizer = functools.partial(self._skipped_content_add_finalize, content) return ( self._skipped_contents.token(self._contents.partition_key(content)), finalizer, ) def skipped_content_get_from_pk( self, content_hashes: Dict[str, bytes] ) -> Optional[SkippedContentRow]: primary_key = self._skipped_contents.primary_key_from_dict(content_hashes) return self._skipped_contents.get_from_primary_key(primary_key) ########################## # 'skipped_content_by_*' tables ########################## def skipped_content_index_add_one( self, algo: str, content: SkippedContent, token: int ) -> None: self._skipped_content_indexes[algo][content.get_hash(algo)].add(token) ########################## # 'directory' table ########################## def directory_missing(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if self._directories.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def directory_add_one(self, directory: DirectoryRow) -> None: self._directories.insert(directory) self.increment_counter("directory", 1) def directory_get_random(self) -> Optional[DirectoryRow]: return self._directories.get_random() ########################## # 'directory_entry' table ########################## def directory_entry_add_one(self, entry: DirectoryEntryRow) -> None: self._directory_entries.insert(entry) def directory_entry_get( self, directory_ids: List[Sha1Git] ) -> Iterable[DirectoryEntryRow]: for id_ in directory_ids: yield from self._directory_entries.get_from_partition_key((id_,)) ########################## # 'revision' table ########################## def revision_missing(self, ids: List[bytes]) -> Iterable[bytes]: missing = [] for id_ in ids: if self._revisions.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def revision_add_one(self, revision: RevisionRow) -> None: self._revisions.insert(revision) self.increment_counter("revision", 1) def revision_get_ids(self, revision_ids) -> Iterable[int]: for id_ in revision_ids: if self._revisions.get_from_primary_key((id_,)) is not None: yield id_ def revision_get(self, revision_ids: List[Sha1Git]) -> Iterable[RevisionRow]: for id_ in revision_ids: row = self._revisions.get_from_primary_key((id_,)) if row: yield row def revision_get_random(self) -> Optional[RevisionRow]: return self._revisions.get_random() ########################## # 'revision_parent' table ########################## def revision_parent_add_one(self, revision_parent: RevisionParentRow) -> None: self._revision_parents.insert(revision_parent) def revision_parent_get(self, revision_id: Sha1Git) -> Iterable[bytes]: for parent in self._revision_parents.get_from_partition_key((revision_id,)): yield parent.parent_id ########################## # 'release' table ########################## def release_missing(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if self._releases.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def release_add_one(self, release: ReleaseRow) -> None: self._releases.insert(release) self.increment_counter("release", 1) def release_get(self, release_ids: List[str]) -> Iterable[ReleaseRow]: for id_ in release_ids: row = self._releases.get_from_primary_key((id_,)) if row: yield row def release_get_random(self) -> Optional[ReleaseRow]: return self._releases.get_random() ########################## # 'snapshot' table ########################## def snapshot_missing(self, ids: List[bytes]) -> List[bytes]: missing = [] for id_ in ids: if 
self._snapshots.get_from_primary_key((id_,)) is None: missing.append(id_) return missing def snapshot_add_one(self, snapshot: SnapshotRow) -> None: self._snapshots.insert(snapshot) self.increment_counter("snapshot", 1) def snapshot_get_random(self) -> Optional[SnapshotRow]: return self._snapshots.get_random() ########################## # 'snapshot_branch' table ########################## def snapshot_branch_add_one(self, branch: SnapshotBranchRow) -> None: self._snapshot_branches.insert(branch) def snapshot_count_branches(self, snapshot_id: Sha1Git) -> Dict[Optional[str], int]: """Returns a dictionary from type names to the number of branches of that type.""" counts: Dict[Optional[str], int] = defaultdict(int) for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)): if branch.target_type is None: target_type = None else: target_type = branch.target_type counts[target_type] += 1 return counts def snapshot_branch_get( self, snapshot_id: Sha1Git, from_: bytes, limit: int ) -> Iterable[SnapshotBranchRow]: count = 0 for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)): if branch.name >= from_: count += 1 yield branch if count >= limit: break ########################## # 'origin' table ########################## def origin_add_one(self, origin: OriginRow) -> None: self._origins.insert(origin) self.increment_counter("origin", 1) def origin_get_by_sha1(self, sha1: bytes) -> Iterable[OriginRow]: return self._origins.get_from_partition_key((sha1,)) def origin_get_by_url(self, url: str) -> Iterable[OriginRow]: return self.origin_get_by_sha1(origin_url_to_sha1(url)) def origin_list( self, start_token: int, limit: int ) -> Iterable[Tuple[int, OriginRow]]: """Returns an iterable of (token, origin)""" matches = [ (token, row) for (token, partition) in self._origins.data.items() for (clustering_key, row) in partition.items() if token >= start_token ] matches.sort() return matches[0:limit] def origin_iter_all(self) -> Iterable[OriginRow]: return ( row for (token, partition) in self._origins.data.items() for (clustering_key, row) in partition.items() ) def origin_generate_unique_visit_id(self, origin_url: str) -> int: origin = list(self.origin_get_by_url(origin_url))[0] visit_id = origin.next_visit_id origin.next_visit_id += 1 return visit_id ########################## # 'origin_visit' table ########################## def origin_visit_get( self, origin_url: str, last_visit: Optional[int], limit: int, order: ListOrder, ) -> Iterable[OriginVisitRow]: visits = list(self._origin_visits.get_from_partition_key((origin_url,))) if last_visit is not None: if order == ListOrder.ASC: visits = [v for v in visits if v.visit > last_visit] else: visits = [v for v in visits if v.visit < last_visit] visits.sort(key=lambda v: v.visit, reverse=order == ListOrder.DESC) visits = visits[0:limit] return visits def origin_visit_add_one(self, visit: OriginVisitRow) -> None: self._origin_visits.insert(visit) self.increment_counter("origin_visit", 1) def origin_visit_get_one( self, origin_url: str, visit_id: int ) -> Optional[OriginVisitRow]: return self._origin_visits.get_from_primary_key((origin_url, visit_id)) def origin_visit_get_all(self, origin_url: str) -> Iterable[OriginVisitRow]: return self._origin_visits.get_from_partition_key((origin_url,)) def origin_visit_iter(self, start_token: int) -> Iterator[OriginVisitRow]: """Returns all origin visits in order from this token, and wraps around the token space.""" return ( row for (token, partition) in self._origin_visits.data.items() 
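# Sketch note: this in-memory stand-in walks every stored visit; the
# start_token argument is accepted for API compatibility but is not used here
# to filter or reorder the rows.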
for (clustering_key, row) in partition.items() ) ########################## # 'origin_visit_status' table ########################## def origin_visit_status_get_range( self, origin: str, visit: int, date_from: Optional[datetime.datetime], limit: int, order: ListOrder, ) -> Iterable[OriginVisitStatusRow]: statuses = list(self.origin_visit_status_get(origin, visit)) if date_from is not None: if order == ListOrder.ASC: statuses = [s for s in statuses if s.date >= date_from] else: statuses = [s for s in statuses if s.date <= date_from] statuses.sort(key=lambda s: s.date, reverse=order == ListOrder.DESC) return statuses[0:limit] def origin_visit_status_add_one(self, visit_update: OriginVisitStatusRow) -> None: self._origin_visit_statuses.insert(visit_update) self.increment_counter("origin_visit_status", 1) def origin_visit_status_get_latest( self, origin: str, visit: int, ) -> Optional[OriginVisitStatusRow]: """Given an origin visit id, return its latest origin_visit_status """ return next(self.origin_visit_status_get(origin, visit), None) def origin_visit_status_get( self, origin: str, visit: int, ) -> Iterator[OriginVisitStatusRow]: """Return all origin visit statuses for a given visit """ statuses = [ s for s in self._origin_visit_statuses.get_from_partition_key((origin,)) if s.visit == visit ] statuses.sort(key=lambda s: s.date, reverse=True) return iter(statuses) ########################## # 'metadata_authority' table ########################## def metadata_authority_add(self, authority: MetadataAuthorityRow): self._metadata_authorities.insert(authority) self.increment_counter("metadata_authority", 1) def metadata_authority_get(self, type, url) -> Optional[MetadataAuthorityRow]: return self._metadata_authorities.get_from_primary_key((url, type)) ########################## # 'metadata_fetcher' table ########################## def metadata_fetcher_add(self, fetcher: MetadataFetcherRow): self._metadata_fetchers.insert(fetcher) self.increment_counter("metadata_fetcher", 1) def metadata_fetcher_get(self, name, version) -> Optional[MetadataAuthorityRow]: return self._metadata_fetchers.get_from_primary_key((name, version)) ######################### # 'raw_extrinsic_metadata' table ######################### def raw_extrinsic_metadata_add(self, raw_extrinsic_metadata): self._raw_extrinsic_metadata.insert(raw_extrinsic_metadata) self.increment_counter("raw_extrinsic_metadata", 1) def raw_extrinsic_metadata_get_after_date( self, id: str, authority_type: str, authority_url: str, after: datetime.datetime, ) -> Iterable[RawExtrinsicMetadataRow]: metadata = self.raw_extrinsic_metadata_get(id, authority_type, authority_url) return (m for m in metadata if m.discovery_date > after) def raw_extrinsic_metadata_get_after_date_and_fetcher( self, id: str, authority_type: str, authority_url: str, after_date: datetime.datetime, after_fetcher_name: str, after_fetcher_version: str, ) -> Iterable[RawExtrinsicMetadataRow]: metadata = self._raw_extrinsic_metadata.get_from_partition_key((id,)) after_tuple = (after_date, after_fetcher_name, after_fetcher_version) return ( m for m in metadata if m.authority_type == authority_type and m.authority_url == authority_url and (m.discovery_date, m.fetcher_name, m.fetcher_version) > after_tuple ) def raw_extrinsic_metadata_get( self, id: str, authority_type: str, authority_url: str ) -> Iterable[RawExtrinsicMetadataRow]: metadata = self._raw_extrinsic_metadata.get_from_partition_key((id,)) return ( m for m in metadata if m.authority_type == authority_type and m.authority_url == 
authority_url ) class InMemoryStorage(CassandraStorage): _cql_runner: InMemoryCqlRunner # type: ignore def __init__(self, journal_writer=None): self.reset() self.journal_writer = JournalWriter(journal_writer) def reset(self): self._cql_runner = InMemoryCqlRunner() self.objstorage = ObjStorage({"cls": "memory", "args": {}}) def check_config(self, *, check_write: bool) -> bool: return True diff --git a/swh/storage/postgresql/__init__.py b/swh/storage/postgresql/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/swh/storage/converters.py b/swh/storage/postgresql/converters.py similarity index 97% rename from swh/storage/converters.py rename to swh/storage/postgresql/converters.py index 1fbfa213..a6509456 100644 --- a/swh/storage/converters.py +++ b/swh/storage/postgresql/converters.py @@ -1,330 +1,324 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from typing import Any, Optional, Dict from swh.core.utils import encode_with_unescape from swh.model.identifiers import parse_swhid from swh.model.model import ( MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, ObjectType, Person, RawExtrinsicMetadata, Release, Revision, RevisionType, Timestamp, TimestampWithTimezone, ) -from swh.model.hashutil import MultiHash -from .utils import map_optional +from ..utils import map_optional DEFAULT_AUTHOR = { "fullname": None, "name": None, "email": None, } DEFAULT_DATE = { "timestamp": None, "offset": 0, "neg_utc_offset": None, } def author_to_db(author: Optional[Person]) -> Dict[str, Any]: """Convert a swh-model author to its DB representation. Args: author: a :mod:`swh.model` compatible author Returns: dict: a dictionary with three keys: author, fullname and email """ if author is None: return DEFAULT_AUTHOR return author.to_dict() def db_to_author( fullname: Optional[bytes], name: Optional[bytes], email: Optional[bytes] ) -> Optional[Person]: """Convert the DB representation of an author to a swh-model author. Args: fullname (bytes): the author's fullname name (bytes): the author's name email (bytes): the author's email Returns: a Person object, or None if 'fullname' is None. """ if fullname is None: return None return Person(fullname=fullname, name=name, email=email,) def db_to_git_headers(db_git_headers): ret = [] for key, value in db_git_headers: ret.append([key.encode("utf-8"), encode_with_unescape(value)]) return ret def db_to_date( date: Optional[datetime.datetime], offset: int, neg_utc_offset: Optional[bool] ) -> Optional[TimestampWithTimezone]: """Convert the DB representation of a date to a swh-model compatible date. Args: date: a date pulled out of the database offset: an integer number of minutes representing an UTC offset neg_utc_offset: whether an utc offset is negative Returns: a TimestampWithTimezone, or None if the date is None. """ if date is None: return None if neg_utc_offset is None: # For older versions of the database that were not migrated to schema v160 neg_utc_offset = False return TimestampWithTimezone( timestamp=Timestamp( seconds=int(date.timestamp()), microseconds=date.microsecond, ), offset=offset, negative_utc=neg_utc_offset, ) def date_to_db(ts_with_tz: Optional[TimestampWithTimezone]) -> Dict[str, Any]: """Convert a swh-model date_offset to its DB representation. 
Args: ts_with_tz: a TimestampWithTimezone object Returns: dict: a dictionary with three keys: - timestamp: a date in ISO format - offset: the UTC offset in minutes - neg_utc_offset: a boolean indicating whether a null offset is negative or positive. """ if ts_with_tz is None: return DEFAULT_DATE ts = ts_with_tz.timestamp timestamp = datetime.datetime.fromtimestamp(ts.seconds, datetime.timezone.utc) timestamp = timestamp.replace(microsecond=ts.microseconds) return { # PostgreSQL supports isoformatted timestamps "timestamp": timestamp.isoformat(), "offset": ts_with_tz.offset, "neg_utc_offset": ts_with_tz.negative_utc, } def revision_to_db(revision: Revision) -> Dict[str, Any]: """Convert a swh-model revision to its database representation. """ author = author_to_db(revision.author) date = date_to_db(revision.date) committer = author_to_db(revision.committer) committer_date = date_to_db(revision.committer_date) return { "id": revision.id, "author_fullname": author["fullname"], "author_name": author["name"], "author_email": author["email"], "date": date["timestamp"], "date_offset": date["offset"], "date_neg_utc_offset": date["neg_utc_offset"], "committer_fullname": committer["fullname"], "committer_name": committer["name"], "committer_email": committer["email"], "committer_date": committer_date["timestamp"], "committer_date_offset": committer_date["offset"], "committer_date_neg_utc_offset": committer_date["neg_utc_offset"], "type": revision.type.value, "directory": revision.directory, "message": revision.message, "metadata": None if revision.metadata is None else dict(revision.metadata), "synthetic": revision.synthetic, "extra_headers": revision.extra_headers, "parents": [ {"id": revision.id, "parent_id": parent, "parent_rank": i,} for i, parent in enumerate(revision.parents) ], } def db_to_revision(db_revision: Dict[str, Any]) -> Optional[Revision]: """Convert a database representation of a revision to its swh-model representation.""" if db_revision["type"] is None: assert all( v is None for (k, v) in db_revision.items() if k not in ("id", "parents") ) return None author = db_to_author( db_revision["author_fullname"], db_revision["author_name"], db_revision["author_email"], ) date = db_to_date( db_revision["date"], db_revision["date_offset"], db_revision["date_neg_utc_offset"], ) committer = db_to_author( db_revision["committer_fullname"], db_revision["committer_name"], db_revision["committer_email"], ) committer_date = db_to_date( db_revision["committer_date"], db_revision["committer_date_offset"], db_revision["committer_date_neg_utc_offset"], ) assert author, "author is None" assert committer, "committer is None" parents = [] if "parents" in db_revision: for parent in db_revision["parents"]: if parent: parents.append(parent) metadata = db_revision["metadata"] extra_headers = db_revision["extra_headers"] if not extra_headers: if metadata and "extra_headers" in metadata: extra_headers = db_to_git_headers(metadata.pop("extra_headers")) else: # For older versions of the database that were not migrated to schema v161 extra_headers = () return Revision( id=db_revision["id"], author=author, date=date, committer=committer, committer_date=committer_date, type=RevisionType(db_revision["type"]), directory=db_revision["directory"], message=db_revision["message"], metadata=metadata, synthetic=db_revision["synthetic"], extra_headers=extra_headers, parents=tuple(parents), ) def release_to_db(release: Release) -> Dict[str, Any]: """Convert a swh-model release to its database representation. 
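# A small round-trip sketch for the author/date converters defined in this
# file; the person and timestamp values are arbitrary examples, not data from
# the repository.
import datetime
from swh.model.model import Person, Timestamp, TimestampWithTimezone

person = Person(
    fullname=b"Jane Doe <jane@example.org>", name=b"Jane Doe", email=b"jane@example.org"
)
row = author_to_db(person)
assert db_to_author(row["fullname"], row["name"], row["email"]) == person

ts = TimestampWithTimezone(
    timestamp=Timestamp(seconds=1577836800, microseconds=0),  # 2020-01-01 UTC
    offset=120,
    negative_utc=False,
)
row = date_to_db(ts)
# {'timestamp': '2020-01-01T00:00:00+00:00', 'offset': 120, 'neg_utc_offset': False}
stored = datetime.datetime.fromisoformat(row["timestamp"])  # what the DB hands back
assert db_to_date(stored, row["offset"], row["neg_utc_offset"]) == ts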
""" author = author_to_db(release.author) date = date_to_db(release.date) return { "id": release.id, "author_fullname": author["fullname"], "author_name": author["name"], "author_email": author["email"], "date": date["timestamp"], "date_offset": date["offset"], "date_neg_utc_offset": date["neg_utc_offset"], "name": release.name, "target": release.target, "target_type": release.target_type.value, "comment": release.message, "synthetic": release.synthetic, } def db_to_release(db_release: Dict[str, Any]) -> Optional[Release]: """Convert a database representation of a release to its swh-model representation. """ if db_release["target_type"] is None: assert all(v is None for (k, v) in db_release.items() if k != "id") return None author = db_to_author( db_release["author_fullname"], db_release["author_name"], db_release["author_email"], ) date = db_to_date( db_release["date"], db_release["date_offset"], db_release["date_neg_utc_offset"] ) return Release( author=author, date=date, id=db_release["id"], name=db_release["name"], message=db_release["comment"], synthetic=db_release["synthetic"], target=db_release["target"], target_type=ObjectType(db_release["target_type"]), ) def db_to_raw_extrinsic_metadata(row) -> RawExtrinsicMetadata: type_ = MetadataTargetType(row["raw_extrinsic_metadata.type"]) id_ = row["raw_extrinsic_metadata.id"] if type_ != MetadataTargetType.ORIGIN: id_ = parse_swhid(id_) return RawExtrinsicMetadata( type=type_, id=id_, authority=MetadataAuthority( type=MetadataAuthorityType(row["metadata_authority.type"]), url=row["metadata_authority.url"], ), fetcher=MetadataFetcher( name=row["metadata_fetcher.name"], version=row["metadata_fetcher.version"], ), discovery_date=row["discovery_date"], format=row["format"], metadata=row["raw_extrinsic_metadata.metadata"], origin=row["origin"], visit=row["visit"], snapshot=map_optional(parse_swhid, row["snapshot"]), release=map_optional(parse_swhid, row["release"]), revision=map_optional(parse_swhid, row["revision"]), path=row["path"], directory=map_optional(parse_swhid, row["directory"]), ) - - -def origin_url_to_sha1(origin_url: str) -> bytes: - """Convert an origin URL to a sha1. 
Encodes URL to utf-8.""" - return MultiHash.from_data(origin_url.encode("utf-8"), {"sha1"}).digest()["sha1"] diff --git a/swh/storage/db.py b/swh/storage/postgresql/db.py similarity index 100% rename from swh/storage/db.py rename to swh/storage/postgresql/db.py diff --git a/swh/storage/storage.py b/swh/storage/postgresql/storage.py similarity index 99% rename from swh/storage/storage.py rename to swh/storage/postgresql/storage.py index 8af3ac68..2bba8503 100644 --- a/swh/storage/storage.py +++ b/swh/storage/postgresql/storage.py @@ -1,1434 +1,1438 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import base64 import contextlib import datetime import itertools from collections import defaultdict from contextlib import contextmanager from typing import ( Any, Counter, Dict, Iterable, List, Optional, Tuple, Union, ) import attr import psycopg2 import psycopg2.pool import psycopg2.errors from swh.core.api.serializers import msgpack_loads, msgpack_dumps +from swh.core.db.common import db_transaction_generator, db_transaction from swh.model.identifiers import SWHID from swh.model.model import ( Content, Directory, Origin, OriginVisit, OriginVisitStatus, Revision, Release, SkippedContent, Sha1, Sha1Git, Snapshot, SnapshotBranch, TargetType, SHA1_SIZE, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, MetadataTargetType, RawExtrinsicMetadata, ) from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex +from swh.storage.algos import diff +from swh.storage.exc import StorageArgumentException, StorageDBError, HashCollision from swh.storage.interface import ( ListOrder, PagedResult, PartialBranches, VISIT_STATUSES, ) +from swh.storage.metrics import timed, send_metric, process_metrics from swh.storage.objstorage import ObjStorage -from swh.storage.utils import now +from swh.storage.utils import ( + get_partition_bounds_bytes, + extract_collision_hash, + map_optional, + now, +) +from swh.storage.writer import JournalWriter from . 
import converters -from .common import db_transaction_generator, db_transaction from .db import Db -from .exc import StorageArgumentException, StorageDBError, HashCollision -from .algos import diff -from .metrics import timed, send_metric, process_metrics -from .utils import get_partition_bounds_bytes, extract_collision_hash, map_optional -from .writer import JournalWriter # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 EMPTY_SNAPSHOT_ID = hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e") """Identifier for the empty snapshot""" VALIDATION_EXCEPTIONS = ( KeyError, TypeError, ValueError, psycopg2.errors.CheckViolation, psycopg2.errors.IntegrityError, psycopg2.errors.InvalidTextRepresentation, psycopg2.errors.NotNullViolation, psycopg2.errors.NumericValueOutOfRange, psycopg2.errors.UndefinedFunction, # (raised on wrong argument typs) ) """Exceptions raised by postgresql when validation of the arguments failed.""" @contextlib.contextmanager def convert_validation_exceptions(): """Catches postgresql errors related to invalid arguments, and re-raises a StorageArgumentException.""" try: yield except tuple(VALIDATION_EXCEPTIONS) as e: raise StorageArgumentException(str(e)) class Storage: """SWH storage proxy, encompassing DB and object storage """ def __init__( self, db, objstorage, min_pool_conns=1, max_pool_conns=10, journal_writer=None ): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection obj_root: path to the root of the object storage """ try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) self.journal_writer = JournalWriter(journal_writer) self.objstorage = ObjStorage(objstorage) def get_db(self): if self._db: return self._db else: return Db.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() @contextmanager def db(self): db = None try: db = self.get_db() yield db finally: if db: self.put_db(db) @timed @db_transaction() def check_config(self, *, check_write: bool, db=None, cur=None) -> bool: if not self.objstorage.check_config(check_write=check_write): return False # Check permissions on one of the tables if check_write: check = "INSERT" else: check = "SELECT" cur.execute("select has_table_privilege(current_user, 'content', %s)", (check,)) return cur.fetchone()[0] def _content_unique_key(self, hash, db): """Given a hash (tuple or dict), return a unique key from the aggregation of keys. """ keys = db.content_hash_keys if isinstance(hash, tuple): return hash return tuple([hash[k] for k in keys]) def _content_add_metadata(self, db, cur, content): """Add content to the postgresql database but not the object storage. 
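# A standalone sketch of the get_db()/put_db()/db() pattern used by this class:
# reuse an explicitly injected connection, otherwise borrow from a pool and
# always hand the connection back. The _FakePool/_DbHolder names are made up
# for illustration and are not part of this module.
import contextlib

class _FakePool:
    """Stand-in for psycopg2.pool.ThreadedConnectionPool."""
    def getconn(self):
        return object()
    def putconn(self, conn):
        pass

class _DbHolder:
    def __init__(self, conn=None, pool=None):
        self._db, self._pool = conn, pool
    def get_db(self):
        return self._db if self._db else self._pool.getconn()
    def put_db(self, db):
        if db is not self._db:
            self._pool.putconn(db)
    @contextlib.contextmanager
    def db(self):
        db = self.get_db()
        try:
            yield db
        finally:
            self.put_db(db)

with _DbHolder(pool=_FakePool()).db() as conn:
    pass  # every call site gets a connection and reliably returns it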
""" # create temporary table for metadata injection db.mktemp("content", cur) db.copy_to( (c.to_dict() for c in content), "tmp_content", db.content_add_keys, cur ) # move metadata in place try: db.content_add_from_temp(cur) except psycopg2.IntegrityError as e: if e.diag.sqlstate == "23505" and e.diag.table_name == "content": message_detail = e.diag.message_detail if message_detail: hash_name, hash_id = extract_collision_hash(message_detail) collision_contents_hashes = [ c.hashes() for c in content if c.get_hash(hash_name) == hash_id ] else: constraint_to_hash_name = { "content_pkey": "sha1", "content_sha1_git_idx": "sha1_git", "content_sha256_idx": "sha256", } hash_name = constraint_to_hash_name.get(e.diag.constraint_name) hash_id = None collision_contents_hashes = None raise HashCollision( hash_name, hash_id, collision_contents_hashes ) from None else: raise @timed @process_metrics def content_add(self, content: List[Content]) -> Dict: ctime = now() contents = [attr.evolve(c, ctime=ctime) for c in content] objstorage_summary = self.objstorage.content_add(contents) with self.db() as db: with db.transaction() as cur: missing = list( self.content_missing( map(Content.to_dict, contents), key_hash="sha1_git", db=db, cur=cur, ) ) contents = [c for c in contents if c.sha1_git in missing] self.journal_writer.content_add(contents) self._content_add_metadata(db, cur, contents) return { "content:add": len(contents), "content:add:bytes": objstorage_summary["content:add:bytes"], } @timed @db_transaction() def content_update( self, contents: List[Dict[str, Any]], keys: List[str] = [], db=None, cur=None ) -> None: # TODO: Add a check on input keys. How to properly implement # this? We don't know yet the new columns. self.journal_writer.content_update(contents) db.mktemp("content", cur) select_keys = list(set(db.content_get_metadata_keys).union(set(keys))) with convert_validation_exceptions(): db.copy_to(contents, "tmp_content", select_keys, cur) db.content_update_from_temp(keys_to_update=keys, cur=cur) @timed @process_metrics @db_transaction() def content_add_metadata(self, content: List[Content], db=None, cur=None) -> Dict: missing = self.content_missing( (c.to_dict() for c in content), key_hash="sha1_git", db=db, cur=cur, ) contents = [c for c in content if c.sha1_git in missing] self.journal_writer.content_add_metadata(contents) self._content_add_metadata(db, cur, contents) return { "content:add": len(contents), } @timed def content_get_data(self, content: Sha1) -> Optional[bytes]: # FIXME: Make this method support slicing the `data` return self.objstorage.content_get(content) @timed @db_transaction() def content_get_partition( self, partition_id: int, nb_partitions: int, page_token: Optional[str] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[Content]: if limit is None: raise StorageArgumentException("limit should not be None") (start, end) = get_partition_bounds_bytes( partition_id, nb_partitions, SHA1_SIZE ) if page_token: start = hash_to_bytes(page_token) if end is None: end = b"\xff" * SHA1_SIZE next_page_token: Optional[str] = None contents = [] for counter, row in enumerate(db.content_get_range(start, end, limit + 1, cur)): row_d = dict(zip(db.content_get_metadata_keys, row)) content = Content(**row_d) if counter >= limit: # take the last content for the next page starting from this next_page_token = hash_to_hex(content.sha1) break contents.append(content) assert len(contents) <= limit return PagedResult(results=contents, next_page_token=next_page_token) @timed 
@db_transaction(statement_timeout=500) def content_get( self, contents: List[Sha1], db=None, cur=None ) -> List[Optional[Content]]: contents_by_sha1: Dict[Sha1, Optional[Content]] = {} for row in db.content_get_metadata_from_sha1s(contents, cur): row_d = dict(zip(db.content_get_metadata_keys, row)) content = Content(**row_d) contents_by_sha1[content.sha1] = content return [contents_by_sha1.get(sha1) for sha1 in contents] @timed @db_transaction_generator() def content_missing( self, contents: List[Dict[str, Any]], key_hash: str = "sha1", db=None, cur=None ) -> Iterable[bytes]: if key_hash not in DEFAULT_ALGORITHMS: raise StorageArgumentException( "key_hash should be one of {','.join(DEFAULT_ALGORITHMS)}" ) keys = db.content_hash_keys key_hash_idx = keys.index(key_hash) for obj in db.content_missing_from_list(contents, cur): yield obj[key_hash_idx] @timed @db_transaction_generator() def content_missing_per_sha1( self, contents: List[bytes], db=None, cur=None ) -> Iterable[bytes]: for obj in db.content_missing_per_sha1(contents, cur): yield obj[0] @timed @db_transaction_generator() def content_missing_per_sha1_git( self, contents: List[bytes], db=None, cur=None ) -> Iterable[Sha1Git]: for obj in db.content_missing_per_sha1_git(contents, cur): yield obj[0] @timed @db_transaction() def content_find(self, content: Dict[str, Any], db=None, cur=None) -> List[Content]: if not set(content).intersection(DEFAULT_ALGORITHMS): raise StorageArgumentException( "content keys must contain at least one " f"of: {', '.join(sorted(DEFAULT_ALGORITHMS))}" ) rows = db.content_find( sha1=content.get("sha1"), sha1_git=content.get("sha1_git"), sha256=content.get("sha256"), blake2s256=content.get("blake2s256"), cur=cur, ) contents = [] for row in rows: row_d = dict(zip(db.content_find_cols, row)) contents.append(Content(**row_d)) return contents @timed @db_transaction() def content_get_random(self, db=None, cur=None) -> Sha1Git: return db.content_get_random(cur) @staticmethod def _skipped_content_normalize(d): d = d.copy() if d.get("status") is None: d["status"] = "absent" if d.get("length") is None: d["length"] = -1 return d def _skipped_content_add_metadata(self, db, cur, content: List[SkippedContent]): origin_ids = db.origin_id_get_by_url([cont.origin for cont in content], cur=cur) content = [ attr.evolve(c, origin=origin_id) for (c, origin_id) in zip(content, origin_ids) ] db.mktemp("skipped_content", cur) db.copy_to( [c.to_dict() for c in content], "tmp_skipped_content", db.skipped_content_keys, cur, ) # move metadata in place db.skipped_content_add_from_temp(cur) @timed @process_metrics @db_transaction() def skipped_content_add( self, content: List[SkippedContent], db=None, cur=None ) -> Dict: ctime = now() content = [attr.evolve(c, ctime=ctime) for c in content] missing_contents = self.skipped_content_missing( (c.to_dict() for c in content), db=db, cur=cur, ) content = [ c for c in content if any( all( c.get_hash(algo) == missing_content.get(algo) for algo in DEFAULT_ALGORITHMS ) for missing_content in missing_contents ) ] self.journal_writer.skipped_content_add(content) self._skipped_content_add_metadata(db, cur, content) return { "skipped_content:add": len(content), } @timed @db_transaction_generator() def skipped_content_missing( self, contents: List[Dict[str, Any]], db=None, cur=None ) -> Iterable[Dict[str, Any]]: contents = list(contents) for content in db.skipped_content_missing(contents, cur): yield dict(zip(db.content_hash_keys, content)) @timed @process_metrics @db_transaction() def directory_add(self, 
directories: List[Directory], db=None, cur=None) -> Dict: summary = {"directory:add": 0} dirs = set() dir_entries: Dict[str, defaultdict] = { "file": defaultdict(list), "dir": defaultdict(list), "rev": defaultdict(list), } for cur_dir in directories: dir_id = cur_dir.id dirs.add(dir_id) for src_entry in cur_dir.entries: entry = src_entry.to_dict() entry["dir_id"] = dir_id dir_entries[entry["type"]][dir_id].append(entry) dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur)) if not dirs_missing: return summary self.journal_writer.directory_add( dir_ for dir_ in directories if dir_.id in dirs_missing ) # Copy directory ids dirs_missing_dict = ({"id": dir} for dir in dirs_missing) db.mktemp("directory", cur) db.copy_to(dirs_missing_dict, "tmp_directory", ["id"], cur) # Copy entries for entry_type, entry_list in dir_entries.items(): entries = itertools.chain.from_iterable( entries_for_dir for dir_id, entries_for_dir in entry_list.items() if dir_id in dirs_missing ) db.mktemp_dir_entry(entry_type) db.copy_to( entries, "tmp_directory_entry_%s" % entry_type, ["target", "name", "perms", "dir_id"], cur, ) # Do the final copy db.directory_add_from_temp(cur) summary["directory:add"] = len(dirs_missing) return summary @timed @db_transaction_generator() def directory_missing( self, directories: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: for obj in db.directory_missing_from_list(directories, cur): yield obj[0] @timed @db_transaction_generator(statement_timeout=20000) def directory_ls( self, directory: Sha1Git, recursive: bool = False, db=None, cur=None ) -> Iterable[Dict[str, Any]]: if recursive: res_gen = db.directory_walk(directory, cur=cur) else: res_gen = db.directory_walk_one(directory, cur=cur) for line in res_gen: yield dict(zip(db.directory_ls_cols, line)) @timed @db_transaction(statement_timeout=2000) def directory_entry_get_by_path( self, directory: Sha1Git, paths: List[bytes], db=None, cur=None ) -> Optional[Dict[str, Any]]: res = db.directory_entry_get_by_path(directory, paths, cur) return dict(zip(db.directory_ls_cols, res)) if res else None @timed @db_transaction() def directory_get_random(self, db=None, cur=None) -> Sha1Git: return db.directory_get_random(cur) @timed @process_metrics @db_transaction() def revision_add(self, revisions: List[Revision], db=None, cur=None) -> Dict: summary = {"revision:add": 0} revisions_missing = set( self.revision_missing( set(revision.id for revision in revisions), db=db, cur=cur ) ) if not revisions_missing: return summary db.mktemp_revision(cur) revisions_filtered = [ revision for revision in revisions if revision.id in revisions_missing ] self.journal_writer.revision_add(revisions_filtered) db_revisions_filtered = list(map(converters.revision_to_db, revisions_filtered)) parents_filtered: List[bytes] = [] with convert_validation_exceptions(): db.copy_to( db_revisions_filtered, "tmp_revision", db.revision_add_cols, cur, lambda rev: parents_filtered.extend(rev["parents"]), ) db.revision_add_from_temp(cur) db.copy_to( parents_filtered, "revision_history", ["id", "parent_id", "parent_rank"], cur, ) return {"revision:add": len(revisions_missing)} @timed @db_transaction_generator() def revision_missing( self, revisions: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: if not revisions: return None for obj in db.revision_missing_from_list(revisions, cur): yield obj[0] @timed @db_transaction_generator(statement_timeout=1000) def revision_get( self, revisions: List[Sha1Git], db=None, cur=None ) -> Iterable[Optional[Dict[str, 
Any]]]: for line in db.revision_get_from_list(revisions, cur): data = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) if not data: yield None continue yield data.to_dict() @timed @db_transaction_generator(statement_timeout=2000) def revision_log( self, revisions: List[Sha1Git], limit: Optional[int] = None, db=None, cur=None ) -> Iterable[Optional[Dict[str, Any]]]: for line in db.revision_log(revisions, limit, cur): data = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) if not data: yield None continue yield data.to_dict() @timed @db_transaction_generator(statement_timeout=2000) def revision_shortlog( self, revisions: List[Sha1Git], limit: Optional[int] = None, db=None, cur=None ) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]: yield from db.revision_shortlog(revisions, limit, cur) @timed @db_transaction() def revision_get_random(self, db=None, cur=None) -> Sha1Git: return db.revision_get_random(cur) @timed @process_metrics @db_transaction() def release_add(self, releases: List[Release], db=None, cur=None) -> Dict: summary = {"release:add": 0} release_ids = set(release.id for release in releases) releases_missing = set(self.release_missing(release_ids, db=db, cur=cur)) if not releases_missing: return summary db.mktemp_release(cur) releases_filtered = [ release for release in releases if release.id in releases_missing ] self.journal_writer.release_add(releases_filtered) db_releases_filtered = list(map(converters.release_to_db, releases_filtered)) with convert_validation_exceptions(): db.copy_to(db_releases_filtered, "tmp_release", db.release_add_cols, cur) db.release_add_from_temp(cur) return {"release:add": len(releases_missing)} @timed @db_transaction_generator() def release_missing( self, releases: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: if not releases: return for obj in db.release_missing_from_list(releases, cur): yield obj[0] @timed @db_transaction_generator(statement_timeout=500) def release_get( self, releases: List[Sha1Git], db=None, cur=None ) -> Iterable[Optional[Dict[str, Any]]]: for release in db.release_get_from_list(releases, cur): data = converters.db_to_release(dict(zip(db.release_get_cols, release))) yield data.to_dict() if data else None @timed @db_transaction() def release_get_random(self, db=None, cur=None) -> Sha1Git: return db.release_get_random(cur) @timed @process_metrics @db_transaction() def snapshot_add(self, snapshots: List[Snapshot], db=None, cur=None) -> Dict: created_temp_table = False count = 0 for snapshot in snapshots: if not db.snapshot_exists(snapshot.id, cur): if not created_temp_table: db.mktemp_snapshot_branch(cur) created_temp_table = True with convert_validation_exceptions(): db.copy_to( ( { "name": name, "target": info.target if info else None, "target_type": ( info.target_type.value if info else None ), } for name, info in snapshot.branches.items() ), "tmp_snapshot_branch", ["name", "target", "target_type"], cur, ) self.journal_writer.snapshot_add([snapshot]) db.snapshot_add(snapshot.id, cur) count += 1 return {"snapshot:add": count} @timed @db_transaction_generator() def snapshot_missing( self, snapshots: List[Sha1Git], db=None, cur=None ) -> Iterable[Sha1Git]: for obj in db.snapshot_missing_from_list(snapshots, cur): yield obj[0] @timed @db_transaction(statement_timeout=2000) def snapshot_get( self, snapshot_id: Sha1Git, db=None, cur=None ) -> Optional[Dict[str, Any]]: d = self.snapshot_get_branches(snapshot_id) if d is None: return d return { "id": d["id"], "branches": { name: 
branch.to_dict() if branch else None for (name, branch) in d["branches"].items() }, "next_branch": d["next_branch"], } @timed @db_transaction(statement_timeout=2000) def snapshot_count_branches( self, snapshot_id: Sha1Git, db=None, cur=None ) -> Optional[Dict[Optional[str], int]]: return dict([bc for bc in db.snapshot_count_branches(snapshot_id, cur)]) @timed @db_transaction(statement_timeout=2000) def snapshot_get_branches( self, snapshot_id: Sha1Git, branches_from: bytes = b"", branches_count: int = 1000, target_types: Optional[List[str]] = None, db=None, cur=None, ) -> Optional[PartialBranches]: if snapshot_id == EMPTY_SNAPSHOT_ID: return PartialBranches(id=snapshot_id, branches={}, next_branch=None,) branches = {} next_branch = None fetched_branches = list( db.snapshot_get_by_id( snapshot_id, branches_from=branches_from, branches_count=branches_count + 1, target_types=target_types, cur=cur, ) ) for row in fetched_branches[:branches_count]: branch_d = dict(zip(db.snapshot_get_cols, row)) del branch_d["snapshot_id"] name = branch_d.pop("name") if branch_d["target"] is None and branch_d["target_type"] is None: branch = None else: assert branch_d["target_type"] is not None branch = SnapshotBranch( target=branch_d["target"], target_type=TargetType(branch_d["target_type"]), ) branches[name] = branch if len(fetched_branches) > branches_count: next_branch = dict(zip(db.snapshot_get_cols, fetched_branches[-1]))["name"] if branches: return PartialBranches( id=snapshot_id, branches=branches, next_branch=next_branch, ) return None @timed @db_transaction() def snapshot_get_random(self, db=None, cur=None) -> Sha1Git: return db.snapshot_get_random(cur) @timed @db_transaction() def origin_visit_add( self, visits: List[OriginVisit], db=None, cur=None ) -> Iterable[OriginVisit]: for visit in visits: origin = self.origin_get([visit.origin], db=db, cur=cur)[0] if not origin: # Cannot add a visit without an origin raise StorageArgumentException("Unknown origin %s", visit.origin) all_visits = [] nb_visits = 0 for visit in visits: nb_visits += 1 if not visit.visit: with convert_validation_exceptions(): visit_id = db.origin_visit_add( visit.origin, visit.date, visit.type, cur=cur ) visit = attr.evolve(visit, visit=visit_id) else: db.origin_visit_add_with_id(visit, cur=cur) assert visit.visit is not None all_visits.append(visit) # Forced to write after for the case when the visit has no id self.journal_writer.origin_visit_add([visit]) visit_status = OriginVisitStatus( origin=visit.origin, visit=visit.visit, date=visit.date, status="created", snapshot=None, ) self._origin_visit_status_add(visit_status, db=db, cur=cur) send_metric("origin_visit:add", count=nb_visits, method_name="origin_visit") return all_visits def _origin_visit_status_add( self, visit_status: OriginVisitStatus, db, cur ) -> None: """Add an origin visit status""" self.journal_writer.origin_visit_status_add([visit_status]) db.origin_visit_status_add(visit_status, cur=cur) send_metric( "origin_visit_status:add", count=1, method_name="origin_visit_status" ) @timed @db_transaction() def origin_visit_status_add( self, visit_statuses: List[OriginVisitStatus], db=None, cur=None, ) -> None: # First round to check existence (fail early if any is ko) for visit_status in visit_statuses: origin_url = self.origin_get([visit_status.origin], db=db, cur=cur)[0] if not origin_url: raise StorageArgumentException(f"Unknown origin {visit_status.origin}") for visit_status in visit_statuses: self._origin_visit_status_add(visit_status, db, cur) @timed 
@db_transaction() def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, db=None, cur=None, ) -> Optional[OriginVisitStatus]: if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES): raise StorageArgumentException( f"Unknown allowed statuses {','.join(allowed_statuses)}, only " f"{','.join(VISIT_STATUSES)} authorized" ) row = db.origin_visit_status_get_latest( origin_url, visit, allowed_statuses, require_snapshot, cur=cur ) if not row: return None return OriginVisitStatus.from_dict(row) @timed @db_transaction(statement_timeout=500) def origin_visit_get( self, origin: str, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, db=None, cur=None, ) -> PagedResult[OriginVisit]: page_token = page_token or "0" if not isinstance(order, ListOrder): raise StorageArgumentException("order must be a ListOrder value") if not isinstance(page_token, str): raise StorageArgumentException("page_token must be a string.") next_page_token = None visit_from = int(page_token) visits: List[OriginVisit] = [] extra_limit = limit + 1 for row in db.origin_visit_get_range( origin, visit_from=visit_from, order=order, limit=extra_limit, cur=cur ): row_d = dict(zip(db.origin_visit_cols, row)) visits.append( OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) ) assert len(visits) <= extra_limit if len(visits) == extra_limit: visits = visits[:limit] next_page_token = str(visits[-1].visit) return PagedResult(results=visits, next_page_token=next_page_token) @timed @db_transaction(statement_timeout=500) def origin_visit_find_by_date( self, origin: str, visit_date: datetime.datetime, db=None, cur=None ) -> Optional[OriginVisit]: row_d = db.origin_visit_find_by_date(origin, visit_date, cur=cur) if not row_d: return None return OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) @timed @db_transaction(statement_timeout=500) def origin_visit_get_by( self, origin: str, visit: int, db=None, cur=None ) -> Optional[OriginVisit]: row = db.origin_visit_get(origin, visit, cur) if row: row_d = dict(zip(db.origin_visit_get_cols, row)) return OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) return None @timed @db_transaction(statement_timeout=4000) def origin_visit_get_latest( self, origin: str, type: Optional[str] = None, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, db=None, cur=None, ) -> Optional[OriginVisit]: if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES): raise StorageArgumentException( f"Unknown allowed statuses {','.join(allowed_statuses)}, only " f"{','.join(VISIT_STATUSES)} authorized" ) row = db.origin_visit_get_latest( origin, type=type, allowed_statuses=allowed_statuses, require_snapshot=require_snapshot, cur=cur, ) if row: row_d = dict(zip(db.origin_visit_get_cols, row)) visit = OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) return visit return None @timed @db_transaction(statement_timeout=500) def origin_visit_status_get( self, origin: str, visit: int, page_token: Optional[str] = None, order: ListOrder = ListOrder.ASC, limit: int = 10, db=None, cur=None, ) -> PagedResult[OriginVisitStatus]: next_page_token = None date_from = None if page_token is not None: date_from = datetime.datetime.fromisoformat(page_token) 
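# page_token is the stringified date of the first status of the next page: the
# previous call fetched limit + 1 rows, kept `limit` of them, and exposed the
# date of the extra row as the token (see next_page_token below).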
visit_statuses: List[OriginVisitStatus] = [] # Take one more visit status so we can reuse it as the next page token if any for row in db.origin_visit_status_get_range( origin, visit, date_from=date_from, order=order, limit=limit + 1, cur=cur, ): row_d = dict(zip(db.origin_visit_status_cols, row)) visit_statuses.append( OriginVisitStatus( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], status=row_d["status"], snapshot=row_d["snapshot"], metadata=row_d["metadata"], ) ) if len(visit_statuses) > limit: # last visit status date is the next page token next_page_token = str(visit_statuses[-1].date) # excluding that visit status from the result to respect the limit size visit_statuses = visit_statuses[:limit] return PagedResult(results=visit_statuses, next_page_token=next_page_token) @timed @db_transaction() def origin_visit_status_get_random( self, type: str, db=None, cur=None ) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]: row = db.origin_visit_get_random(type, cur) if row is not None: row_d = dict(zip(db.origin_visit_get_cols, row)) visit = OriginVisit( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], type=row_d["type"], ) visit_status = OriginVisitStatus( origin=row_d["origin"], visit=row_d["visit"], date=row_d["date"], status=row_d["status"], metadata=row_d["metadata"], snapshot=row_d["snapshot"], ) return visit, visit_status return None @timed @db_transaction(statement_timeout=2000) def object_find_by_sha1_git( self, ids: List[Sha1Git], db=None, cur=None ) -> Dict[Sha1Git, List[Dict]]: ret: Dict[Sha1Git, List[Dict]] = {id: [] for id in ids} for retval in db.object_find_by_sha1_git(ids, cur=cur): if retval[1]: ret[retval[0]].append( dict(zip(db.object_find_by_sha1_git_cols, retval)) ) return ret @timed @db_transaction(statement_timeout=500) def origin_get( self, origins: List[str], db=None, cur=None ) -> Iterable[Optional[Origin]]: rows = db.origin_get_by_url(origins, cur) result: List[Optional[Origin]] = [] for row in rows: origin_d = dict(zip(db.origin_cols, row)) url = origin_d["url"] result.append(None if url is None else Origin(url=url)) return result @timed @db_transaction(statement_timeout=500) def origin_get_by_sha1( self, sha1s: List[bytes], db=None, cur=None ) -> List[Optional[Dict[str, Any]]]: return [ dict(zip(db.origin_cols, row)) if row[0] else None for row in db.origin_get_by_sha1(sha1s, cur) ] @timed @db_transaction_generator() def origin_get_range(self, origin_from=1, origin_count=100, db=None, cur=None): for origin in db.origin_get_range(origin_from, origin_count, cur): yield dict(zip(db.origin_get_range_cols, origin)) @timed @db_transaction() def origin_list( self, page_token: Optional[str] = None, limit: int = 100, *, db=None, cur=None ) -> PagedResult[Origin]: page_token = page_token or "0" if not isinstance(page_token, str): raise StorageArgumentException("page_token must be a string.") origin_from = int(page_token) next_page_token = None origins: List[Origin] = [] # Take one more origin so we can reuse it as the next page token if any for row_d in self.origin_get_range(origin_from, limit + 1, db=db, cur=cur): origins.append(Origin(url=row_d["url"])) # keep the last_id for the pagination if needed last_id = row_d["id"] if len(origins) > limit: # data left for subsequent call # last origin id is the next page token next_page_token = str(last_id) # excluding that origin from the result to respect the limit size origins = origins[:limit] assert len(origins) <= limit return PagedResult(results=origins, next_page_token=next_page_token) 
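# The same paging idiom recurs in origin_list above, origin_search below and
# origin_visit_status_get: ask the database for limit + 1 rows, return at most
# limit, and use the presence of the extra row to decide whether to emit a
# next page token. A minimal standalone sketch of that idea, with a plain
# sorted list standing in for the database:
from typing import List, Optional, Tuple

def _paged_sketch(
    rows: List[int], page_token: Optional[str], limit: int
) -> Tuple[List[int], Optional[str]]:
    start = int(page_token) if page_token else 0
    fetched = [r for r in rows if r >= start][: limit + 1]   # limit + 1 rows
    next_token = str(fetched[-1]) if len(fetched) > limit else None
    return fetched[:limit], next_token

_data = [10, 20, 30, 40, 50]
page, token = _paged_sketch(_data, None, 2)     # ([10, 20], "30")
page, token = _paged_sketch(_data, token, 2)    # ([30, 40], "50")
page, token = _paged_sketch(_data, token, 2)    # ([50], None)
assert page == [50] and token is None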
@timed @db_transaction() def origin_search( self, url_pattern: str, page_token: Optional[str] = None, limit: int = 50, regexp: bool = False, with_visit: bool = False, db=None, cur=None, ) -> PagedResult[Origin]: next_page_token = None offset = int(page_token) if page_token else 0 origins = [] # Take one more origin so we can reuse it as the next page token if any for origin in db.origin_search( url_pattern, offset, limit + 1, regexp, with_visit, cur ): row_d = dict(zip(db.origin_cols, origin)) origins.append(Origin(url=row_d["url"])) if len(origins) > limit: # next offset next_page_token = str(offset + limit) # excluding that origin from the result to respect the limit size origins = origins[:limit] assert len(origins) <= limit return PagedResult(results=origins, next_page_token=next_page_token) @timed @db_transaction() def origin_count( self, url_pattern: str, regexp: bool = False, with_visit: bool = False, db=None, cur=None, ) -> int: return db.origin_count(url_pattern, regexp, with_visit, cur) @timed @process_metrics @db_transaction() def origin_add(self, origins: List[Origin], db=None, cur=None) -> Dict[str, int]: urls = [o.url for o in origins] known_origins = set(url for (url,) in db.origin_get_by_url(urls, cur)) # use lists here to keep origins sorted; some tests depend on this to_add = [url for url in urls if url not in known_origins] self.journal_writer.origin_add([Origin(url=url) for url in to_add]) added = 0 for url in to_add: if db.origin_add(url, cur): added += 1 return {"origin:add": added} @db_transaction(statement_timeout=500) def stat_counters(self, db=None, cur=None): return {k: v for (k, v) in db.stat_counters()} @db_transaction() def refresh_stat_counters(self, db=None, cur=None): keys = [ "content", "directory", "directory_entry_dir", "directory_entry_file", "directory_entry_rev", "origin", "origin_visit", "person", "release", "revision", "revision_history", "skipped_content", "snapshot", ] for key in keys: cur.execute("select * from swh_update_counter(%s)", (key,)) @db_transaction() def raw_extrinsic_metadata_add( self, metadata: List[RawExtrinsicMetadata], db, cur, ) -> None: metadata = list(metadata) self.journal_writer.raw_extrinsic_metadata_add(metadata) counter = Counter[MetadataTargetType]() for metadata_entry in metadata: authority_id = self._get_authority_id(metadata_entry.authority, db, cur) fetcher_id = self._get_fetcher_id(metadata_entry.fetcher, db, cur) db.raw_extrinsic_metadata_add( type=metadata_entry.type.value, id=str(metadata_entry.id), discovery_date=metadata_entry.discovery_date, authority_id=authority_id, fetcher_id=fetcher_id, format=metadata_entry.format, metadata=metadata_entry.metadata, origin=metadata_entry.origin, visit=metadata_entry.visit, snapshot=map_optional(str, metadata_entry.snapshot), release=map_optional(str, metadata_entry.release), revision=map_optional(str, metadata_entry.revision), path=metadata_entry.path, directory=map_optional(str, metadata_entry.directory), cur=cur, ) counter[metadata_entry.type] += 1 for (type, count) in counter.items(): send_metric( f"{type.value}_metadata:add", count=count, method_name=f"{type.value}_metadata_add", ) @db_transaction() def raw_extrinsic_metadata_get( self, type: MetadataTargetType, id: Union[str, SWHID], authority: MetadataAuthority, after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, db=None, cur=None, ) -> PagedResult[RawExtrinsicMetadata]: if type == MetadataTargetType.ORIGIN: if isinstance(id, SWHID): raise StorageArgumentException( 
f"raw_extrinsic_metadata_get called with type='origin', " f"but provided id is an SWHID: {id!r}" ) else: if not isinstance(id, SWHID): raise StorageArgumentException( f"raw_extrinsic_metadata_get called with type!='origin', " f"but provided id is not an SWHID: {id!r}" ) if page_token: (after_time, after_fetcher) = msgpack_loads(base64.b64decode(page_token)) if after and after_time < after: raise StorageArgumentException( "page_token is inconsistent with the value of 'after'." ) else: after_time = after after_fetcher = None authority_id = self._get_authority_id(authority, db, cur) if not authority_id: return PagedResult(next_page_token=None, results=[],) rows = db.raw_extrinsic_metadata_get( type, str(id), authority_id, after_time, after_fetcher, limit + 1, cur, ) rows = [dict(zip(db.raw_extrinsic_metadata_get_cols, row)) for row in rows] results = [] for row in rows: assert str(id) == row["raw_extrinsic_metadata.id"] results.append(converters.db_to_raw_extrinsic_metadata(row)) if len(results) > limit: results.pop() assert len(results) == limit last_returned_row = rows[-2] # rows[-1] corresponds to the popped result next_page_token: Optional[str] = base64.b64encode( msgpack_dumps( ( last_returned_row["discovery_date"], last_returned_row["metadata_fetcher.id"], ) ) ).decode() else: next_page_token = None return PagedResult(next_page_token=next_page_token, results=results,) @timed @db_transaction() def metadata_fetcher_add( self, fetchers: List[MetadataFetcher], db=None, cur=None ) -> None: fetchers = list(fetchers) self.journal_writer.metadata_fetcher_add(fetchers) count = 0 for fetcher in fetchers: if fetcher.metadata is None: raise StorageArgumentException( "MetadataFetcher.metadata may not be None in metadata_fetcher_add." ) db.metadata_fetcher_add( fetcher.name, fetcher.version, dict(fetcher.metadata), cur=cur ) count += 1 send_metric("metadata_fetcher:add", count=count, method_name="metadata_fetcher") @timed @db_transaction(statement_timeout=500) def metadata_fetcher_get( self, name: str, version: str, db=None, cur=None ) -> Optional[MetadataFetcher]: row = db.metadata_fetcher_get(name, version, cur=cur) if not row: return None return MetadataFetcher.from_dict(dict(zip(db.metadata_fetcher_cols, row))) @timed @db_transaction() def metadata_authority_add( self, authorities: List[MetadataAuthority], db=None, cur=None ) -> None: authorities = list(authorities) self.journal_writer.metadata_authority_add(authorities) count = 0 for authority in authorities: if authority.metadata is None: raise StorageArgumentException( "MetadataAuthority.metadata may not be None in " "metadata_authority_add." 
) db.metadata_authority_add( authority.type.value, authority.url, dict(authority.metadata), cur=cur ) count += 1 send_metric( "metadata_authority:add", count=count, method_name="metadata_authority" ) @timed @db_transaction() def metadata_authority_get( self, type: MetadataAuthorityType, url: str, db=None, cur=None ) -> Optional[MetadataAuthority]: row = db.metadata_authority_get(type.value, url, cur=cur) if not row: return None return MetadataAuthority.from_dict(dict(zip(db.metadata_authority_cols, row))) @timed def diff_directories(self, from_dir, to_dir, track_renaming=False): return diff.diff_directories(self, from_dir, to_dir, track_renaming) @timed def diff_revisions(self, from_rev, to_rev, track_renaming=False): return diff.diff_revisions(self, from_rev, to_rev, track_renaming) @timed def diff_revision(self, revision, track_renaming=False): return diff.diff_revision(self, revision, track_renaming) def clear_buffers(self, object_types: Optional[List[str]] = None) -> None: """Do nothing """ return None def flush(self, object_types: Optional[List[str]] = None) -> Dict: return {} def _get_authority_id(self, authority: MetadataAuthority, db, cur): authority_id = db.metadata_authority_get_id( authority.type.value, authority.url, cur ) if not authority_id: raise StorageArgumentException(f"Unknown authority {authority}") return authority_id def _get_fetcher_id(self, fetcher: MetadataFetcher, db, cur): fetcher_id = db.metadata_fetcher_get_id(fetcher.name, fetcher.version, cur) if not fetcher_id: raise StorageArgumentException(f"Unknown fetcher {fetcher}") return fetcher_id diff --git a/swh/storage/tests/algos/test_revisions_walker.py b/swh/storage/tests/algos/test_revisions_walker.py index edd20780..c8ba0477 100644 --- a/swh/storage/tests/algos/test_revisions_walker.py +++ b/swh/storage/tests/algos/test_revisions_walker.py @@ -1,511 +1,511 @@ # Copyright (C) 2018-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from unittest.mock import patch from swh.model.hashutil import hash_to_bytes, hash_to_hex from swh.storage.algos.revisions_walker import get_revisions_walker # For those tests, we will walk the following revisions history # with different orderings: # # * commit b364f53155044e5308a0f73abb3b5f01995a5b7d # |\ Merge: 836d498 b94886c # | | Author: Adam # | | AuthorDate: Fri Oct 4 12:50:49 2013 +0200 # | | Commit: Adam # | | CommitDate: Fri Oct 4 12:50:49 2013 +0200 # | | # | | Merge branch 'release/1.0' # | | # | * commit b94886c500c46e32dc3d7ebae8a5409accd592e5 # | | Author: Adam # | | AuthorDate: Fri Oct 4 12:50:38 2013 +0200 # | | Commit: Adam # | | CommitDate: Fri Oct 4 12:50:38 2013 +0200 # | | # | | updating poms for 1.0 release # | | # | * commit 0cb6b4611d65bee0f57821dac7f611e2f8a02433 # | | Author: Adam # | | AuthorDate: Fri Oct 4 12:50:38 2013 +0200 # | | Commit: Adam # | | CommitDate: Fri Oct 4 12:50:38 2013 +0200 # | | # | | updating poms for 1.0 release # | | # | * commit 2b0240c6d682bad51532eec15b8a7ed6b75c8d31 # | | Author: Adam Janicki # | | AuthorDate: Fri Oct 4 12:50:22 2013 +0200 # | | Commit: Adam Janicki # | | CommitDate: Fri Oct 4 12:50:22 2013 +0200 # | | # | | For 1.0 release. Allow untracked. 
# | | # | * commit b401c50863475db4440c85c10ac0b6423b61554d # | | Author: Adam # | | AuthorDate: Fri Oct 4 12:48:12 2013 +0200 # | | Commit: Adam # | | CommitDate: Fri Oct 4 12:48:12 2013 +0200 # | | # | | updating poms for 1.0 release # | | # | * commit 9c5051397e5c2e0c258bb639c3dd34406584ca10 # |/ Author: Adam Janicki # | AuthorDate: Fri Oct 4 12:47:48 2013 +0200 # | Commit: Adam Janicki # | CommitDate: Fri Oct 4 12:47:48 2013 +0200 # | # | For 1.0 release. # | # * commit 836d498396fb9b5d45c896885f84d8d60a5651dc # | Author: Adam Janicki # | AuthorDate: Fri Oct 4 12:08:16 2013 +0200 # | Commit: Adam Janicki # | CommitDate: Fri Oct 4 12:08:16 2013 +0200 # | # | Add ignores # | # * commit ee96c2a2d397b79070d2b6fe3051290963748358 # | Author: Adam # | AuthorDate: Fri Oct 4 10:48:16 2013 +0100 # | Commit: Adam # | CommitDate: Fri Oct 4 10:48:16 2013 +0100 # | # | Reset author # | # * commit 8f89dda8e072383cf50d42532ae8f52ad89f8fdf # Author: Adam # AuthorDate: Fri Oct 4 02:20:19 2013 -0700 # Commit: Adam # CommitDate: Fri Oct 4 02:20:19 2013 -0700 # # Initial commit # raw dump of the above history in swh format _revisions_list = [ { "author": { "email": b"adam.janicki@roche.com", # noqa "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883849}, }, "date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883849}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id": b"\xb3d\xf51U\x04NS\x08\xa0\xf7:\xbb;_\x01\x99Z[}", "message": b"Merge branch 'release/1.0'", "metadata": None, "parents": [ b"\x83mI\x83\x96\xfb\x9b]E\xc8\x96\x88_\x84\xd8\xd6\nVQ\xdc", b"\xb9H\x86\xc5\x00\xc4n2\xdc=~\xba\xe8\xa5@\x9a\xcc\xd5\x92\xe5", ], # noqa "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id": b"\xb9H\x86\xc5\x00\xc4n2\xdc=~\xba\xe8\xa5@\x9a\xcc\xd5\x92\xe5", "message": b"updating poms for 1.0 release", "metadata": None, "parents": [ b"\x0c\xb6\xb4a\x1de\xbe\xe0\xf5x!\xda\xc7\xf6\x11\xe2\xf8\xa0$3" ], # noqa "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883838}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id": b"\x0c\xb6\xb4a\x1de\xbe\xe0\xf5x!\xda\xc7\xf6\x11\xe2\xf8\xa0$3", "message": b"updating poms for 1.0 release", "metadata": None, "parents": [b"+\x02@\xc6\xd6\x82\xba\xd5\x152\xee\xc1[\x8a~\xd6\xb7\\\x8d1"], "synthetic": False, "type": "git", }, { "author": { "email": b"janickia", "fullname": 
b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer_date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883822}, }, "date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883822}, }, "directory": b"\xefX\xe7\xa6\\\xda\xdf\xfdH\xdbH\xfbq\x96@{\x98?9\xfe", "id": b"+\x02@\xc6\xd6\x82\xba\xd5\x152\xee\xc1[\x8a~\xd6\xb7\\\x8d1", "message": b"For 1.0 release. Allow untracked.\n", "metadata": None, "parents": [b"\xb4\x01\xc5\x08cG]\xb4D\x0c\x85\xc1\n\xc0\xb6B;aUM"], "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883692}, }, "date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883692}, }, "directory": b"d@\xe7\x143w\xcb\xf7\xad\xae\x91\xd5\xec\xd8\x95\x82" b"\x02\xa6=\x1b", "id": b"\xb4\x01\xc5\x08cG]\xb4D\x0c\x85\xc1\n\xc0\xb6B;aUM", "message": b"updating poms for 1.0 release", "metadata": None, "parents": [b"\x9cPQ9~\\.\x0c%\x8b\xb69\xc3\xdd4@e\x84\xca\x10"], "synthetic": False, "type": "git", }, { "author": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer_date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883668}, }, "date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380883668}, }, "directory": b"\n\x857\x94r\xbe\xcc\x04=\xe9}\xe5\xfd\xdf?nR\xe6\xa7\x9e", "id": b"\x9cPQ9~\\.\x0c%\x8b\xb69\xc3\xdd4@e\x84\xca\x10", "message": b"For 1.0 release.\n", "metadata": None, "parents": [b"\x83mI\x83\x96\xfb\x9b]E\xc8\x96\x88_\x84\xd8\xd6\nVQ\xdc"], "synthetic": False, "type": "git", }, { "author": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer": { "email": b"janickia", "fullname": b"Adam Janicki ", "id": 8040906, "name": b"Adam Janicki", }, "committer_date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380881296}, }, "date": { "negative_utc": None, "offset": 120, "timestamp": {"microseconds": 0, "seconds": 1380881296}, }, "directory": b".\xf9\xa5\xcb\xb0\xd3\xdc\x9b{\xb8\x81\x03l\xe2P\x16c\x0b|\xe6", # noqa "id": b"\x83mI\x83\x96\xfb\x9b]E\xc8\x96\x88_\x84\xd8\xd6\nVQ\xdc", "message": b"Add ignores\n", "metadata": None, "parents": [b"\xee\x96\xc2\xa2\xd3\x97\xb7\x90p\xd2\xb6\xfe0Q)\tct\x83X"], "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { "negative_utc": None, "offset": 60, "timestamp": {"microseconds": 0, "seconds": 1380880096}, }, "date": { "negative_utc": None, "offset": 60, "timestamp": {"microseconds": 0, "seconds": 1380880096}, }, "directory": b"\xc7r\xc4\x9f\xc0$\xd4\xab\xff\xcb]\xf6<\xcb\x8b~\xec\xc4\xd1)", # noqa "id": 
b"\xee\x96\xc2\xa2\xd3\x97\xb7\x90p\xd2\xb6\xfe0Q)\tct\x83X", "message": b"Reset author\n", "metadata": None, "parents": [b"\x8f\x89\xdd\xa8\xe0r8<\xf5\rBS*\xe8\xf5*\xd8\x9f\x8f\xdf"], "synthetic": False, "type": "git", }, { "author": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer": { "email": b"adam.janicki@roche.com", "fullname": b"Adam ", "id": 8040905, "name": b"Adam", }, "committer_date": { "negative_utc": None, "offset": -420, "timestamp": {"microseconds": 0, "seconds": 1380878419}, }, "date": { "negative_utc": None, "offset": -420, "timestamp": {"microseconds": 0, "seconds": 1380878419}, }, "directory": b"WS\xbaX\xd6x{q\x8f\x020i\xc5\x95\xa01\xf7y\xb2\x80", "id": b"\x8f\x89\xdd\xa8\xe0r8<\xf5\rBS*\xe8\xf5*\xd8\x9f\x8f\xdf", "message": b"Initial commit\n", "metadata": None, "parents": [], "synthetic": False, "type": "git", }, ] _rev_start = "b364f53155044e5308a0f73abb3b5f01995a5b7d" _rev_missing = "836d498396fb9b5d45c896885f84d8d60a5651dc" class RevisionsWalkerTest(unittest.TestCase): def check_revisions_ordering( self, rev_walker_type, expected_result, truncated_history ): - with patch("swh.storage.storage.Storage") as MockStorage: + with patch("swh.storage.postgresql.storage.Storage") as MockStorage: storage = MockStorage() if not truncated_history: storage.revision_log.return_value = _revisions_list else: revs_lists_truncated = [ None if hash_to_hex(rev["id"]) == _rev_missing else rev for rev in _revisions_list ] storage.revision_log.return_value = revs_lists_truncated revs_walker = get_revisions_walker( rev_walker_type, storage, hash_to_bytes(_rev_start) ) self.assertEqual( list(map(hash_to_bytes, expected_result)), [rev["id"] for rev in revs_walker], ) self.assertEqual(revs_walker.is_history_truncated(), truncated_history) if truncated_history: missing_revs = revs_walker.missing_revisions() self.assertEqual(missing_revs, {hash_to_bytes(_rev_missing)}) else: self.assertEqual(revs_walker.missing_revisions(), set()) def test_revisions_walker_committer_date(self): # revisions should be returned in reverse chronological order # of their committer date expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", "836d498396fb9b5d45c896885f84d8d60a5651dc", "ee96c2a2d397b79070d2b6fe3051290963748358", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", ] self.check_revisions_ordering( "committer_date", expected_result, truncated_history=False ) def test_revisions_walker_dfs(self): # revisions should be returned in the same order they are # visited when performing a depth-first search in pre order # on the revisions DAG expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "836d498396fb9b5d45c896885f84d8d60a5651dc", "ee96c2a2d397b79070d2b6fe3051290963748358", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", ] self.check_revisions_ordering("dfs", expected_result, truncated_history=False) def test_revisions_walker_dfs_post(self): # revisions should be returned in the same order they are # visited when performing a depth-first search in post order # on the revisions DAG expected_result = [ 
"b364f53155044e5308a0f73abb3b5f01995a5b7d", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", "836d498396fb9b5d45c896885f84d8d60a5651dc", "ee96c2a2d397b79070d2b6fe3051290963748358", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", ] self.check_revisions_ordering( "dfs_post", expected_result, truncated_history=False ) def test_revisions_walker_bfs(self): # revisions should be returned in the same order they are # visited when performing a breadth-first search on the # revisions DAG expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "836d498396fb9b5d45c896885f84d8d60a5651dc", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "ee96c2a2d397b79070d2b6fe3051290963748358", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "8f89dda8e072383cf50d42532ae8f52ad89f8fdf", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", ] self.check_revisions_ordering("bfs", expected_result, truncated_history=False) def test_revisions_walker_truncated_history(self): expected_result = [ "b364f53155044e5308a0f73abb3b5f01995a5b7d", "b94886c500c46e32dc3d7ebae8a5409accd592e5", "0cb6b4611d65bee0f57821dac7f611e2f8a02433", "2b0240c6d682bad51532eec15b8a7ed6b75c8d31", "b401c50863475db4440c85c10ac0b6423b61554d", "9c5051397e5c2e0c258bb639c3dd34406584ca10", ] for revs_walker_type in ("committer_date", "bfs", "dfs", "dfs_post"): self.check_revisions_ordering( revs_walker_type, expected_result, truncated_history=True ) diff --git a/swh/storage/tests/test_storage.py b/swh/storage/tests/storage_tests.py similarity index 99% rename from swh/storage/tests/test_storage.py rename to swh/storage/tests/storage_tests.py index 006ad10a..0109e9d6 100644 --- a/swh/storage/tests/test_storage.py +++ b/swh/storage/tests/storage_tests.py @@ -1,4123 +1,4123 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import inspect import itertools import math import queue import random import threading from collections import defaultdict from contextlib import contextmanager from datetime import timedelta from unittest.mock import Mock import attr import pytest from hypothesis import given, strategies, settings, HealthCheck from typing import Any, ClassVar, Dict, Iterator, Optional from swh.model import from_disk from swh.model.hashutil import hash_to_bytes from swh.model.identifiers import SWHID from swh.model.model import ( Content, Directory, MetadataTargetType, Origin, OriginVisit, OriginVisitStatus, Person, Release, Revision, Snapshot, TargetType, ) from swh.model.hypothesis_strategies import objects from swh.storage import get_storage -from swh.storage.converters import origin_url_to_sha1 as sha1 +from swh.storage.common import origin_url_to_sha1 as sha1 from swh.storage.exc import HashCollision, StorageArgumentException from swh.storage.interface import ListOrder, PagedResult, StorageInterface from swh.storage.utils import content_hex_hashes, now @contextmanager def db_transaction(storage): with storage.db() as db: with db.transaction() as cur: yield db, cur def transform_entries( storage: StorageInterface, dir_: Directory, *, prefix: bytes = b"" ) -> Iterator[Dict[str, Any]]: """Iterate through 
a directory's entries, and yields the items 'directory_ls' is expected to return; including content metadata for file entries.""" for ent in dir_.entries: if ent.type == "dir": yield { "dir_id": dir_.id, "type": ent.type, "target": ent.target, "name": prefix + ent.name, "perms": ent.perms, "status": None, "sha1": None, "sha1_git": None, "sha256": None, "length": None, } elif ent.type == "file": contents = storage.content_find({"sha1_git": ent.target}) assert contents ent_dict = contents[0].to_dict() for key in ["ctime", "blake2s256"]: ent_dict.pop(key, None) ent_dict.update( { "dir_id": dir_.id, "type": ent.type, "target": ent.target, "name": prefix + ent.name, "perms": ent.perms, } ) yield ent_dict def assert_contents_ok( expected_contents, actual_contents, keys_to_check={"sha1", "data"} ): """Assert that a given list of contents matches on a given set of keys. """ for k in keys_to_check: expected_list = set([c.get(k) for c in expected_contents]) actual_list = set([c.get(k) for c in actual_contents]) assert actual_list == expected_list, k def round_to_milliseconds(date): """Round datetime to milliseconds before insertion, so equality doesn't fail after a round-trip through a DB (eg. Cassandra) """ return date.replace(microsecond=(date.microsecond // 1000) * 1000) def test_round_to_milliseconds(): date = now() for (ms, expected_ms) in [(0, 0), (1000, 1000), (555555, 555000), (999500, 999000)]: date = date.replace(microsecond=ms) actual_date = round_to_milliseconds(date) assert actual_date.microsecond == expected_ms class LazyContent(Content): def with_data(self): return Content.from_dict({**self.to_dict(), "data": b"42\n"}) class TestStorage: """Main class for Storage testing. This class is used as-is to test local storage (see TestLocalStorage below) and remote storage (see TestRemoteStorage in test_remote_storage.py. We need to have the two classes inherit from this base class separately to avoid nosetests running the tests from the base class twice. 
""" maxDiff = None # type: ClassVar[Optional[int]] def test_types(self, swh_storage_backend_config): """Checks all methods of StorageInterface are implemented by this backend, and that they have the same signature.""" # Create an instance of the protocol (which cannot be instantiated # directly, so this creates a subclass, then instantiates it) interface = type("_", (StorageInterface,), {})() storage = get_storage(**swh_storage_backend_config) assert "content_add" in dir(interface) missing_methods = [] for meth_name in dir(interface): if meth_name.startswith("_"): continue interface_meth = getattr(interface, meth_name) try: concrete_meth = getattr(storage, meth_name) except AttributeError: if not getattr(interface_meth, "deprecated_endpoint", False): # The backend is missing a (non-deprecated) endpoint missing_methods.append(meth_name) continue expected_signature = inspect.signature(interface_meth) actual_signature = inspect.signature(concrete_meth) assert expected_signature == actual_signature, meth_name assert missing_methods == [] def test_check_config(self, swh_storage): assert swh_storage.check_config(check_write=True) assert swh_storage.check_config(check_write=False) def test_content_add(self, swh_storage, sample_data): cont = sample_data.content insertion_start_time = now() actual_result = swh_storage.content_add([cont]) insertion_end_time = now() assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } assert swh_storage.content_get_data(cont.sha1) == cont.data expected_cont = attr.evolve(cont, data=None) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: assert insertion_start_time <= obj.ctime assert obj.ctime <= insertion_end_time assert obj == expected_cont swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["content"] == 1 def test_content_add_from_lazy_content(self, swh_storage, sample_data): cont = sample_data.content lazy_content = LazyContent.from_dict(cont.to_dict()) insertion_start_time = now() actual_result = swh_storage.content_add([lazy_content]) insertion_end_time = now() assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } # the fact that we retrieve the content object from the storage with # the correct 'data' field ensures it has been 'called' assert swh_storage.content_get_data(cont.sha1) == cont.data expected_cont = attr.evolve(lazy_content, data=None, ctime=None) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: assert insertion_start_time <= obj.ctime assert obj.ctime <= insertion_end_time assert attr.evolve(obj, ctime=None).to_dict() == expected_cont.to_dict() swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["content"] == 1 def test_content_get_data_missing(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] swh_storage.content_add([cont]) # Query a single missing content actual_content_data = swh_storage.content_get_data(cont2.sha1) assert actual_content_data is None # Check content_get does not abort after finding a missing content actual_content_data = swh_storage.content_get_data(cont.sha1) assert actual_content_data == cont.data actual_content_data = swh_storage.content_get_data(cont2.sha1) assert actual_content_data is None def test_content_add_different_input(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] 
actual_result = swh_storage.content_add([cont, cont2]) assert actual_result == { "content:add": 2, "content:add:bytes": cont.length + cont2.length, } def test_content_add_twice(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] actual_result = swh_storage.content_add([cont]) assert actual_result == { "content:add": 1, "content:add:bytes": cont.length, } assert len(swh_storage.journal_writer.journal.objects) == 1 actual_result = swh_storage.content_add([cont, cont2]) assert actual_result == { "content:add": 1, "content:add:bytes": cont2.length, } assert 2 <= len(swh_storage.journal_writer.journal.objects) <= 3 assert len(swh_storage.content_find(cont.to_dict())) == 1 assert len(swh_storage.content_find(cont2.to_dict())) == 1 def test_content_add_collision(self, swh_storage, sample_data): cont1 = sample_data.content # create (corrupted) content with same sha1{,_git} but != sha256 sha256_array = bytearray(cont1.sha256) sha256_array[0] += 1 cont1b = attr.evolve(cont1, sha256=bytes(sha256_array)) with pytest.raises(HashCollision) as cm: swh_storage.content_add([cont1, cont1b]) exc = cm.value actual_algo = exc.algo assert actual_algo in ["sha1", "sha1_git"] actual_id = exc.hash_id assert actual_id == getattr(cont1, actual_algo).hex() collisions = exc.args[2] assert len(collisions) == 2 assert collisions == [ content_hex_hashes(cont1.hashes()), content_hex_hashes(cont1b.hashes()), ] assert exc.colliding_content_hashes() == [ cont1.hashes(), cont1b.hashes(), ] def test_content_add_duplicate(self, swh_storage, sample_data): cont = sample_data.content swh_storage.content_add([cont, cont]) assert swh_storage.content_get_data(cont.sha1) == cont.data def test_content_update(self, swh_storage, sample_data): cont1 = sample_data.content if hasattr(swh_storage, "journal_writer"): swh_storage.journal_writer.journal = None # TODO, not supported swh_storage.content_add([cont1]) # alter the sha1_git for example cont1b = attr.evolve( cont1, sha1_git=hash_to_bytes("3a60a5275d0333bf13468e8b3dcab90f4046e654") ) swh_storage.content_update([cont1b.to_dict()], keys=["sha1_git"]) actual_contents = swh_storage.content_get([cont1.sha1]) expected_content = attr.evolve(cont1b, data=None) assert actual_contents == [expected_content] def test_content_add_metadata(self, swh_storage, sample_data): cont = attr.evolve(sample_data.content, data=None, ctime=now()) actual_result = swh_storage.content_add_metadata([cont]) assert actual_result == { "content:add": 1, } expected_cont = cont assert swh_storage.content_get([cont.sha1]) == [expected_cont] contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 for obj in contents: obj = attr.evolve(obj, ctime=None) assert obj == cont def test_content_add_metadata_different_input(self, swh_storage, sample_data): contents = sample_data.contents[:2] cont = attr.evolve(contents[0], data=None, ctime=now()) cont2 = attr.evolve(contents[1], data=None, ctime=now()) actual_result = swh_storage.content_add_metadata([cont, cont2]) assert actual_result == { "content:add": 2, } def test_content_add_metadata_collision(self, swh_storage, sample_data): cont1 = attr.evolve(sample_data.content, data=None, ctime=now()) # create (corrupted) content with same sha1{,_git} but != sha256 sha1_git_array = bytearray(cont1.sha256) sha1_git_array[0] += 1 cont1b = attr.evolve(cont1, sha256=bytes(sha1_git_array)) with pytest.raises(HashCollision) as cm: swh_storage.content_add_metadata([cont1, cont1b]) exc = cm.value 
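# the HashCollision exception exposes the colliding algorithm, the hex hash id, and the two colliding hash dicts; check each in turn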
actual_algo = exc.algo assert actual_algo in ["sha1", "sha1_git", "blake2s256"] actual_id = exc.hash_id assert actual_id == getattr(cont1, actual_algo).hex() collisions = exc.args[2] assert len(collisions) == 2 assert collisions == [ content_hex_hashes(cont1.hashes()), content_hex_hashes(cont1b.hashes()), ] assert exc.colliding_content_hashes() == [ cont1.hashes(), cont1b.hashes(), ] def test_skipped_content_add(self, swh_storage, sample_data): contents = sample_data.skipped_contents[:2] cont = contents[0] cont2 = attr.evolve(contents[1], blake2s256=None) contents_dict = [c.to_dict() for c in [cont, cont2]] missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [cont.hashes(), cont2.hashes()] actual_result = swh_storage.skipped_content_add([cont, cont, cont2]) assert 2 <= actual_result.pop("skipped_content:add") <= 3 assert actual_result == {} missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [] def test_skipped_content_add_missing_hashes(self, swh_storage, sample_data): cont, cont2 = [ attr.evolve(c, sha1_git=None) for c in sample_data.skipped_contents[:2] ] contents_dict = [c.to_dict() for c in [cont, cont2]] missing = list(swh_storage.skipped_content_missing(contents_dict)) assert len(missing) == 2 actual_result = swh_storage.skipped_content_add([cont, cont, cont2]) assert 2 <= actual_result.pop("skipped_content:add") <= 3 assert actual_result == {} missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [] def test_skipped_content_missing_partial_hash(self, swh_storage, sample_data): cont = sample_data.skipped_content cont2 = attr.evolve(cont, sha1_git=None) contents_dict = [c.to_dict() for c in [cont, cont2]] missing = list(swh_storage.skipped_content_missing(contents_dict)) assert len(missing) == 2 actual_result = swh_storage.skipped_content_add([cont]) assert actual_result.pop("skipped_content:add") == 1 assert actual_result == {} missing = list(swh_storage.skipped_content_missing(contents_dict)) assert missing == [cont2.hashes()] @pytest.mark.property_based @settings(deadline=None) # this test is very slow @given( strategies.sets( elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]), min_size=0, ) ) def test_content_missing(self, swh_storage, sample_data, algos): algos |= {"sha1"} content, missing_content = [sample_data.content2, sample_data.skipped_content] swh_storage.content_add([content]) test_contents = [content.to_dict()] missing_per_hash = defaultdict(list) for i in range(256): test_content = missing_content.to_dict() for hash in algos: test_content[hash] = bytes([i]) + test_content[hash][1:] missing_per_hash[hash].append(test_content[hash]) test_contents.append(test_content) assert set(swh_storage.content_missing(test_contents)) == set( missing_per_hash["sha1"] ) for hash in algos: assert set( swh_storage.content_missing(test_contents, key_hash=hash) ) == set(missing_per_hash[hash]) @pytest.mark.property_based @given( strategies.sets( elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]), min_size=0, ) ) def test_content_missing_unknown_algo(self, swh_storage, sample_data, algos): algos |= {"sha1"} content, missing_content = [sample_data.content2, sample_data.skipped_content] swh_storage.content_add([content]) test_contents = [content.to_dict()] missing_per_hash = defaultdict(list) for i in range(16): test_content = missing_content.to_dict() for hash in algos: test_content[hash] = bytes([i]) + test_content[hash][1:] 
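# keep the altered digest per algorithm so the expected missing set can later be compared for each key_hash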
missing_per_hash[hash].append(test_content[hash]) test_content["nonexisting_algo"] = b"\x00" test_contents.append(test_content) assert set(swh_storage.content_missing(test_contents)) == set( missing_per_hash["sha1"] ) for hash in algos: assert set( swh_storage.content_missing(test_contents, key_hash=hash) ) == set(missing_per_hash[hash]) def test_content_missing_per_sha1(self, swh_storage, sample_data): # given cont = sample_data.content cont2 = sample_data.content2 missing_cont = sample_data.skipped_content missing_cont2 = sample_data.skipped_content2 swh_storage.content_add([cont, cont2]) # when gen = swh_storage.content_missing_per_sha1( [cont.sha1, missing_cont.sha1, cont2.sha1, missing_cont2.sha1] ) # then assert list(gen) == [missing_cont.sha1, missing_cont2.sha1] def test_content_missing_per_sha1_git(self, swh_storage, sample_data): cont, cont2 = sample_data.contents[:2] missing_cont = sample_data.skipped_content swh_storage.content_add([cont, cont2]) contents = [cont.sha1_git, cont2.sha1_git, missing_cont.sha1_git] missing_contents = swh_storage.content_missing_per_sha1_git(contents) assert list(missing_contents) == [missing_cont.sha1_git] def test_content_get_partition(self, swh_storage, swh_contents): """content_get_partition paginates results if limit exceeded""" expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_contents = [] for i in range(16): actual_result = swh_storage.content_get_partition(i, 16) assert actual_result.next_page_token is None actual_contents.extend(actual_result.results) assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents def test_content_get_partition_full(self, swh_storage, swh_contents): """content_get_partition for a single partition returns all available contents """ expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_result = swh_storage.content_get_partition(0, 1) assert actual_result.next_page_token is None actual_contents = actual_result.results assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents def test_content_get_partition_empty(self, swh_storage, swh_contents): """content_get_partition when at least one of the partitions is empty""" expected_contents = { cont.sha1 for cont in swh_contents if cont.status != "absent" } # nb_partitions = smallest power of 2 such that at least one of # the partitions is empty nb_partitions = 1 << math.floor(math.log2(len(swh_contents)) + 1) seen_sha1s = [] for i in range(nb_partitions): actual_result = swh_storage.content_get_partition( i, nb_partitions, limit=len(swh_contents) + 1 ) for content in actual_result.results: seen_sha1s.append(content.sha1) # Limit is higher than the max number of results assert actual_result.next_page_token is None assert set(seen_sha1s) == expected_contents def test_content_get_partition_limit_none(self, swh_storage): """content_get_partition call with wrong limit input should fail""" with pytest.raises(StorageArgumentException, match="limit should not be None"): swh_storage.content_get_partition(1, 16, limit=None) def test_content_get_partition_pagination_generate(self, swh_storage, swh_contents): """content_get_partition returns contents within range provided""" expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] # retrieve contents actual_contents = [] for i in range(4): page_token = None while True: 
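# fetch pages for this partition until content_get_partition stops returning a next_page_token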
actual_result = swh_storage.content_get_partition( i, 4, limit=3, page_token=page_token ) actual_contents.extend(actual_result.results) page_token = actual_result.next_page_token if page_token is None: break assert len(actual_contents) == len(expected_contents) for content in actual_contents: assert content in expected_contents def test_content_get(self, swh_storage, sample_data): cont1, cont2 = sample_data.contents[:2] swh_storage.content_add([cont1, cont2]) actual_contents = swh_storage.content_get([cont1.sha1, cont2.sha1]) # we only retrieve the metadata so no data nor ctime within expected_contents = [attr.evolve(c, data=None) for c in [cont1, cont2]] assert actual_contents == expected_contents def test_content_get_missing_sha1(self, swh_storage, sample_data): cont1, cont2 = sample_data.contents[:2] assert cont1.sha1 != cont2.sha1 missing_cont = sample_data.skipped_content swh_storage.content_add([cont1, cont2]) actual_contents = swh_storage.content_get( [cont1.sha1, cont2.sha1, missing_cont.sha1] ) expected_contents = [ attr.evolve(c, data=None) if c else None for c in [cont1, cont2, None] ] assert actual_contents == expected_contents def test_content_get_random(self, swh_storage, sample_data): cont, cont2, cont3 = sample_data.contents[:3] swh_storage.content_add([cont, cont2, cont3]) assert swh_storage.content_get_random() in { cont.sha1_git, cont2.sha1_git, cont3.sha1_git, } def test_directory_add(self, swh_storage, sample_data): content = sample_data.content directory = sample_data.directories[1] assert directory.entries[0].target == content.sha1_git swh_storage.content_add([content]) init_missing = list(swh_storage.directory_missing([directory.id])) assert [directory.id] == init_missing actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 1} assert ("directory", directory) in list( swh_storage.journal_writer.journal.objects ) actual_data = list(swh_storage.directory_ls(directory.id)) expected_data = list(transform_entries(swh_storage, directory)) for data in actual_data: assert data in expected_data after_missing = list(swh_storage.directory_missing([directory.id])) assert after_missing == [] swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["directory"] == 1 def test_directory_add_twice(self, swh_storage, sample_data): directory = sample_data.directories[1] actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("directory", directory) ] actual_result = swh_storage.directory_add([directory]) assert actual_result == {"directory:add": 0} assert list(swh_storage.journal_writer.journal.objects) == [ ("directory", directory) ] def test_directory_ls_recursive(self, swh_storage, sample_data): # create consistent dataset regarding the directories we want to list content, content2 = sample_data.contents[:2] swh_storage.content_add([content, content2]) dir1, dir2, dir3 = sample_data.directories[:3] dir_ids = [d.id for d in [dir1, dir2, dir3]] init_missing = list(swh_storage.directory_missing(dir_ids)) assert init_missing == dir_ids actual_result = swh_storage.directory_add([dir1, dir2, dir3]) assert actual_result == {"directory:add": 3} # List directory containing one file actual_data = list(swh_storage.directory_ls(dir1.id, recursive=True)) expected_data = list(transform_entries(swh_storage, dir1)) for data in actual_data: assert data in expected_data # List directory containing a file and an unknown subdirectory 
actual_data = list(swh_storage.directory_ls(dir2.id, recursive=True)) expected_data = list(transform_entries(swh_storage, dir2)) for data in actual_data: assert data in expected_data # List directory containing both a known and unknown subdirectory, entries # should be both those of the directory and of the known subdir (up to contents) actual_data = list(swh_storage.directory_ls(dir3.id, recursive=True)) expected_data = list( itertools.chain( transform_entries(swh_storage, dir3), transform_entries(swh_storage, dir2, prefix=b"subdir/"), ) ) for data in actual_data: assert data in expected_data def test_directory_ls_non_recursive(self, swh_storage, sample_data): # create consistent dataset regarding the directories we want to list content, content2 = sample_data.contents[:2] swh_storage.content_add([content, content2]) dir1, dir2, dir3, _, dir5 = sample_data.directories[:5] dir_ids = [d.id for d in [dir1, dir2, dir3, dir5]] init_missing = list(swh_storage.directory_missing(dir_ids)) assert init_missing == dir_ids actual_result = swh_storage.directory_add([dir1, dir2, dir3, dir5]) assert actual_result == {"directory:add": 4} # List directory containing a file and an unknown subdirectory actual_data = list(swh_storage.directory_ls(dir1.id)) expected_data = list(transform_entries(swh_storage, dir1)) for data in actual_data: assert data in expected_data # List directory containing a single file actual_data = list(swh_storage.directory_ls(dir2.id)) expected_data = list(transform_entries(swh_storage, dir2)) for data in actual_data: assert data in expected_data # List directory containing a known subdirectory, entries should # only be those of the parent directory, not of the subdir actual_data = list(swh_storage.directory_ls(dir3.id)) expected_data = list(transform_entries(swh_storage, dir3)) for data in actual_data: assert data in expected_data def test_directory_entry_get_by_path(self, swh_storage, sample_data): cont, content2 = sample_data.contents[:2] dir1, dir2, dir3, dir4, dir5 = sample_data.directories[:5] # given dir_ids = [d.id for d in [dir1, dir2, dir3, dir4, dir5]] init_missing = list(swh_storage.directory_missing(dir_ids)) assert init_missing == dir_ids actual_result = swh_storage.directory_add([dir3, dir4]) assert actual_result == {"directory:add": 2} expected_entries = [ { "dir_id": dir3.id, "name": b"foo", "type": "file", "target": cont.sha1_git, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.content, "length": None, }, { "dir_id": dir3.id, "name": b"subdir", "type": "dir", "target": dir2.id, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.directory, "length": None, }, { "dir_id": dir3.id, "name": b"hello", "type": "file", "target": content2.sha1_git, "sha1": None, "sha1_git": None, "sha256": None, "status": None, "perms": from_disk.DentryPerms.content, "length": None, }, ] # when (all must be found here) for entry, expected_entry in zip(dir3.entries, expected_entries): actual_entry = swh_storage.directory_entry_get_by_path( dir3.id, [entry.name] ) assert actual_entry == expected_entry # same, but deeper for entry, expected_entry in zip(dir3.entries, expected_entries): actual_entry = swh_storage.directory_entry_get_by_path( dir4.id, [b"subdir1", entry.name] ) expected_entry = expected_entry.copy() expected_entry["name"] = b"subdir1/" + expected_entry["name"] assert actual_entry == expected_entry # when (nothing should be found here since `dir` is not persisted.) 
for entry in dir2.entries: actual_entry = swh_storage.directory_entry_get_by_path( dir2.id, [entry.name] ) assert actual_entry is None def test_directory_get_random(self, swh_storage, sample_data): dir1, dir2, dir3 = sample_data.directories[:3] swh_storage.directory_add([dir1, dir2, dir3]) assert swh_storage.directory_get_random() in { dir1.id, dir2.id, dir3.id, } def test_revision_add(self, swh_storage, sample_data): revision = sample_data.revision init_missing = swh_storage.revision_missing([revision.id]) assert list(init_missing) == [revision.id] actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 1} end_missing = swh_storage.revision_missing([revision.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("revision", revision) ] # already there so nothing added actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 0} swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["revision"] == 1 def test_revision_add_twice(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] actual_result = swh_storage.revision_add([revision]) assert actual_result == {"revision:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("revision", revision) ] actual_result = swh_storage.revision_add([revision, revision2]) assert actual_result == {"revision:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("revision", revision), ("revision", revision2), ] def test_revision_add_name_clash(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] revision1 = attr.evolve( revision, author=Person( fullname=b"John Doe ", name=b"John Doe", email=b"john.doe@example.com", ), ) revision2 = attr.evolve( revision2, author=Person( fullname=b"John Doe ", name=b"John Doe ", email=b"john.doe@example.com ", ), ) actual_result = swh_storage.revision_add([revision1, revision2]) assert actual_result == {"revision:add": 2} def test_revision_get_order(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] add_result = swh_storage.revision_add([revision, revision2]) assert add_result == {"revision:add": 2} # order 1 res1 = swh_storage.revision_get([revision.id, revision2.id]) assert [Revision.from_dict(r) for r in res1] == [revision, revision2] # order 2 res2 = swh_storage.revision_get([revision2.id, revision.id]) assert [Revision.from_dict(r) for r in res2] == [revision2, revision] def test_revision_log(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # rev4 -is-child-of-> rev3 -> rev1, (rev2 -> rev1) swh_storage.revision_add([revision1, revision2, revision3, revision4]) # when results = list(swh_storage.revision_log([revision4.id])) # for comparison purposes actual_results = [Revision.from_dict(r) for r in results] assert len(actual_results) == 4 # rev4 -child-> rev3 -> rev1, (rev2 -> rev1) assert actual_results == [revision4, revision3, revision1, revision2] def test_revision_log_with_limit(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # revision4 -is-child-of-> revision3 swh_storage.revision_add([revision3, revision4]) results = list(swh_storage.revision_log([revision4.id], 1)) actual_results = [Revision.from_dict(r) for r in results] assert len(actual_results) == 1 assert actual_results[0] == revision4 def test_revision_log_unknown_revision(self, swh_storage, 
sample_data): revision = sample_data.revision rev_log = list(swh_storage.revision_log([revision.id])) assert rev_log == [] def test_revision_shortlog(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # rev4 -is-child-of-> rev3 -> (rev1, rev2); rev2 -> rev1 swh_storage.revision_add([revision1, revision2, revision3, revision4]) results = list(swh_storage.revision_shortlog([revision4.id])) actual_results = [[id, tuple(parents)] for (id, parents) in results] assert len(actual_results) == 4 assert actual_results == [ [revision4.id, revision4.parents], [revision3.id, revision3.parents], [revision1.id, revision1.parents], [revision2.id, revision2.parents], ] def test_revision_shortlog_with_limit(self, swh_storage, sample_data): revision1, revision2, revision3, revision4 = sample_data.revisions[:4] # revision4 -is-child-of-> revision3 swh_storage.revision_add([revision1, revision2, revision3, revision4]) results = list(swh_storage.revision_shortlog([revision4.id], 1)) actual_results = [[id, tuple(parents)] for (id, parents) in results] assert len(actual_results) == 1 assert list(actual_results[0]) == [revision4.id, revision4.parents] def test_revision_get(self, swh_storage, sample_data): revision, revision2 = sample_data.revisions[:2] swh_storage.revision_add([revision]) actual_revisions = list(swh_storage.revision_get([revision.id, revision2.id])) assert len(actual_revisions) == 2 assert Revision.from_dict(actual_revisions[0]) == revision assert actual_revisions[1] is None def test_revision_get_no_parents(self, swh_storage, sample_data): revision = sample_data.revision swh_storage.revision_add([revision]) get = list(swh_storage.revision_get([revision.id])) assert len(get) == 1 assert revision.parents == () assert tuple(get[0]["parents"]) == () # no parents on this one def test_revision_get_random(self, swh_storage, sample_data): revision1, revision2, revision3 = sample_data.revisions[:3] swh_storage.revision_add([revision1, revision2, revision3]) assert swh_storage.revision_get_random() in { revision1.id, revision2.id, revision3.id, } def test_release_add(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] init_missing = swh_storage.release_missing([release.id, release2.id]) assert list(init_missing) == [release.id, release2.id] actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 2} end_missing = swh_storage.release_missing([release.id, release2.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release), ("release", release2), ] # already present so nothing added actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 0} swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["release"] == 2 def test_release_add_no_author_date(self, swh_storage, sample_data): full_release = sample_data.release release = attr.evolve(full_release, author=None, date=None) actual_result = swh_storage.release_add([release]) assert actual_result == {"release:add": 1} end_missing = swh_storage.release_missing([release.id]) assert list(end_missing) == [] assert list(swh_storage.journal_writer.journal.objects) == [ ("release", release) ] def test_release_add_twice(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] actual_result = swh_storage.release_add([release]) assert actual_result == {"release:add": 1} assert 
list(swh_storage.journal_writer.journal.objects) == [ ("release", release) ] actual_result = swh_storage.release_add([release, release2, release, release2]) assert actual_result == {"release:add": 1} assert set(swh_storage.journal_writer.journal.objects) == set( [("release", release), ("release", release2),] ) def test_release_add_name_clash(self, swh_storage, sample_data): release, release2 = [ attr.evolve( c, author=Person( fullname=b"John Doe ", name=b"John Doe", email=b"john.doe@example.com", ), ) for c in sample_data.releases[:2] ] actual_result = swh_storage.release_add([release, release2]) assert actual_result == {"release:add": 2} def test_release_get(self, swh_storage, sample_data): release, release2, release3 = sample_data.releases[:3] # given swh_storage.release_add([release, release2]) # when releases = list(swh_storage.release_get([release.id, release2.id])) actual_releases = [Release.from_dict(r) for r in releases] # then assert actual_releases == [release, release2] unknown_releases = list(swh_storage.release_get([release3.id])) assert unknown_releases[0] is None def test_release_get_order(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] add_result = swh_storage.release_add([release, release2]) assert add_result == {"release:add": 2} # order 1 res1 = swh_storage.release_get([release.id, release2.id]) assert list(res1) == [release.to_dict(), release2.to_dict()] # order 2 res2 = swh_storage.release_get([release2.id, release.id]) assert list(res2) == [release2.to_dict(), release.to_dict()] def test_release_get_random(self, swh_storage, sample_data): release, release2, release3 = sample_data.releases[:3] swh_storage.release_add([release, release2, release3]) assert swh_storage.release_get_random() in { release.id, release2.id, release3.id, } def test_origin_add(self, swh_storage, sample_data): origins = list(sample_data.origins[:2]) origin_urls = [o.url for o in origins] assert swh_storage.origin_get(origin_urls) == [None, None] stats = swh_storage.origin_add(origins) assert stats == {"origin:add": 2} actual_origins = swh_storage.origin_get(origin_urls) assert actual_origins == origins assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origins[0]), ("origin", origins[1]),] ) swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["origin"] == 2 def test_origin_add_twice(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] add1 = swh_storage.origin_add([origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add1 == {"origin:add": 2} add2 = swh_storage.origin_add([origin, origin2]) assert set(swh_storage.journal_writer.journal.objects) == set( [("origin", origin), ("origin", origin2),] ) assert add2 == {"origin:add": 0} def test_origin_get(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] assert swh_storage.origin_get([origin.url]) == [None] swh_storage.origin_add([origin]) actual_origins = swh_storage.origin_get([origin.url]) assert actual_origins == [origin] actual_origins = swh_storage.origin_get([origin.url, "not://exists"]) assert actual_origins == [origin, None] def _generate_random_visits(self, nb_visits=100, start=0, end=7): """Generate random visits within the last 2 months (to avoid computations) """ visits = [] today = now() for weeks in range(nb_visits, 0, -1): hours = random.randint(0, 24) minutes = random.randint(0, 60) seconds = random.randint(0, 60) days = random.randint(0, 
28) weeks = random.randint(start, end) date_visit = today - timedelta( weeks=weeks, hours=hours, minutes=minutes, seconds=seconds, days=days ) visits.append(date_visit) return visits def test_origin_visit_get__unknown_origin(self, swh_storage): actual_page = swh_storage.origin_visit_get("foo") assert actual_page.next_page_token is None assert actual_page.results == [] assert actual_page == PagedResult() def test_origin_visit_get__validation_failure(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) with pytest.raises( StorageArgumentException, match="page_token must be a string" ): swh_storage.origin_visit_get(origin.url, page_token=10) # not bytes with pytest.raises( StorageArgumentException, match="order must be a ListOrder value" ): swh_storage.origin_visit_get(origin.url, order="foobar") # wrong order def test_origin_visit_get_all(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) ov1, ov2, ov3 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) # order asc, no token, no limit actual_page = swh_storage.origin_visit_get(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [ov1, ov2, ov3] # order asc, no token, limit actual_page = swh_storage.origin_visit_get(origin.url, limit=2) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov1, ov2] # order asc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ov3] # order asc, no token, limit actual_page = swh_storage.origin_visit_get(origin.url, limit=1) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov1] # order asc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov3] # order asc, token, limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=2 ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov3] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov2] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [ov3] # order desc, no token, no limit actual_page = swh_storage.origin_visit_get(origin.url, order=ListOrder.DESC) assert actual_page.next_page_token is None assert actual_page.results == [ov3, ov2, ov1] # order desc, no token, limit actual_page = swh_storage.origin_visit_get( origin.url, limit=2, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov3, ov2] # order desc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == 
[ov1] # order desc, no token, limit actual_page = swh_storage.origin_visit_get( origin.url, limit=1, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov3] # order desc, token, no limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov2, ov1] # order desc, token, limit actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ov2] actual_page = swh_storage.origin_visit_get( origin.url, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ov1] def test_origin_visit_status_get__unknown_cases(self, swh_storage, sample_data): origin = sample_data.origin actual_page = swh_storage.origin_visit_status_get("foobar", 1) assert actual_page.next_page_token is None assert actual_page.results == [] actual_page = swh_storage.origin_visit_status_get(origin.url, 1) assert actual_page.next_page_token is None assert actual_page.results == [] origin = sample_data.origin swh_storage.origin_add([origin]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), ] )[0] actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit + 10) assert actual_page.next_page_token is None assert actual_page.results == [] def test_origin_visit_status_get_all(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) date_visit3 = round_to_milliseconds(now()) date_visit1 = date_visit3 - datetime.timedelta(hours=2) date_visit2 = date_visit3 - datetime.timedelta(hours=1) assert date_visit1 < date_visit2 < date_visit3 ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=date_visit1, type=sample_data.type_visit1, ), ] )[0] ovs1 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit1, status="created", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit2, status="partial", snapshot=None, ) ovs3 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_visit3, status="full", snapshot=sample_data.snapshot.id, metadata={}, ) swh_storage.origin_visit_status_add([ovs2, ovs3]) # order asc, no token, no limit actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit) assert actual_page.next_page_token is None assert actual_page.results == [ovs1, ovs2, ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1, ovs2] # order asc, token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token ) assert actual_page.next_page_token is 
None assert actual_page.results == [ovs2, ovs3] # order asc, token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, limit=2 ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs3] # order asc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs1, ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3] # order desc, no token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs3, ovs2, ovs1] # order desc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, limit=2, order=ListOrder.DESC ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs3, ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs1] # order desc, no token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, order=ListOrder.DESC, limit=1 ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs3] # order desc, token, no limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs2, ovs1] # order desc, token, limit actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC, limit=1, ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [ovs2] actual_page = swh_storage.origin_visit_status_get( origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC ) assert actual_page.next_page_token is None assert actual_page.results == [ovs1] def test_origin_visit_status_get_random(self, swh_storage, sample_data): origins = sample_data.origins[:2] swh_storage.origin_add(origins) # Add some random visits within the selection range visits = self._generate_random_visits() visit_type = "git" # Add visits to those origins for origin in origins: for date_visit in visits: visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)] )[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="full", snapshot=None, ) ] ) swh_storage.refresh_stat_counters() stats = swh_storage.stat_counters() assert stats["origin"] == len(origins) assert stats["origin_visit"] == len(origins) * len(visits) random_ov, random_ovs = swh_storage.origin_visit_status_get_random(visit_type) assert random_ov and random_ovs assert random_ov.origin is not None assert random_ov.origin == random_ovs.origin assert random_ov.origin in [o.url for o in origins] def test_origin_visit_status_get_random_nothing_found( self, swh_storage, sample_data ): origins = sample_data.origins swh_storage.origin_add(origins) visit_type = "hg" # Add some visits 
outside of the random generation selection so nothing # will be found by the random selection visits = self._generate_random_visits(nb_visits=3, start=13, end=24) for origin in origins: for date_visit in visits: visit = swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)] )[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=visit.visit, date=now(), status="full", snapshot=None, ) ] ) random_origin_visit = swh_storage.origin_visit_status_get_random(visit_type) assert random_origin_visit is None def test_origin_get_by_sha1(self, swh_storage, sample_data): origin = sample_data.origin assert swh_storage.origin_get([origin.url])[0] is None swh_storage.origin_add([origin]) origins = list(swh_storage.origin_get_by_sha1([sha1(origin.url)])) assert len(origins) == 1 assert origins[0]["url"] == origin.url def test_origin_get_by_sha1_not_found(self, swh_storage, sample_data): unknown_origin = sample_data.origin assert swh_storage.origin_get([unknown_origin.url])[0] is None origins = list(swh_storage.origin_get_by_sha1([sha1(unknown_origin.url)])) assert len(origins) == 1 assert origins[0] is None def test_origin_search_single_result(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] actual_page = swh_storage.origin_search(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [] actual_page = swh_storage.origin_search(origin.url, regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [] swh_storage.origin_add([origin]) actual_page = swh_storage.origin_search(origin.url) assert actual_page.next_page_token is None assert actual_page.results == [origin] actual_page = swh_storage.origin_search(f".{origin.url[1:-1]}.", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin] swh_storage.origin_add([origin2]) actual_page = swh_storage.origin_search(origin2.url) assert actual_page.next_page_token is None assert actual_page.results == [origin2] actual_page = swh_storage.origin_search(f".{origin2.url[1:-1]}.", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_no_regexp(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search("/") assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search("/", page_token=None, limit=1) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( "/", page_token=next_page_token, limit=1 ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_regexp_substring(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search("/", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search( "/", page_token=None, limit=1, regexp=True ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( "/", page_token=next_page_token, limit=1, regexp=True ) 
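        # Resuming with the page token from the previous search should return
        # the remaining origin and exhaust the listing (no further page token).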
assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_search_regexp_fullstring(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] swh_storage.origin_add([origin, origin2]) # no pagination actual_page = swh_storage.origin_search(".*/.*", regexp=True) assert actual_page.next_page_token is None assert actual_page.results == [origin, origin2] # offset=0 actual_page = swh_storage.origin_search( ".*/.*", page_token=None, limit=1, regexp=True ) next_page_token = actual_page.next_page_token assert next_page_token is not None assert actual_page.results == [origin] # offset=1 actual_page = swh_storage.origin_search( ".*/.*", page_token=next_page_token, limit=1, regexp=True ) assert actual_page.next_page_token is None assert actual_page.results == [origin2] def test_origin_visit_add(self, swh_storage, sample_data): origin1 = sample_data.origins[1] swh_storage.origin_add([origin1]) date_visit = now() date_visit2 = date_visit + datetime.timedelta(minutes=1) date_visit = round_to_milliseconds(date_visit) date_visit2 = round_to_milliseconds(date_visit2) visit1 = OriginVisit( origin=origin1.url, date=date_visit, type=sample_data.type_visit1, ) visit2 = OriginVisit( origin=origin1.url, date=date_visit2, type=sample_data.type_visit2, ) # add once ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) # then again (will be ignored as they already exist) origin_visit1, origin_visit2 = swh_storage.origin_visit_add([ov1, ov2]) assert ov1 == origin_visit1 assert ov2 == origin_visit2 ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=date_visit, status="created", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin1.url, visit=ov2.visit, date=date_visit2, status="created", snapshot=None, ) actual_visits = swh_storage.origin_visit_get(origin1.url).results expected_visits = [ov1, ov2] assert len(expected_visits) == len(actual_visits) for visit in expected_visits: assert visit in actual_visits actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = list( [("origin", origin1)] + [("origin_visit", visit) for visit in expected_visits] * 2 + [("origin_visit_status", ovs) for ovs in [ovs1, ovs2]] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_add_validation(self, swh_storage, sample_data): """Unknown origin when adding visits should raise""" visit = attr.evolve(sample_data.origin_visit, origin="something-unknonw") with pytest.raises(StorageArgumentException, match="Unknown origin"): swh_storage.origin_visit_add([visit]) objects = list(swh_storage.journal_writer.journal.objects) assert not objects def test_origin_visit_status_add_validation(self, swh_storage): """Wrong origin_visit_status input should raise storage argument error""" date_visit = now() visit_status1 = OriginVisitStatus( origin="unknown-origin-url", visit=10, date=date_visit, status="full", snapshot=None, ) with pytest.raises(StorageArgumentException, match="Unknown origin"): swh_storage.origin_visit_status_add([visit_status1]) objects = list(swh_storage.journal_writer.journal.objects) assert not objects def test_origin_visit_status_add(self, swh_storage, sample_data): """Correct origin visit statuses should add a new visit status """ snapshot = sample_data.snapshot origin1 = sample_data.origins[1] origin2 = Origin(url="new-origin") swh_storage.origin_add([origin1, origin2]) ov1, ov2 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, 
type=sample_data.type_visit1, ), OriginVisit( origin=origin2.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit1, status="created", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin2.url, visit=ov2.visit, date=sample_data.date_visit2, status="created", snapshot=None, ) date_visit_now = round_to_milliseconds(now()) visit_status1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit_now, status="full", snapshot=snapshot.id, ) date_visit_now = round_to_milliseconds(now()) visit_status2 = OriginVisitStatus( origin=ov2.origin, visit=ov2.visit, date=date_visit_now, status="ongoing", snapshot=None, metadata={"intrinsic": "something"}, ) swh_storage.origin_visit_status_add([visit_status1, visit_status2]) visit = swh_storage.origin_visit_get_latest(origin1.url, require_snapshot=True) visit_status = swh_storage.origin_visit_status_get_latest( origin1.url, visit.visit, require_snapshot=True ) assert visit_status == visit_status1 visit = swh_storage.origin_visit_get_latest(origin2.url, require_snapshot=False) visit_status = swh_storage.origin_visit_status_get_latest( origin2.url, visit.visit, require_snapshot=False ) assert origin2.url != origin1.url assert visit_status == visit_status2 actual_objects = list(swh_storage.journal_writer.journal.objects) expected_origins = [origin1, origin2] expected_visits = [ov1, ov2] expected_visit_statuses = [ovs1, ovs2, visit_status1, visit_status2] expected_objects = ( [("origin", o) for o in expected_origins] + [("origin_visit", v) for v in expected_visits] + [("origin_visit_status", ovs) for ovs in expected_visit_statuses] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_status_add_twice(self, swh_storage, sample_data): """Correct origin visit statuses should add a new visit status """ snapshot = sample_data.snapshot origin1 = sample_data.origins[1] swh_storage.origin_add([origin1]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), ] )[0] ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit1, status="created", snapshot=None, ) date_visit_now = round_to_milliseconds(now()) visit_status1 = OriginVisitStatus( origin=ov1.origin, visit=ov1.visit, date=date_visit_now, status="full", snapshot=snapshot.id, ) swh_storage.origin_visit_status_add([visit_status1]) # second call will ignore existing entries (will send to storage though) swh_storage.origin_visit_status_add([visit_status1]) visit_status = swh_storage.origin_visit_status_get_latest(ov1.origin, ov1.visit) assert visit_status == visit_status1 actual_objects = list(swh_storage.journal_writer.journal.objects) expected_origins = [origin1] expected_visits = [ov1] expected_visit_statuses = [ovs1, visit_status1, visit_status1] # write twice in the journal expected_objects = ( [("origin", o) for o in expected_origins] + [("origin_visit", v) for v in expected_visits] + [("origin_visit_status", ovs) for ovs in expected_visit_statuses] ) for obj in expected_objects: assert obj in actual_objects def test_origin_visit_find_by_date(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit1, ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit3, type=sample_data.type_visit2, ) visit3 = 
OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit3, ) ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) ovs1 = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin.url, visit=ov2.visit, date=sample_data.date_visit3, status="ongoing", snapshot=None, ) ovs3 = OriginVisitStatus( origin=origin.url, visit=ov3.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3]) # Simple case actual_visit = swh_storage.origin_visit_find_by_date( origin.url, sample_data.date_visit3 ) assert actual_visit == ov2 # There are two visits at the same date, the latest must be returned actual_visit = swh_storage.origin_visit_find_by_date( origin.url, sample_data.date_visit2 ) assert actual_visit == ov3 def test_origin_visit_find_by_date__unknown_origin(self, swh_storage, sample_data): actual_visit = swh_storage.origin_visit_find_by_date( "foo", sample_data.date_visit2 ) assert actual_visit is None def test_origin_visit_get_by(self, swh_storage, sample_data): snapshot = sample_data.snapshot origins = sample_data.origins[:2] swh_storage.origin_add(origins) origin_url, origin_url2 = [o.url for o in origins] visit = OriginVisit( origin=origin_url, date=sample_data.date_visit2, type=sample_data.type_visit2, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) # Add some other {origin, visit} entries visit2 = OriginVisit( origin=origin_url, date=sample_data.date_visit3, type=sample_data.type_visit3, ) visit3 = OriginVisit( origin=origin_url2, date=sample_data.date_visit3, type=sample_data.type_visit3, ) swh_storage.origin_visit_add([visit2, visit3]) # when visit1_metadata = { "contents": 42, "directories": 22, } swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=origin_visit1.visit, date=now(), status="full", snapshot=snapshot.id, metadata=visit1_metadata, ) ] ) actual_visit = swh_storage.origin_visit_get_by(origin_url, origin_visit1.visit) assert actual_visit == origin_visit1 def test_origin_visit_get_by__no_result(self, swh_storage, sample_data): actual_visit = swh_storage.origin_visit_get_by("unknown", 10) # unknown origin assert actual_visit is None origin = sample_data.origin swh_storage.origin_add([origin]) actual_visit = swh_storage.origin_visit_get_by(origin.url, 999) # unknown visit assert actual_visit is None def test_origin_visit_get_latest_edge_cases(self, swh_storage, sample_data): # unknown origin so no result assert swh_storage.origin_visit_get_latest("unknown-origin") is None # unknown type so no result origin = sample_data.origin swh_storage.origin_add([origin]) assert swh_storage.origin_visit_get_latest(origin.url, type="unknown") is None # unknown allowed statuses should raise with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"): swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["unknown"] ) def test_origin_visit_get_latest_filter_type(self, swh_storage, sample_data): """Filtering origin visit get latest with filter type should be ok """ origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = 
OriginVisit( origin=origin.url, date=sample_data.date_visit2, type="hg", ) date_now = round_to_milliseconds(now()) visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",) assert sample_data.date_visit1 < sample_data.date_visit2 assert sample_data.date_visit2 < date_now ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) # Check type filter is ok actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="git") assert actual_visit == ov1 actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="hg") assert actual_visit == ov3 actual_visit_unknown_type = swh_storage.origin_visit_get_latest( origin.url, type="npm", # no visit matching that type ) assert actual_visit_unknown_type is None def test_origin_visit_get_latest(self, swh_storage, sample_data): empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type="hg", ) date_now = round_to_milliseconds(now()) visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",) assert visit1.date < visit2.date assert visit2.date < visit3.date ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3]) # no filters, latest visit is the last one (whose date is most recent) actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # 3 visits, none has snapshot so nothing is returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit is None # visit are created with "created" status, so nothing will get returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["partial"] ) assert actual_visit is None # visit are created with "created" status, so most recent again actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["created"] ) assert actual_visit == ov3 # Add snapshot to visit1; require_snapshot=True makes it return first visit swh_storage.snapshot_add([complete_snapshot]) visit_status_with_snapshot = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=round_to_milliseconds(now()), status="ongoing", snapshot=complete_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status_with_snapshot]) # only the first visit has a snapshot now actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit == ov1 # only the first visit has a status ongoing now actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["ongoing"] ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, require_snapshot=True ) assert actual_visit_status == visit_status_with_snapshot # ... 
and require_snapshot=False (defaults) still returns latest visit (3rd) actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=False ) assert actual_visit == ov3 # no specific filter, this returns as before the latest visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # Status filter: all three visits are status=ongoing, so no visit # returned actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit is None visit_status1_full = OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=round_to_milliseconds(now()), status="full", snapshot=complete_snapshot.id, ) # Mark the first visit as completed and check status filter again swh_storage.origin_visit_status_add([visit_status1_full]) # only the first visit has the full status actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, allowed_statuses=["full"] ) assert actual_visit_status == visit_status1_full # no specific filter, this returns as before the latest visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # Add snapshot to visit2 and check that the new snapshot is returned swh_storage.snapshot_add([empty_snapshot]) visit_status2_full = OriginVisitStatus( origin=origin.url, visit=ov2.visit, date=round_to_milliseconds(now()), status="ongoing", snapshot=empty_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status2_full]) actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) # 2nd visit is most recent with a snapshot assert actual_visit == ov2 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov2.visit, require_snapshot=True ) assert actual_visit_status == visit_status2_full # no specific filter, this returns as before the latest visit, 3rd one actual_origin = swh_storage.origin_visit_get_latest(origin.url) assert actual_origin == ov3 # full status is still the first visit actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"] ) assert actual_visit == ov1 # Add snapshot to visit3 (same date as visit2) visit_status3_with_snapshot = OriginVisitStatus( origin=origin.url, visit=ov3.visit, date=round_to_milliseconds(now()), status="ongoing", snapshot=complete_snapshot.id, ) swh_storage.origin_visit_status_add([visit_status3_with_snapshot]) # full status is still the first visit actual_visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["full"], require_snapshot=True, ) assert actual_visit == ov1 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, visit=actual_visit.visit, allowed_statuses=["full"], require_snapshot=True, ) assert actual_visit_status == visit_status1_full # most recent is still the 3rd visit actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov3 # 3rd visit has a snapshot now, so it's elected actual_visit = swh_storage.origin_visit_get_latest( origin.url, require_snapshot=True ) assert actual_visit == ov3 actual_visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov3.visit, require_snapshot=True ) assert actual_visit_status == visit_status3_with_snapshot def test_origin_visit_get_latest__same_date(self, swh_storage, sample_data): empty_snapshot, complete_snapshot = sample_data.snapshots[1:3] origin = 
sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) visit2 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="hg", ) ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2]) # ties should be broken by using the visit id actual_visit = swh_storage.origin_visit_get_latest(origin.url) assert actual_visit == ov2 def test_origin_visit_get_latest__not_last(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1, visit2 = sample_data.origin_visits[:2] assert visit1.origin == origin.url swh_storage.origin_visit_add([visit1]) ov1 = swh_storage.origin_visit_get_latest(origin.url) # Add snapshot to visit1, latest snapshot = visit 1 snapshot complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=visit2.date, status="partial", snapshot=None, ) ] ) assert visit1.date < visit2.date # no snapshot associated to the visit, so None visit = swh_storage.origin_visit_get_latest( origin.url, allowed_statuses=["partial"], require_snapshot=True, ) assert visit is None date_now = now() assert visit2.date < date_now swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_now, status="full", snapshot=complete_snapshot.id, ) ] ) swh_storage.origin_visit_add( [OriginVisit(origin=origin.url, date=now(), type=visit1.type,)] ) visit = swh_storage.origin_visit_get_latest(origin.url, require_snapshot=True) assert visit is not None def test_origin_visit_status_get_latest__validation(self, swh_storage, sample_data): origin = sample_data.origin swh_storage.origin_add([origin]) visit1 = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type="git", ) # unknown allowed statuses should raise with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"): swh_storage.origin_visit_status_get_latest( origin.url, visit1.visit, allowed_statuses=["unknown"] ) def test_origin_visit_status_get_latest(self, swh_storage, sample_data): snapshot = sample_data.snapshots[2] origin1 = sample_data.origin swh_storage.origin_add([origin1]) # to have some reference visits ov1, ov2 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin1.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ), OriginVisit( origin=origin1.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ), ] ) swh_storage.snapshot_add([snapshot]) date_now = round_to_milliseconds(now()) assert sample_data.date_visit1 < sample_data.date_visit2 assert sample_data.date_visit2 < date_now ovs1 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit1, status="partial", snapshot=None, ) ovs2 = OriginVisitStatus( origin=origin1.url, visit=ov1.visit, date=sample_data.date_visit2, status="ongoing", snapshot=None, ) ovs3 = OriginVisitStatus( origin=origin1.url, visit=ov2.visit, date=sample_data.date_visit2 + datetime.timedelta(minutes=1), # to not be ignored status="ongoing", snapshot=None, ) ovs4 = OriginVisitStatus( origin=origin1.url, visit=ov2.visit, date=date_now, status="full", snapshot=snapshot.id, metadata={"something": "wicked"}, ) swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3, ovs4]) # unknown origin so no result actual_origin_visit = swh_storage.origin_visit_status_get_latest( "unknown-origin", ov1.visit ) assert actual_origin_visit is None # unknown visit so no 
result actual_origin_visit = swh_storage.origin_visit_status_get_latest( ov1.origin, ov1.visit + 10 ) assert actual_origin_visit is None # Two visits, both with no snapshot, take the most recent actual_origin_visit2 = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit ) assert isinstance(actual_origin_visit2, OriginVisitStatus) assert actual_origin_visit2 == ovs2 assert ovs2.origin == origin1.url assert ovs2.visit == ov1.visit actual_origin_visit = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit, require_snapshot=True ) # there is no visit with snapshot yet for that visit assert actual_origin_visit is None actual_origin_visit2 = swh_storage.origin_visit_status_get_latest( origin1.url, ov1.visit, allowed_statuses=["partial", "ongoing"] ) # visit status with partial status visit elected assert actual_origin_visit2 == ovs2 assert actual_origin_visit2.status == "ongoing" actual_origin_visit4 = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, require_snapshot=True ) assert actual_origin_visit4 == ovs4 assert actual_origin_visit4.snapshot == snapshot.id actual_origin_visit = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, require_snapshot=True, allowed_statuses=["ongoing"] ) # nothing matches so nothing assert actual_origin_visit is None # there is no visit with status full actual_origin_visit3 = swh_storage.origin_visit_status_get_latest( origin1.url, ov2.visit, allowed_statuses=["ongoing"] ) assert actual_origin_visit3 == ovs3 def test_person_fullname_unicity(self, swh_storage, sample_data): revision, rev2 = sample_data.revisions[0:2] # create a revision with same committer fullname but wo name and email revision2 = attr.evolve( rev2, committer=Person( fullname=revision.committer.fullname, name=None, email=None ), ) swh_storage.revision_add([revision, revision2]) # when getting added revisions revisions = list(swh_storage.revision_get([revision.id, revision2.id])) # then check committers are the same assert revisions[0]["committer"] == revisions[1]["committer"] def test_snapshot_add_get_empty(self, swh_storage, sample_data): empty_snapshot = sample_data.snapshots[1] empty_snapshot_dict = empty_snapshot.to_dict() origin = sample_data.origin swh_storage.origin_add([origin]) ov1 = swh_storage.origin_visit_add( [ OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) ] )[0] actual_result = swh_storage.snapshot_add([empty_snapshot]) assert actual_result == {"snapshot:add": 1} date_now = now() swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=date_now, status="full", snapshot=empty_snapshot.id, ) ] ) by_id = swh_storage.snapshot_get(empty_snapshot.id) assert by_id == {**empty_snapshot_dict, "next_branch": None} ovs1 = OriginVisitStatus.from_dict( { "origin": origin.url, "date": sample_data.date_visit1, "visit": ov1.visit, "status": "created", "snapshot": None, "metadata": None, } ) ovs2 = OriginVisitStatus.from_dict( { "origin": origin.url, "date": date_now, "visit": ov1.visit, "status": "full", "metadata": None, "snapshot": empty_snapshot.id, } ) actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("origin", origin), ("origin_visit", ov1), ("origin_visit_status", ovs1,), ("snapshot", empty_snapshot), ("origin_visit_status", ovs2,), ] for obj in expected_objects: assert obj in actual_objects def test_snapshot_add_get_complete(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] 
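        # Keep a plain-dict copy of the snapshot: snapshot_get() returns a dict
        # augmented with a "next_branch" key (None when no pagination is needed).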
complete_snapshot_dict = complete_snapshot.to_dict() origin = sample_data.origin swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] actual_result = swh_storage.snapshot_add([complete_snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=complete_snapshot.id, ) ] ) assert actual_result == {"snapshot:add": 1} by_id = swh_storage.snapshot_get(complete_snapshot.id) assert by_id == {**complete_snapshot_dict, "next_branch": None} def test_snapshot_add_many(self, swh_storage, sample_data): snapshot, _, complete_snapshot = sample_data.snapshots[:3] actual_result = swh_storage.snapshot_add([snapshot, complete_snapshot]) assert actual_result == {"snapshot:add": 2} assert swh_storage.snapshot_get(complete_snapshot.id) == { **complete_snapshot.to_dict(), "next_branch": None, } assert swh_storage.snapshot_get(snapshot.id) == { **snapshot.to_dict(), "next_branch": None, } swh_storage.refresh_stat_counters() assert swh_storage.stat_counters()["snapshot"] == 2 def test_snapshot_add_many_incremental(self, swh_storage, sample_data): snapshot, _, complete_snapshot = sample_data.snapshots[:3] actual_result = swh_storage.snapshot_add([complete_snapshot]) assert actual_result == {"snapshot:add": 1} actual_result2 = swh_storage.snapshot_add([snapshot, complete_snapshot]) assert actual_result2 == {"snapshot:add": 1} assert swh_storage.snapshot_get(complete_snapshot.id) == { **complete_snapshot.to_dict(), "next_branch": None, } assert swh_storage.snapshot_get(snapshot.id) == { **snapshot.to_dict(), "next_branch": None, } def test_snapshot_add_twice(self, swh_storage, sample_data): snapshot, empty_snapshot = sample_data.snapshots[:2] actual_result = swh_storage.snapshot_add([empty_snapshot]) assert actual_result == {"snapshot:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("snapshot", empty_snapshot) ] actual_result = swh_storage.snapshot_add([snapshot]) assert actual_result == {"snapshot:add": 1} assert list(swh_storage.journal_writer.journal.objects) == [ ("snapshot", empty_snapshot), ("snapshot", snapshot), ] def test_snapshot_add_count_branches(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] actual_result = swh_storage.snapshot_add([complete_snapshot]) assert actual_result == {"snapshot:add": 1} snp_size = swh_storage.snapshot_count_branches(complete_snapshot.id) expected_snp_size = { "alias": 1, "content": 1, "directory": 2, "release": 1, "revision": 1, "snapshot": 1, None: 1, } assert snp_size == expected_snp_size def test_snapshot_add_get_paginated(self, swh_storage, sample_data): complete_snapshot = sample_data.snapshots[2] swh_storage.snapshot_add([complete_snapshot]) snp_id = complete_snapshot.id branches = complete_snapshot.branches branch_names = list(sorted(branches)) # Test branch_from snapshot = swh_storage.snapshot_get_branches(snp_id, branches_from=b"release") rel_idx = branch_names.index(b"release") expected_snapshot = { "id": snp_id, "branches": {name: branches[name] for name in branch_names[rel_idx:]}, "next_branch": None, } assert snapshot == expected_snapshot # Test branches_count snapshot = swh_storage.snapshot_get_branches(snp_id, branches_count=1) expected_snapshot = { "id": snp_id, "branches": {branch_names[0]: branches[branch_names[0]],}, "next_branch": b"content", } assert snapshot == 
expected_snapshot

        # test branch_from + branches_count
        snapshot = swh_storage.snapshot_get_branches(
            snp_id, branches_from=b"directory", branches_count=3
        )
        dir_idx = branch_names.index(b"directory")
        expected_snapshot = {
            "id": snp_id,
            "branches": {
                name: branches[name] for name in branch_names[dir_idx : dir_idx + 3]
            },
            "next_branch": branch_names[dir_idx + 3],
        }
        assert snapshot == expected_snapshot

    def test_snapshot_add_get_filtered(self, swh_storage, sample_data):
        origin = sample_data.origin
        complete_snapshot = sample_data.snapshots[2]

        swh_storage.origin_add([origin])
        visit = OriginVisit(
            origin=origin.url,
            date=sample_data.date_visit1,
            type=sample_data.type_visit1,
        )
        origin_visit1 = swh_storage.origin_visit_add([visit])[0]

        swh_storage.snapshot_add([complete_snapshot])
        swh_storage.origin_visit_status_add(
            [
                OriginVisitStatus(
                    origin=origin.url,
                    visit=origin_visit1.visit,
                    date=now(),
                    status="ongoing",
                    snapshot=complete_snapshot.id,
                )
            ]
        )

        snp_id = complete_snapshot.id
        branches = complete_snapshot.branches

        snapshot = swh_storage.snapshot_get_branches(
            snp_id, target_types=["release", "revision"]
        )
        expected_snapshot = {
            "id": snp_id,
            "branches": {
                name: tgt
                for name, tgt in branches.items()
                if tgt and tgt.target_type in [TargetType.RELEASE, TargetType.REVISION]
            },
            "next_branch": None,
        }
        assert snapshot == expected_snapshot

        snapshot = swh_storage.snapshot_get_branches(snp_id, target_types=["alias"])
        expected_snapshot = {
            "id": snp_id,
            "branches": {
                name: tgt
                for name, tgt in branches.items()
                if tgt and tgt.target_type == TargetType.ALIAS
            },
            "next_branch": None,
        }
        assert snapshot == expected_snapshot

    def test_snapshot_add_get_filtered_and_paginated(self, swh_storage, sample_data):
        complete_snapshot = sample_data.snapshots[2]
        swh_storage.snapshot_add([complete_snapshot])

        snp_id = complete_snapshot.id
        branches = complete_snapshot.branches
        branch_names = list(sorted(branches))

        # Test branch_from
        snapshot = swh_storage.snapshot_get_branches(
            snp_id, target_types=["directory", "release"], branches_from=b"directory2"
        )
        expected_snapshot = {
            "id": snp_id,
            "branches": {name: branches[name] for name in (b"directory2", b"release")},
            "next_branch": None,
        }
        assert snapshot == expected_snapshot

        # Test branches_count
        snapshot = swh_storage.snapshot_get_branches(
            snp_id, target_types=["directory", "release"], branches_count=1
        )
        expected_snapshot = {
            "id": snp_id,
            "branches": {b"directory": branches[b"directory"]},
            "next_branch": b"directory2",
        }
        assert snapshot == expected_snapshot

        # Test branches_count
        snapshot = swh_storage.snapshot_get_branches(
            snp_id, target_types=["directory", "release"], branches_count=2
        )
        expected_snapshot = {
            "id": snp_id,
            "branches": {
                name: branches[name] for name in (b"directory", b"directory2")
            },
            "next_branch": b"release",
        }
        assert snapshot == expected_snapshot

        # test branch_from + branches_count
        snapshot = swh_storage.snapshot_get_branches(
            snp_id,
            target_types=["directory", "release"],
            branches_from=b"directory2",
            branches_count=1,
        )
        dir_idx = branch_names.index(b"directory2")
        expected_snapshot = {
            "id": snp_id,
            "branches": {branch_names[dir_idx]: branches[branch_names[dir_idx]],},
            "next_branch": b"release",
        }
        assert snapshot == expected_snapshot

    def test_snapshot_add_get_branch_by_type(self, swh_storage, sample_data):
        complete_snapshot = sample_data.snapshots[2]
        snapshot = complete_snapshot.to_dict()

        alias1 = b"alias1"
        alias2 = b"alias2"
        target1 = random.choice(list(snapshot["branches"].keys()))
        target2 = random.choice(list(snapshot["branches"].keys()))
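        # Add two alias branches pointing at randomly chosen existing branch
        # names, then check that filtering on the "alias" target type while
        # paginating from alias1 with a count of 1 returns exactly that alias.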
snapshot["branches"][alias2] = { "target": target2, "target_type": "alias", } snapshot["branches"][alias1] = { "target": target1, "target_type": "alias", } new_snapshot = Snapshot.from_dict(snapshot) swh_storage.snapshot_add([new_snapshot]) branches = swh_storage.snapshot_get_branches( new_snapshot.id, target_types=["alias"], branches_from=alias1, branches_count=1, )["branches"] assert len(branches) == 1 assert alias1 in branches def test_snapshot_add_get(self, swh_storage, sample_data): snapshot = sample_data.snapshot origin = sample_data.origin swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit1, type=sample_data.type_visit1, ) ov1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=ov1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) expected_snapshot = {**snapshot.to_dict(), "next_branch": None} by_id = swh_storage.snapshot_get(snapshot.id) assert by_id == expected_snapshot actual_visit = swh_storage.origin_visit_get_by(origin.url, ov1.visit) assert actual_visit == ov1 visit_status = swh_storage.origin_visit_status_get_latest( origin.url, ov1.visit, require_snapshot=True ) assert visit_status.snapshot == snapshot.id def test_snapshot_get_random(self, swh_storage, sample_data): snapshot, empty_snapshot, complete_snapshot = sample_data.snapshots[:3] swh_storage.snapshot_add([snapshot, empty_snapshot, complete_snapshot]) assert swh_storage.snapshot_get_random() in { snapshot.id, empty_snapshot.id, complete_snapshot.id, } def test_snapshot_missing(self, swh_storage, sample_data): snapshot, missing_snapshot = sample_data.snapshots[:2] snapshots = [snapshot.id, missing_snapshot.id] swh_storage.snapshot_add([snapshot]) missing_snapshots = swh_storage.snapshot_missing(snapshots) assert list(missing_snapshots) == [missing_snapshot.id] def test_stat_counters(self, swh_storage, sample_data): origin = sample_data.origin snapshot = sample_data.snapshot revision = sample_data.revision release = sample_data.release directory = sample_data.directory content = sample_data.content expected_keys = ["content", "directory", "origin", "revision"] # Initially, all counters are 0 swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert set(expected_keys) <= set(counters) for key in expected_keys: assert counters[key] == 0 # Add a content. Only the content counter should increase. swh_storage.content_add([content]) swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert set(expected_keys) <= set(counters) for key in expected_keys: if key != "content": assert counters[key] == 0 assert counters["content"] == 1 # Add other objects. Check their counter increased as well. 
swh_storage.origin_add([origin]) visit = OriginVisit( origin=origin.url, date=sample_data.date_visit2, type=sample_data.type_visit2, ) origin_visit1 = swh_storage.origin_visit_add([visit])[0] swh_storage.snapshot_add([snapshot]) swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin.url, visit=origin_visit1.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) swh_storage.directory_add([directory]) swh_storage.revision_add([revision]) swh_storage.release_add([release]) swh_storage.refresh_stat_counters() counters = swh_storage.stat_counters() assert counters["content"] == 1 assert counters["directory"] == 1 assert counters["snapshot"] == 1 assert counters["origin"] == 1 assert counters["origin_visit"] == 1 assert counters["revision"] == 1 assert counters["release"] == 1 assert counters["snapshot"] == 1 if "person" in counters: assert counters["person"] == 3 def test_content_find_ctime(self, swh_storage, sample_data): origin_content = sample_data.content ctime = round_to_milliseconds(now()) content = attr.evolve(origin_content, data=None, ctime=ctime) swh_storage.content_add_metadata([content]) actually_present = swh_storage.content_find({"sha1": content.sha1}) assert actually_present[0] == content def test_content_find_with_present_content(self, swh_storage, sample_data): content = sample_data.content expected_content = attr.evolve(content, data=None) # 1. with something to find swh_storage.content_add([content]) actually_present = swh_storage.content_find({"sha1": content.sha1}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 2. with something to find actually_present = swh_storage.content_find({"sha1_git": content.sha1_git}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 3. with something to find actually_present = swh_storage.content_find({"sha256": content.sha256}) assert 1 == len(actually_present) assert actually_present[0] == expected_content # 4. with something to find actually_present = swh_storage.content_find(content.hashes()) assert 1 == len(actually_present) assert actually_present[0] == expected_content def test_content_find_with_non_present_content(self, swh_storage, sample_data): missing_content = sample_data.skipped_content # 1. with something that does not exist actually_present = swh_storage.content_find({"sha1": missing_content.sha1}) assert actually_present == [] # 2. with something that does not exist actually_present = swh_storage.content_find( {"sha1_git": missing_content.sha1_git} ) assert actually_present == [] # 3. 
with something that does not exist actually_present = swh_storage.content_find({"sha256": missing_content.sha256}) assert actually_present == [] def test_content_find_with_duplicate_input(self, swh_storage, sample_data): content = sample_data.content # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(content.sha1) sha1_array[0] += 1 sha1git_array = bytearray(content.sha1_git) sha1git_array[0] += 1 duplicated_content = attr.evolve( content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array) ) # Inject the data swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find( { "blake2s256": duplicated_content.blake2s256, "sha256": duplicated_content.sha256, } ) expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] def test_content_find_with_duplicate_sha256(self, swh_storage, sample_data): content = sample_data.content hashes = {} # Create fake data with colliding sha256 for hashalgo in ("sha1", "sha1_git", "blake2s256"): value = bytearray(getattr(content, hashalgo)) value[0] += 1 hashes[hashalgo] = bytes(value) duplicated_content = attr.evolve( content, sha1=hashes["sha1"], sha1_git=hashes["sha1_git"], blake2s256=hashes["blake2s256"], ) swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find({"sha256": duplicated_content.sha256}) assert len(actual_result) == 2 expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] # Find with both sha256 and blake2s256 actual_result = swh_storage.content_find( { "sha256": duplicated_content.sha256, "blake2s256": duplicated_content.blake2s256, } ) assert len(actual_result) == 1 assert actual_result == [expected_duplicated_content] def test_content_find_with_duplicate_blake2s256(self, swh_storage, sample_data): content = sample_data.content # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(content.sha1) sha1_array[0] += 1 sha1git_array = bytearray(content.sha1_git) sha1git_array[0] += 1 sha256_array = bytearray(content.sha256) sha256_array[0] += 1 duplicated_content = attr.evolve( content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array), sha256=bytes(sha256_array), ) swh_storage.content_add([content, duplicated_content]) actual_result = swh_storage.content_find( {"blake2s256": duplicated_content.blake2s256} ) expected_content = attr.evolve(content, data=None) expected_duplicated_content = attr.evolve(duplicated_content, data=None) for result in actual_result: assert result in [expected_content, expected_duplicated_content] # Find with both sha256 and blake2s256 actual_result = swh_storage.content_find( { "sha256": duplicated_content.sha256, "blake2s256": duplicated_content.blake2s256, } ) assert actual_result == [expected_duplicated_content] def test_content_find_bad_input(self, swh_storage): # 1. with no hash to lookup with pytest.raises(StorageArgumentException): swh_storage.content_find({}) # need at least one hash # 2. 
with bad hash with pytest.raises(StorageArgumentException): swh_storage.content_find({"unknown-sha1": "something"}) # not the right key def test_object_find_by_sha1_git(self, swh_storage, sample_data): content = sample_data.content directory = sample_data.directory revision = sample_data.revision release = sample_data.release sha1_gits = [b"00000000000000000000"] expected = { b"00000000000000000000": [], } swh_storage.content_add([content]) sha1_gits.append(content.sha1_git) expected[content.sha1_git] = [ {"sha1_git": content.sha1_git, "type": "content",} ] swh_storage.directory_add([directory]) sha1_gits.append(directory.id) expected[directory.id] = [{"sha1_git": directory.id, "type": "directory",}] swh_storage.revision_add([revision]) sha1_gits.append(revision.id) expected[revision.id] = [{"sha1_git": revision.id, "type": "revision",}] swh_storage.release_add([release]) sha1_gits.append(release.id) expected[release.id] = [{"sha1_git": release.id, "type": "release",}] ret = swh_storage.object_find_by_sha1_git(sha1_gits) assert expected == ret def test_metadata_fetcher_add_get(self, swh_storage, sample_data): fetcher = sample_data.metadata_fetcher actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert actual_fetcher is None # does not exist swh_storage.metadata_fetcher_add([fetcher]) res = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert res == fetcher actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_fetcher", fetcher), ] for obj in expected_objects: assert obj in actual_objects def test_metadata_fetcher_add_zero(self, swh_storage, sample_data): fetcher = sample_data.metadata_fetcher actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version) assert actual_fetcher is None # does not exist swh_storage.metadata_fetcher_add([]) def test_metadata_authority_add_get(self, swh_storage, sample_data): authority = sample_data.metadata_authority actual_authority = swh_storage.metadata_authority_get( authority.type, authority.url ) assert actual_authority is None # does not exist swh_storage.metadata_authority_add([authority]) res = swh_storage.metadata_authority_get(authority.type, authority.url) assert res == authority actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_authority", authority), ] for obj in expected_objects: assert obj in actual_objects def test_metadata_authority_add_zero(self, swh_storage, sample_data): authority = sample_data.metadata_authority actual_authority = swh_storage.metadata_authority_get( authority.type, authority.url ) assert actual_authority is None # does not exist swh_storage.metadata_authority_add([]) def test_content_metadata_add(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata = sample_data.content_metadata[:2] content_swhid = SWHID( object_type="content", object_id=hash_to_bytes(content.sha1_git) ) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add(content_metadata) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == list( content_metadata ) actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ 
("metadata_authority", authority), ("metadata_fetcher", fetcher), ] + [("raw_extrinsic_metadata", item) for item in content_metadata] for obj in expected_objects: assert obj in actual_objects def test_content_metadata_add_duplicate(self, swh_storage, sample_data): """Duplicates should be silently updated.""" content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID( object_type="content", object_id=hash_to_bytes(content.sha1_git) ) new_content_metadata2 = attr.evolve( content_metadata2, format="new-format", metadata=b"new-metadata", ) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) swh_storage.raw_extrinsic_metadata_add([new_content_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority ) assert result.next_page_token is None expected_results1 = (content_metadata, new_content_metadata2) expected_results2 = (content_metadata, content_metadata2) assert tuple(sorted(result.results, key=lambda x: x.discovery_date,)) in ( expected_results1, # cassandra expected_results2, # postgresql ) def test_content_metadata_get(self, swh_storage, sample_data): content, content2 = sample_data.contents[:2] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( content1_metadata1, content1_metadata2, content1_metadata3, ) = sample_data.content_metadata[:3] content1_swhid = SWHID(object_type="content", object_id=content.sha1_git) content2_swhid = SWHID(object_type="content", object_id=content2.sha1_git) content2_metadata = attr.evolve(content1_metadata2, id=content2_swhid) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [ content1_metadata1, content1_metadata2, content1_metadata3, content2_metadata, ] ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content1_swhid, authority ) assert result.next_page_token is None assert [content1_metadata1, content1_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content1_swhid, authority2 ) assert result.next_page_token is None assert [content1_metadata3] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content2_swhid, authority ) assert result.next_page_token is None assert [content2_metadata] == list(result.results,) def test_content_metadata_get_after(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID(object_type="content", object_id=content.sha1_git) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, after=content_metadata.discovery_date - timedelta(seconds=1), ) assert result.next_page_token is None assert [content_metadata, content_metadata2] == list( 
sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, after=content_metadata.discovery_date, ) assert result.next_page_token is None assert result.results == [content_metadata2] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, after=content_metadata2.discovery_date, ) assert result.next_page_token is None assert result.results == [] def test_content_metadata_get_paginate(self, swh_storage, sample_data): content = sample_data.content fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID(object_type="content", object_id=content.sha1_git) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [content_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [content_metadata2] def test_content_metadata_get_paginate_same_date(self, swh_storage, sample_data): content = sample_data.content fetcher1, fetcher2 = sample_data.fetchers[:2] authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] content_swhid = SWHID(object_type="content", object_id=content.sha1_git) swh_storage.metadata_fetcher_add([fetcher1, fetcher2]) swh_storage.metadata_authority_add([authority]) new_content_metadata2 = attr.evolve( content_metadata2, discovery_date=content_metadata2.discovery_date, fetcher=attr.evolve(fetcher2, metadata=None), ) swh_storage.raw_extrinsic_metadata_add( [content_metadata, new_content_metadata2] ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [content_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, content_swhid, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [new_content_metadata2] def test_content_metadata_get__invalid_id(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority content_metadata, content_metadata2 = sample_data.content_metadata[:2] swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2]) with pytest.raises(StorageArgumentException, match="SWHID"): swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.CONTENT, origin.url, authority ) def test_origin_metadata_add(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} 
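# The fetcher and authority referenced by the metadata objects must be
# registered before raw_extrinsic_metadata_add is called; the
# missing-authority and missing-fetcher tests further down check that the
# call raises StorageArgumentException otherwise.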
swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date)) == [ origin_metadata, origin_metadata2, ] actual_objects = list(swh_storage.journal_writer.journal.objects) expected_objects = [ ("metadata_authority", authority), ("metadata_fetcher", fetcher), ("raw_extrinsic_metadata", origin_metadata), ("raw_extrinsic_metadata", origin_metadata2), ] for obj in expected_objects: assert obj in actual_objects def test_origin_metadata_add_duplicate(self, swh_storage, sample_data): """Duplicates should be silently updated.""" origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} new_origin_metadata2 = attr.evolve( origin_metadata2, format="new-format", metadata=b"new-metadata", ) swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) swh_storage.raw_extrinsic_metadata_add([new_origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) assert result.next_page_token is None # which of the two behavior happens is backend-specific. expected_results1 = (origin_metadata, new_origin_metadata2) expected_results2 = (origin_metadata, origin_metadata2) assert tuple(sorted(result.results, key=lambda x: x.discovery_date,)) in ( expected_results1, # cassandra expected_results2, # postgresql ) def test_origin_metadata_get(self, swh_storage, sample_data): origin, origin2 = sample_data.origins[:2] fetcher, fetcher2 = sample_data.fetchers[:2] authority, authority2 = sample_data.authorities[:2] ( origin1_metadata1, origin1_metadata2, origin1_metadata3, ) = sample_data.origin_metadata[:3] assert swh_storage.origin_add([origin, origin2]) == {"origin:add": 2} origin2_metadata = attr.evolve(origin1_metadata2, id=origin2.url) swh_storage.metadata_authority_add([authority, authority2]) swh_storage.metadata_fetcher_add([fetcher, fetcher2]) swh_storage.raw_extrinsic_metadata_add( [origin1_metadata1, origin1_metadata2, origin1_metadata3, origin2_metadata] ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) assert result.next_page_token is None assert [origin1_metadata1, origin1_metadata2] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority2 ) assert result.next_page_token is None assert [origin1_metadata3] == list( sorted(result.results, key=lambda x: x.discovery_date,) ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin2.url, authority ) assert result.next_page_token is None assert [origin2_metadata] == list(result.results,) def test_origin_metadata_get_after(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) 
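# The `after` argument filters on discovery_date: querying one second before
# the first entry returns both objects, querying at the first entry's exact
# date returns only the second, and querying at the second entry's date
# returns nothing.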
swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, after=origin_metadata.discovery_date - timedelta(seconds=1), ) assert result.next_page_token is None assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == [ origin_metadata, origin_metadata2, ] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, after=origin_metadata.discovery_date, ) assert result.next_page_token is None assert result.results == [origin_metadata2] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, after=origin_metadata2.discovery_date, ) assert result.next_page_token is None assert result.results == [] def test_origin_metadata_get_paginate(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority ) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [origin_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [origin_metadata2] def test_origin_metadata_get_paginate_same_date(self, swh_storage, sample_data): origin = sample_data.origin fetcher1, fetcher2 = sample_data.fetchers[:2] authority = sample_data.metadata_authority origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher1, fetcher2]) swh_storage.metadata_authority_add([authority]) new_origin_metadata2 = attr.evolve( origin_metadata2, discovery_date=origin_metadata2.discovery_date, fetcher=attr.evolve(fetcher2, metadata=None), ) swh_storage.raw_extrinsic_metadata_add([origin_metadata, new_origin_metadata2]) result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1 ) assert result.next_page_token is not None assert result.results == [origin_metadata] result = swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, origin.url, authority, limit=1, page_token=result.next_page_token, ) assert result.next_page_token is None assert result.results == [new_origin_metadata2] def test_origin_metadata_add_missing_authority(self, swh_storage, sample_data): origin = sample_data.origin fetcher = sample_data.metadata_fetcher origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) with pytest.raises(StorageArgumentException, match="authority"): swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) def test_origin_metadata_add_missing_fetcher(self, swh_storage, sample_data): origin = sample_data.origin authority = sample_data.metadata_authority 
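# Counterpart of the missing-authority test above: only the authority is
# registered here, the fetcher is deliberately left out, so
# raw_extrinsic_metadata_add is expected to raise StorageArgumentException.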
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_authority_add([authority]) with pytest.raises(StorageArgumentException, match="fetcher"): swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) def test_origin_metadata_get__invalid_id_type(self, swh_storage, sample_data): origin = sample_data.origin authority = sample_data.metadata_authority fetcher = sample_data.metadata_fetcher origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2] content_metadata = sample_data.content_metadata[0] assert swh_storage.origin_add([origin]) == {"origin:add": 1} swh_storage.metadata_fetcher_add([fetcher]) swh_storage.metadata_authority_add([authority]) swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2]) with pytest.raises(StorageArgumentException, match="SWHID"): swh_storage.raw_extrinsic_metadata_get( MetadataTargetType.ORIGIN, content_metadata.id, authority, ) class TestStorageGeneratedData: def test_generate_content_get_data(self, swh_storage, swh_contents): contents_with_data = [c for c in swh_contents if c.status != "absent"] # retrieve contents for content in contents_with_data: actual_content_data = swh_storage.content_get_data(content.sha1) assert actual_content_data is not None assert actual_content_data == content.data def test_generate_content_get(self, swh_storage, swh_contents): expected_contents = [ attr.evolve(c, data=None) for c in swh_contents if c.status != "absent" ] actual_contents = swh_storage.content_get([c.sha1 for c in expected_contents]) assert len(actual_contents) == len(expected_contents) assert actual_contents == expected_contents @pytest.mark.parametrize("limit", [1, 7, 10, 100, 1000]) def test_origin_list(self, swh_storage, swh_origins, limit): returned_origins = [] page_token = None i = 0 while True: actual_page = swh_storage.origin_list(page_token=page_token, limit=limit) assert len(actual_page.results) <= limit returned_origins.extend(actual_page.results) i += 1 page_token = actual_page.next_page_token if page_token is None: assert i * limit >= len(swh_origins) break else: assert len(actual_page.results) == limit assert sorted(returned_origins) == sorted(swh_origins) def test_origin_count(self, swh_storage, sample_data): swh_storage.origin_add(sample_data.origins) assert swh_storage.origin_count("github") == 3 assert swh_storage.origin_count("gitlab") == 2 assert swh_storage.origin_count(".*user.*", regexp=True) == 5 assert swh_storage.origin_count(".*user.*", regexp=False) == 0 assert swh_storage.origin_count(".*user1.*", regexp=True) == 2 assert swh_storage.origin_count(".*user1.*", regexp=False) == 0 def test_origin_count_with_visit_no_visits(self, swh_storage, sample_data): swh_storage.origin_add(sample_data.origins) # none of them have visits, so with_visit=True => 0 assert swh_storage.origin_count("github", with_visit=True) == 0 assert swh_storage.origin_count("gitlab", with_visit=True) == 0 assert swh_storage.origin_count(".*user.*", regexp=True, with_visit=True) == 0 assert swh_storage.origin_count(".*user.*", regexp=False, with_visit=True) == 0 assert swh_storage.origin_count(".*user1.*", regexp=True, with_visit=True) == 0 assert swh_storage.origin_count(".*user1.*", regexp=False, with_visit=True) == 0 def test_origin_count_with_visit_with_visits_no_snapshot( self, swh_storage, sample_data ): swh_storage.origin_add(sample_data.origins) origin_url = "https://github.com/user1/repo1" visit = 
OriginVisit(origin=origin_url, date=now(), type="git",) swh_storage.origin_visit_add([visit]) assert swh_storage.origin_count("github", with_visit=False) == 3 # it has a visit, but no snapshot, so with_visit=True => 0 assert swh_storage.origin_count("github", with_visit=True) == 0 assert swh_storage.origin_count("gitlab", with_visit=False) == 2 # these gitlab origins have no visit assert swh_storage.origin_count("gitlab", with_visit=True) == 0 assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=False) == 1 ) assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 0 ) assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 0 def test_origin_count_with_visit_with_visits_and_snapshot( self, swh_storage, sample_data ): snapshot = sample_data.snapshot swh_storage.origin_add(sample_data.origins) swh_storage.snapshot_add([snapshot]) origin_url = "https://github.com/user1/repo1" visit = OriginVisit(origin=origin_url, date=now(), type="git",) visit = swh_storage.origin_visit_add([visit])[0] swh_storage.origin_visit_status_add( [ OriginVisitStatus( origin=origin_url, visit=visit.visit, date=now(), status="ongoing", snapshot=snapshot.id, ) ] ) assert swh_storage.origin_count("github", with_visit=False) == 3 # github/user1 has a visit and a snapshot, so with_visit=True => 1 assert swh_storage.origin_count("github", with_visit=True) == 1 assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=False) == 1 ) assert ( swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 1 ) assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 1 @settings(suppress_health_check=[HealthCheck.too_slow]) @given(strategies.lists(objects(split_content=True), max_size=2)) def test_add_arbitrary(self, swh_storage, objects): for (obj_type, obj) in objects: if obj.object_type == "origin_visit": swh_storage.origin_add([Origin(url=obj.origin)]) visit = OriginVisit(origin=obj.origin, date=obj.date, type=obj.type,) swh_storage.origin_visit_add([visit]) else: method = getattr(swh_storage, obj_type + "_add") try: method([obj]) except HashCollision: pass @pytest.mark.db class TestLocalStorage: """Test the local storage""" # This test is only relevant on the local storage, with an actual # objstorage raising an exception def test_content_add_objstorage_exception(self, swh_storage, sample_data): content = sample_data.content swh_storage.objstorage.content_add = Mock( side_effect=Exception("mocked broken objstorage") ) with pytest.raises(Exception, match="mocked broken"): swh_storage.content_add([content]) missing = list(swh_storage.content_missing([content.hashes()])) assert missing == [content.sha1] @pytest.mark.db class TestStorageRaceConditions: @pytest.mark.xfail def test_content_add_race(self, swh_storage, sample_data): content = sample_data.content results = queue.Queue() def thread(): try: with db_transaction(swh_storage) as (db, cur): ret = swh_storage.content_add([content], db=db, cur=cur) results.put((threading.get_ident(), "data", ret)) except Exception as e: results.put((threading.get_ident(), "exc", e)) t1 = threading.Thread(target=thread) t2 = threading.Thread(target=thread) t1.start() # this avoids the race condition # import time # time.sleep(1) t2.start() t1.join() t2.join() r1 = results.get(block=False) r2 = results.get(block=False) with pytest.raises(queue.Empty): results.get(block=False) assert r1[0] != r2[0] assert r1[1] == "data", "Got exception %r in Thread%s" % (r1[2], r1[0]) assert 
r2[1] == "data", "Got exception %r in Thread%s" % (r2[2], r2[0]) @pytest.mark.db class TestPgStorage: """This class is dedicated for the rare case where the schema needs to be altered dynamically. Otherwise, the tests could be blocking when ran altogether. """ def test_content_update_with_new_cols(self, swh_storage, sample_data): content, content2 = sample_data.contents[:2] swh_storage.journal_writer.journal = None # TODO, not supported with db_transaction(swh_storage) as (_, cur): cur.execute( """alter table content add column test text default null, add column test2 text default null""" ) swh_storage.content_add([content]) cont = content.to_dict() cont["test"] = "value-1" cont["test2"] = "value-2" swh_storage.content_update([cont], keys=["test", "test2"]) with db_transaction(swh_storage) as (_, cur): cur.execute( """SELECT sha1, sha1_git, sha256, length, status, test, test2 FROM content WHERE sha1 = %s""", (cont["sha1"],), ) datum = cur.fetchone() assert datum == ( cont["sha1"], cont["sha1_git"], cont["sha256"], cont["length"], "visible", cont["test"], cont["test2"], ) with db_transaction(swh_storage) as (_, cur): cur.execute( """alter table content drop column test, drop column test2""" ) def test_content_add_db(self, swh_storage, sample_data): content = sample_data.content actual_result = swh_storage.content_add([content]) assert actual_result == { "content:add": 1, "content:add:bytes": content.length, } if hasattr(swh_storage, "objstorage"): assert content.sha1 in swh_storage.objstorage.objstorage with db_transaction(swh_storage) as (_, cur): cur.execute( "SELECT sha1, sha1_git, sha256, length, status" " FROM content WHERE sha1 = %s", (content.sha1,), ) datum = cur.fetchone() assert datum == ( content.sha1, content.sha1_git, content.sha256, content.length, "visible", ) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 assert contents[0] == attr.evolve(content, data=None) def test_content_add_metadata_db(self, swh_storage, sample_data): content = attr.evolve(sample_data.content, data=None, ctime=now()) actual_result = swh_storage.content_add_metadata([content]) assert actual_result == { "content:add": 1, } if hasattr(swh_storage, "objstorage"): assert content.sha1 not in swh_storage.objstorage.objstorage with db_transaction(swh_storage) as (_, cur): cur.execute( "SELECT sha1, sha1_git, sha256, length, status" " FROM content WHERE sha1 = %s", (content.sha1,), ) datum = cur.fetchone() assert datum == ( content.sha1, content.sha1_git, content.sha256, content.length, "visible", ) contents = [ obj for (obj_type, obj) in swh_storage.journal_writer.journal.objects if obj_type == "content" ] assert len(contents) == 1 assert contents[0] == content def test_skipped_content_add_db(self, swh_storage, sample_data): content, cont2 = sample_data.skipped_contents[:2] content2 = attr.evolve(cont2, blake2s256=None) actual_result = swh_storage.skipped_content_add([content, content, content2]) assert 2 <= actual_result.pop("skipped_content:add") <= 3 assert actual_result == {} with db_transaction(swh_storage) as (_, cur): cur.execute( "SELECT sha1, sha1_git, sha256, blake2s256, " "length, status, reason " "FROM skipped_content ORDER BY sha1_git" ) dbdata = cur.fetchall() assert len(dbdata) == 2 assert dbdata[0] == ( content.sha1, content.sha1_git, content.sha256, content.blake2s256, content.length, "absent", "Content too long", ) assert dbdata[1] == ( content2.sha1, content2.sha1_git, content2.sha256, 
content2.blake2s256, content2.length, "absent", "Content too long", ) def test_clear_buffers(self, swh_storage): """Calling clear buffers on real storage does nothing """ assert swh_storage.clear_buffers() is None def test_flush(self, swh_storage): """Calling clear buffers on real storage does nothing """ assert swh_storage.flush() == {} diff --git a/swh/storage/tests/test_api_client.py b/swh/storage/tests/test_api_client.py index 2ca1f375..f19ba7ba 100644 --- a/swh/storage/tests/test_api_client.py +++ b/swh/storage/tests/test_api_client.py @@ -1,97 +1,97 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import pytest import swh.storage.api.server as server -import swh.storage.storage +import swh.storage from swh.storage import get_storage from swh.storage.tests.test_storage import ( TestStorageGeneratedData as _TestStorageGeneratedData, ) from swh.storage.tests.test_storage import TestStorage as _TestStorage # tests are executed using imported classes (TestStorage and # TestStorageGeneratedData) using overloaded swh_storage fixture # below @pytest.fixture def app_server(): server.storage = swh.storage.get_storage( cls="memory", journal_writer={"cls": "memory"} ) yield server @pytest.fixture def app(app_server): return app_server.app @pytest.fixture def swh_rpc_client_class(): def storage_factory(**kwargs): storage_config = { "cls": "remote", **kwargs, } return get_storage(**storage_config) return storage_factory @pytest.fixture def swh_storage(swh_rpc_client, app_server): # This version of the swh_storage fixture uses the swh_rpc_client fixture # to instantiate a RemoteStorage (see swh_rpc_client_class above) that # proxies, via the swh.core RPC mechanism, the local (in memory) storage # configured in the app_server fixture above. # # Also note that, for the sake of # making it easier to write tests, the in-memory journal writer of the # in-memory backend storage is attached to the RemoteStorage as its # journal_writer attribute. storage = swh_rpc_client journal_writer = getattr(storage, "journal_writer", None) storage.journal_writer = app_server.storage.journal_writer yield storage storage.journal_writer = journal_writer class TestStorageApi(_TestStorage): @pytest.mark.skip( 'The "person" table of the pgsql is a legacy thing, and not ' "supported by the cassandra backend." 
) def test_person_fullname_unicity(self): pass @pytest.mark.skip("content_update is not yet implemented for Cassandra") def test_content_update(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count(self): pass class TestStorageApiGeneratedData(_TestStorageGeneratedData): @pytest.mark.skip("Not supported by Cassandra") def test_origin_count(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count_with_visit_no_visits(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count_with_visit_with_visits_and_snapshot(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count_with_visit_with_visits_no_snapshot(self): pass diff --git a/swh/storage/tests/test_in_memory.py b/swh/storage/tests/test_in_memory.py index adb094d4..c6cc3ad8 100644 --- a/swh/storage/tests/test_in_memory.py +++ b/swh/storage/tests/test_in_memory.py @@ -1,130 +1,130 @@ # Copyright (C) 2018-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import dataclasses import pytest from swh.storage.cassandra.model import BaseRow from swh.storage.in_memory import Table -from swh.storage.tests.test_storage import TestStorage as _TestStorage -from swh.storage.tests.test_storage import ( +from swh.storage.tests.storage_tests import TestStorage as _TestStorage +from swh.storage.tests.storage_tests import ( TestStorageGeneratedData as _TestStorageGeneratedData, ) # tests are executed using imported classes (TestStorage and # TestStorageGeneratedData) using overloaded swh_storage fixture # below @pytest.fixture def swh_storage_backend_config(): yield { "cls": "memory", "journal_writer": {"cls": "memory",}, } @dataclasses.dataclass class Row(BaseRow): PARTITION_KEY = ("col1", "col2") CLUSTERING_KEY = ("col3", "col4") col1: str col2: str col3: str col4: str col5: str col6: int def test_table_keys(): table = Table(Row) primary_key = ("foo", "bar", "baz", "qux") partition_key = ("foo", "bar") clustering_key = ("baz", "qux") row = Row(col1="foo", col2="bar", col3="baz", col4="qux", col5="quux", col6=4) assert table.partition_key(row) == partition_key assert table.clustering_key(row) == clustering_key assert table.primary_key(row) == primary_key assert table.primary_key_from_dict(row.to_dict()) == primary_key assert table.split_primary_key(primary_key) == (partition_key, clustering_key) def test_table(): table = Table(Row) row1 = Row(col1="foo", col2="bar", col3="baz", col4="qux", col5="quux", col6=4) row2 = Row(col1="foo", col2="bar", col3="baz", col4="qux2", col5="quux", col6=4) row3 = Row(col1="foo", col2="bar", col3="baz", col4="qux1", col5="quux", col6=4) row4 = Row(col1="foo", col2="bar2", col3="baz", col4="qux1", col5="quux", col6=4) partition_key = ("foo", "bar") partition_key4 = ("foo", "bar2") primary_key1 = ("foo", "bar", "baz", "qux") primary_key2 = ("foo", "bar", "baz", "qux2") primary_key3 = ("foo", "bar", "baz", "qux1") primary_key4 = ("foo", "bar2", "baz", "qux1") table.insert(row1) table.insert(row2) table.insert(row3) table.insert(row4) assert table.get_from_primary_key(primary_key1) == row1 assert table.get_from_primary_key(primary_key2) == row2 assert table.get_from_primary_key(primary_key3) == row3 assert table.get_from_primary_key(primary_key4) == row4 # order matters assert list(table.get_from_token(table.token(partition_key))) == [row1, row3, row2] # 
order matters assert list(table.get_from_partition_key(partition_key)) == [row1, row3, row2] assert list(table.get_from_partition_key(partition_key4)) == [row4] all_rows = list(table.iter_all()) assert len(all_rows) == 4 for row in (row1, row2, row3, row4): assert (table.primary_key(row), row) in all_rows class TestInMemoryStorage(_TestStorage): @pytest.mark.skip( 'The "person" table of the pgsql is a legacy thing, and not ' "supported by the cassandra backend." ) def test_person_fullname_unicity(self): pass @pytest.mark.skip("content_update is not yet implemented for Cassandra") def test_content_update(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count(self): pass class TestInMemoryStorageGeneratedData(_TestStorageGeneratedData): @pytest.mark.skip("Not supported by Cassandra") def test_origin_count(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count_with_visit_no_visits(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count_with_visit_with_visits_and_snapshot(self): pass @pytest.mark.skip("Not supported by Cassandra") def test_origin_count_with_visit_with_visits_no_snapshot(self): pass diff --git a/swh/storage/tests/test_init.py b/swh/storage/tests/test_init.py index 7d9eb6b7..de487261 100644 --- a/swh/storage/tests/test_init.py +++ b/swh/storage/tests/test_init.py @@ -1,107 +1,107 @@ # Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import pytest from unittest.mock import patch from swh.storage import get_storage from swh.storage.api.client import RemoteStorage -from swh.storage.storage import Storage as DbStorage +from swh.storage.postgresql.storage import Storage as DbStorage from swh.storage.in_memory import InMemoryStorage from swh.storage.buffer import BufferingProxyStorage from swh.storage.filter import FilteringProxyStorage from swh.storage.retry import RetryingProxyStorage -@patch("swh.storage.storage.psycopg2.pool") +@patch("swh.storage.postgresql.storage.psycopg2.pool") def test_get_storage(mock_pool): """Instantiating an existing storage should be ok """ mock_pool.ThreadedConnectionPool.return_value = None for cls, real_class, dummy_args in [ ("remote", RemoteStorage, {"url": "url"}), ("memory", InMemoryStorage, {}), ( "local", DbStorage, {"db": "postgresql://db", "objstorage": {"cls": "memory", "args": {},},}, ), ("filter", FilteringProxyStorage, {"storage": {"cls": "memory"}}), ("buffer", BufferingProxyStorage, {"storage": {"cls": "memory"}}), ("retry", RetryingProxyStorage, {"storage": {"cls": "memory"}}), ]: actual_storage = get_storage(cls, **dummy_args) assert actual_storage is not None assert isinstance(actual_storage, real_class) -@patch("swh.storage.storage.psycopg2.pool") +@patch("swh.storage.postgresql.storage.psycopg2.pool") def test_get_storage_legacy_args(mock_pool): """Instantiating an existing storage should be ok even with the legacy explicit 'args' keys """ mock_pool.ThreadedConnectionPool.return_value = None for cls, real_class, dummy_args in [ ("remote", RemoteStorage, {"url": "url"}), ("memory", InMemoryStorage, {}), ( "local", DbStorage, {"db": "postgresql://db", "objstorage": {"cls": "memory", "args": {},},}, ), ("filter", FilteringProxyStorage, {"storage": {"cls": "memory", "args": {}}}), ("buffer", BufferingProxyStorage, {"storage": {"cls": "memory", "args": {}}}), ]: with 
pytest.warns(DeprecationWarning): actual_storage = get_storage(cls, args=dummy_args) assert actual_storage is not None assert isinstance(actual_storage, real_class) def test_get_storage_failure(): """Instantiating an unknown storage should raise """ with pytest.raises(ValueError, match="Unknown storage class `unknown`"): get_storage("unknown", args=[]) def test_get_storage_pipeline(): config = { "cls": "pipeline", "steps": [ {"cls": "filter",}, {"cls": "buffer", "min_batch_size": {"content": 10,},}, {"cls": "memory",}, ], } storage = get_storage(**config) assert isinstance(storage, FilteringProxyStorage) assert isinstance(storage.storage, BufferingProxyStorage) assert isinstance(storage.storage.storage, InMemoryStorage) def test_get_storage_pipeline_legacy_args(): config = { "cls": "pipeline", "steps": [ {"cls": "filter",}, {"cls": "buffer", "args": {"min_batch_size": {"content": 10,},}}, {"cls": "memory",}, ], } with pytest.warns(DeprecationWarning): storage = get_storage(**config) assert isinstance(storage, FilteringProxyStorage) assert isinstance(storage.storage, BufferingProxyStorage) assert isinstance(storage.storage.storage, InMemoryStorage) diff --git a/swh/storage/tests/test_postgresql.py b/swh/storage/tests/test_postgresql.py new file mode 100644 index 00000000..9b096dd1 --- /dev/null +++ b/swh/storage/tests/test_postgresql.py @@ -0,0 +1,256 @@ +# Copyright (C) 2015-2020 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from contextlib import contextmanager +import queue +import threading +from unittest.mock import Mock + +import attr +import pytest + +from swh.storage.tests.storage_tests import TestStorage # noqa +from swh.storage.tests.storage_tests import TestStorageGeneratedData # noqa +from swh.storage.utils import now + + +@contextmanager +def db_transaction(storage): + with storage.db() as db: + with db.transaction() as cur: + yield db, cur + + +@pytest.mark.db +class TestLocalStorage: + """Test the local storage""" + + # This test is only relevant on the local storage, with an actual + # objstorage raising an exception + def test_content_add_objstorage_exception(self, swh_storage, sample_data): + content = sample_data.content + + swh_storage.objstorage.content_add = Mock( + side_effect=Exception("mocked broken objstorage") + ) + + with pytest.raises(Exception, match="mocked broken"): + swh_storage.content_add([content]) + + missing = list(swh_storage.content_missing([content.hashes()])) + assert missing == [content.sha1] + + +@pytest.mark.db +class TestStorageRaceConditions: + @pytest.mark.xfail + def test_content_add_race(self, swh_storage, sample_data): + content = sample_data.content + + results = queue.Queue() + + def thread(): + try: + with db_transaction(swh_storage) as (db, cur): + ret = swh_storage.content_add([content], db=db, cur=cur) + results.put((threading.get_ident(), "data", ret)) + except Exception as e: + results.put((threading.get_ident(), "exc", e)) + + t1 = threading.Thread(target=thread) + t2 = threading.Thread(target=thread) + t1.start() + # this avoids the race condition + # import time + # time.sleep(1) + t2.start() + t1.join() + t2.join() + + r1 = results.get(block=False) + r2 = results.get(block=False) + + with pytest.raises(queue.Empty): + results.get(block=False) + assert r1[0] != r2[0] + assert r1[1] == "data", "Got exception %r in Thread%s" % (r1[2], r1[0]) + assert 
r2[1] == "data", "Got exception %r in Thread%s" % (r2[2], r2[0]) + + +@pytest.mark.db +class TestPgStorage: + """This class is dedicated for the rare case where the schema needs to + be altered dynamically. + + Otherwise, the tests could be blocking when ran altogether. + + """ + + def test_content_update_with_new_cols(self, swh_storage, sample_data): + content, content2 = sample_data.contents[:2] + + swh_storage.journal_writer.journal = None # TODO, not supported + + with db_transaction(swh_storage) as (_, cur): + cur.execute( + """alter table content + add column test text default null, + add column test2 text default null""" + ) + + swh_storage.content_add([content]) + + cont = content.to_dict() + cont["test"] = "value-1" + cont["test2"] = "value-2" + + swh_storage.content_update([cont], keys=["test", "test2"]) + with db_transaction(swh_storage) as (_, cur): + cur.execute( + """SELECT sha1, sha1_git, sha256, length, status, + test, test2 + FROM content WHERE sha1 = %s""", + (cont["sha1"],), + ) + + datum = cur.fetchone() + + assert datum == ( + cont["sha1"], + cont["sha1_git"], + cont["sha256"], + cont["length"], + "visible", + cont["test"], + cont["test2"], + ) + + with db_transaction(swh_storage) as (_, cur): + cur.execute( + """alter table content drop column test, + drop column test2""" + ) + + def test_content_add_db(self, swh_storage, sample_data): + content = sample_data.content + + actual_result = swh_storage.content_add([content]) + + assert actual_result == { + "content:add": 1, + "content:add:bytes": content.length, + } + + if hasattr(swh_storage, "objstorage"): + assert content.sha1 in swh_storage.objstorage.objstorage + + with db_transaction(swh_storage) as (_, cur): + cur.execute( + "SELECT sha1, sha1_git, sha256, length, status" + " FROM content WHERE sha1 = %s", + (content.sha1,), + ) + datum = cur.fetchone() + + assert datum == ( + content.sha1, + content.sha1_git, + content.sha256, + content.length, + "visible", + ) + + contents = [ + obj + for (obj_type, obj) in swh_storage.journal_writer.journal.objects + if obj_type == "content" + ] + assert len(contents) == 1 + assert contents[0] == attr.evolve(content, data=None) + + def test_content_add_metadata_db(self, swh_storage, sample_data): + content = attr.evolve(sample_data.content, data=None, ctime=now()) + + actual_result = swh_storage.content_add_metadata([content]) + + assert actual_result == { + "content:add": 1, + } + + if hasattr(swh_storage, "objstorage"): + assert content.sha1 not in swh_storage.objstorage.objstorage + with db_transaction(swh_storage) as (_, cur): + cur.execute( + "SELECT sha1, sha1_git, sha256, length, status" + " FROM content WHERE sha1 = %s", + (content.sha1,), + ) + datum = cur.fetchone() + assert datum == ( + content.sha1, + content.sha1_git, + content.sha256, + content.length, + "visible", + ) + + contents = [ + obj + for (obj_type, obj) in swh_storage.journal_writer.journal.objects + if obj_type == "content" + ] + assert len(contents) == 1 + assert contents[0] == content + + def test_skipped_content_add_db(self, swh_storage, sample_data): + content, cont2 = sample_data.skipped_contents[:2] + content2 = attr.evolve(cont2, blake2s256=None) + + actual_result = swh_storage.skipped_content_add([content, content, content2]) + + assert 2 <= actual_result.pop("skipped_content:add") <= 3 + assert actual_result == {} + + with db_transaction(swh_storage) as (_, cur): + cur.execute( + "SELECT sha1, sha1_git, sha256, blake2s256, " + "length, status, reason " + "FROM skipped_content ORDER BY sha1_git" 
+ ) + + dbdata = cur.fetchall() + + assert len(dbdata) == 2 + assert dbdata[0] == ( + content.sha1, + content.sha1_git, + content.sha256, + content.blake2s256, + content.length, + "absent", + "Content too long", + ) + + assert dbdata[1] == ( + content2.sha1, + content2.sha1_git, + content2.sha256, + content2.blake2s256, + content2.length, + "absent", + "Content too long", + ) + + def test_clear_buffers(self, swh_storage): + """Calling clear buffers on real storage does nothing + + """ + assert swh_storage.clear_buffers() is None + + def test_flush(self, swh_storage): + """Calling clear buffers on real storage does nothing + + """ + assert swh_storage.flush() == {} diff --git a/swh/storage/tests/test_converters.py b/swh/storage/tests/test_postgresql_converters.py similarity index 99% rename from swh/storage/tests/test_converters.py rename to swh/storage/tests/test_postgresql_converters.py index f9347e92..2f263934 100644 --- a/swh/storage/tests/test_converters.py +++ b/swh/storage/tests/test_postgresql_converters.py @@ -1,167 +1,167 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information from swh.model.model import ( ObjectType, Person, Release, Revision, RevisionType, Timestamp, TimestampWithTimezone, ) -from swh.storage import converters +from swh.storage.postgresql import converters def test_date_to_db(): date_to_db = converters.date_to_db assert date_to_db(None) == {"timestamp": None, "offset": 0, "neg_utc_offset": None} assert date_to_db( TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0,), offset=120, negative_utc=False, ) ) == { "timestamp": "2009-02-13T23:31:30+00:00", "offset": 120, "neg_utc_offset": False, } assert date_to_db( TimestampWithTimezone( timestamp=Timestamp(seconds=1123456789, microseconds=0,), offset=0, negative_utc=True, ) ) == { "timestamp": "2005-08-07T23:19:49+00:00", "offset": 0, "neg_utc_offset": True, } assert date_to_db( TimestampWithTimezone( timestamp=Timestamp(seconds=1234567890, microseconds=0,), offset=42, negative_utc=False, ) ) == { "timestamp": "2009-02-13T23:31:30+00:00", "offset": 42, "neg_utc_offset": False, } assert date_to_db( TimestampWithTimezone( timestamp=Timestamp(seconds=1634366813, microseconds=0,), offset=-120, negative_utc=False, ) ) == { "timestamp": "2021-10-16T06:46:53+00:00", "offset": -120, "neg_utc_offset": False, } def test_db_to_author(): # when actual_author = converters.db_to_author(b"fullname", b"name", b"email") # then assert actual_author == Person(fullname=b"fullname", name=b"name", email=b"email",) def test_db_to_author_none(): # when actual_author = converters.db_to_author(None, None, None) # then assert actual_author is None def test_db_to_revision(): # when actual_revision = converters.db_to_revision( { "id": b"revision-id", "date": None, "date_offset": None, "date_neg_utc_offset": None, "committer_date": None, "committer_date_offset": None, "committer_date_neg_utc_offset": None, "type": "git", "directory": b"dir-sha1", "message": b"commit message", "author_fullname": b"auth-fullname", "author_name": b"auth-name", "author_email": b"auth-email", "committer_fullname": b"comm-fullname", "committer_name": b"comm-name", "committer_email": b"comm-email", "metadata": {}, "synthetic": False, "extra_headers": (), "parents": [b"123", b"456"], } ) # then assert actual_revision == Revision( id=b"revision-id", 
author=Person( fullname=b"auth-fullname", name=b"auth-name", email=b"auth-email", ), date=None, committer=Person( fullname=b"comm-fullname", name=b"comm-name", email=b"comm-email", ), committer_date=None, type=RevisionType.GIT, directory=b"dir-sha1", message=b"commit message", metadata={}, synthetic=False, extra_headers=(), parents=(b"123", b"456"), ) def test_db_to_release(): # when actual_release = converters.db_to_release( { "id": b"release-id", "target": b"revision-id", "target_type": "revision", "date": None, "date_offset": None, "date_neg_utc_offset": None, "name": b"release-name", "comment": b"release comment", "synthetic": True, "author_fullname": b"auth-fullname", "author_name": b"auth-name", "author_email": b"auth-email", } ) # then assert actual_release == Release( author=Person( fullname=b"auth-fullname", name=b"auth-name", email=b"auth-email", ), date=None, id=b"release-id", name=b"release-name", message=b"release comment", synthetic=True, target=b"revision-id", target_type=ObjectType.REVISION, ) diff --git a/swh/storage/tests/test_revision_bw_compat.py b/swh/storage/tests/test_revision_bw_compat.py index 83216a34..8da40547 100644 --- a/swh/storage/tests/test_revision_bw_compat.py +++ b/swh/storage/tests/test_revision_bw_compat.py @@ -1,47 +1,47 @@ # Copyright (C) 2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import attr from swh.core.utils import decode_with_escape from swh.model.model import Revision from swh.storage import get_storage -from swh.storage.tests.test_storage import db_transaction +from swh.storage.tests.test_postgresql import db_transaction def headers_to_db(git_headers): return [[key, decode_with_escape(value)] for key, value in git_headers] def test_revision_extra_header_in_metadata(swh_storage_backend_config, sample_data): storage = get_storage(**swh_storage_backend_config) rev = sample_data.revision md_w_extra = dict( rev.metadata.items(), extra_headers=headers_to_db( [ ["gpgsig", b"test123"], ["mergetag", b"foo\\bar"], ["mergetag", b"\x22\xaf\x89\x80\x01\x00"], ] ), ) bw_rev = attr.evolve(rev, extra_headers=()) object.__setattr__(bw_rev, "metadata", md_w_extra) assert bw_rev.extra_headers == () assert storage.revision_add([bw_rev]) == {"revision:add": 1} # check data in the db are old format with db_transaction(storage) as (_, cur): cur.execute("SELECT metadata, extra_headers FROM revision") metadata, extra_headers = cur.fetchone() assert extra_headers == [] assert metadata == bw_rev.metadata # check the Revision build from revision_get is the original, "new style", Revision assert [Revision.from_dict(x) for x in storage.revision_get([rev.id])] == [rev]
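As a reading aid for test_revision_extra_header_in_metadata above, here is a minimal sketch, not part of the patch, of the two revision representations the backward-compatibility path reconciles; the field layouts are inferred from the test code itself rather than from the swh.model documentation.

# Minimal sketch, assuming the layouts shown in the test above.

# Old on-disk layout: extra headers were serialized inside the JSON
# `metadata` column (values escaped to str, cf. headers_to_db), and the
# dedicated `extra_headers` column stayed empty.
old_style_row = {
    "message": b"commit message",
    "metadata": {"extra_headers": [["gpgsig", "test123"]]},
    "extra_headers": [],
}

# New layout: `extra_headers` is a field of the Revision model, a tuple of
# (bytes, bytes) pairs, and no longer lives inside `metadata`.
new_style_revision = {
    "message": b"commit message",
    "metadata": {},
    "extra_headers": ((b"gpgsig", b"test123"),),
}

# The test asserts both directions: the database row keeps the old layout,
# while revision_get() rebuilds a new-style Revision with extra_headers set.
assert old_style_row["metadata"]["extra_headers"] == [["gpgsig", "test123"]]
assert new_style_revision["extra_headers"][0] == (b"gpgsig", b"test123")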