diff --git a/sql/upgrades/140.sql b/sql/upgrades/140.sql new file mode 100644 index 00000000..6dc26078 --- /dev/null +++ b/sql/upgrades/140.sql @@ -0,0 +1,11 @@ +-- SWH DB schema upgrade +-- from_version: 139 +-- to_version: 140 +-- description: Add constraint checking that release.author is null implies that release.date is null. + +insert into dbversion(version, release, description) + values(140, now(), 'Work In Progress'); + +alter table release add constraint release_author_date_check check ((date is null) or (author is not null)) not valid; +alter table release validate constraint release_author_date_check; + diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py index 0a9d241b..e52faba5 100644 --- a/swh/storage/in_memory.py +++ b/swh/storage/in_memory.py @@ -1,1720 +1,1724 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import re import bisect import dateutil import collections from collections import defaultdict import copy import datetime import itertools import random import warnings import attr -from swh.model.model import Content, Directory +from swh.model.model import Content, Directory, Revision, Release from swh.model.hashutil import DEFAULT_ALGORITHMS -from swh.model.identifiers import normalize_timestamp from swh.objstorage import get_objstorage from swh.objstorage.exc import ObjNotFoundError from .journal_writer import get_journal_writer # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 def now(): return datetime.datetime.now(tz=datetime.timezone.utc) ENABLE_ORIGIN_IDS = \ os.environ.get('SWH_STORAGE_IN_MEMORY_ENABLE_ORIGIN_IDS', 'true') == 'true' class Storage: def __init__(self, journal_writer=None): self._contents = {} self._content_indexes = defaultdict(lambda: defaultdict(set)) self._skipped_contents = {} self._skipped_content_indexes = defaultdict(lambda: defaultdict(set)) self.reset() if journal_writer: self.journal_writer = get_journal_writer(**journal_writer) else: self.journal_writer = None def reset(self): self._directories = {} self._revisions = {} self._releases = {} self._snapshots = {} self._origins = {} self._origins_by_id = [] self._origin_visits = {} self._persons = [] self._origin_metadata = defaultdict(list) self._tools = {} self._metadata_providers = {} self._objects = defaultdict(list) # ideally we would want a skip list for both fast inserts and searches self._sorted_sha1s = [] self.objstorage = get_objstorage('memory', {}) def check_config(self, *, check_write): """Check that the storage is configured and ready to go.""" return True def _content_add(self, contents, with_data): if self.journal_writer: for content in contents: content = attr.evolve(content, data=None) self.journal_writer.write_addition('content', content) content_with_data = [] content_without_data = [] for content in contents: if content.status is None: content.status = 'visible' if content.length is None: content.length = -1 if content.status == 'visible': content_with_data.append(content) elif content.status == 'absent': content_without_data.append(content) count_content_added, count_content_bytes_added = \ self._content_add_present(content_with_data, with_data) count_skipped_content_added = self._content_add_absent( content_without_data ) summary = { 'content:add': count_content_added, 'skipped_content:add': 
count_skipped_content_added, } if with_data: summary['content:add:bytes'] = count_content_bytes_added return summary def _content_add_present(self, contents, with_data): count_content_added = 0 count_content_bytes_added = 0 for content in contents: key = self._content_key(content) if key in self._contents: continue for algorithm in DEFAULT_ALGORITHMS: hash_ = content.get_hash(algorithm) if hash_ in self._content_indexes[algorithm]\ and (algorithm not in {'blake2s256', 'sha256'}): from . import HashCollision raise HashCollision(algorithm, hash_, key) for algorithm in DEFAULT_ALGORITHMS: hash_ = content.get_hash(algorithm) self._content_indexes[algorithm][hash_].add(key) self._objects[content.sha1_git].append( ('content', content.sha1)) self._contents[key] = content bisect.insort(self._sorted_sha1s, content.sha1) count_content_added += 1 if with_data: content_data = self._contents[key].data self._contents[key].data = None count_content_bytes_added += len(content_data) self.objstorage.add(content_data, content.sha1) return (count_content_added, count_content_bytes_added) def _content_add_absent(self, contents): count = 0 skipped_content_missing = self.skipped_content_missing(contents) for content in skipped_content_missing: key = self._content_key(content) for algo in DEFAULT_ALGORITHMS: self._skipped_content_indexes[algo][content.get_hash(algo)] \ .add(key) self._skipped_contents[key] = content count += 1 return count def content_add(self, content): """Add content blobs to the storage Args: content (iterable): iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - data (bytes): the actual content - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent - reason (str): if status = absent, the reason why - origin (int): if status = absent, the origin we saw the content in Raises: HashCollision in case of collision Returns: Summary dict with the following key and associated values: content:add: New contents added content:add:bytes: Sum of the lengths of the added contents' data skipped_content:add: New skipped contents (no data) added """ content = [Content.from_dict(c) for c in content] now = datetime.datetime.now(tz=datetime.timezone.utc) for item in content: item.ctime = now return self._content_add(content, with_data=True) def content_add_metadata(self, content): """Add content metadata to the storage (like `content_add`, but without inserting to the objstorage). Args: content (iterable): iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent - reason (str): if status = absent, the reason why - origin (int): if status = absent, the origin we saw the content in - ctime (datetime): time of insertion in the archive Raises: HashCollision in case of collision Returns: Summary dict with the following key and associated values: content:add: New contents added skipped_content:add: New skipped contents (no data) added """ content = [Content.from_dict(c) for c in content] return self._content_add(content, with_data=False) def content_get(self, content): """Retrieve in bulk contents and their data.
This function may yield more blobs than provided sha1 identifiers, in case they collide. Args: content: iterable of sha1s Yields: Dict[str, bytes]: Generates streams of contents as dict with their raw data: - sha1 (bytes): content id - data (bytes): content's raw data Raises: ValueError when more contents than BULK_BLOCK_CONTENT_LEN_MAX are requested. """ # FIXME: Make this method support slicing the `data`. if len(content) > BULK_BLOCK_CONTENT_LEN_MAX: raise ValueError( "Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX) for obj_id in content: try: data = self.objstorage.get(obj_id) except ObjNotFoundError: yield None continue yield {'sha1': obj_id, 'data': data} def content_get_range(self, start, end, limit=1000, db=None, cur=None): """Retrieve contents within range [start, end] bound by limit. Note that this function may return more than one blob per hash. The limit is enforced with multiplicity (i.e. two blobs with the same hash will count twice toward the limit). Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **limit** (int): Limit result (default to 1000) Returns: a dict with keys: - contents [dict]: iterable of contents in between the range. - next (bytes): if not None, content remains in the range, starting from this next sha1 """ if limit is None: raise ValueError('Development error: limit should not be None') from_index = bisect.bisect_left(self._sorted_sha1s, start) sha1s = itertools.islice(self._sorted_sha1s, from_index, None) sha1s = ((sha1, content_key) for sha1 in sha1s for content_key in self._content_indexes['sha1'][sha1]) matched = [] next_content = None for sha1, key in sha1s: if sha1 > end: break if len(matched) >= limit: next_content = sha1 break matched.append(self._contents[key].to_dict()) return { 'contents': matched, 'next': next_content, } def content_get_metadata(self, content): """Retrieve content metadata in bulk Args: content: iterable of content identifiers (sha1) Returns: an iterable with content metadata corresponding to the given ids """ # FIXME: the return value should be a mapping from search key to found # content*s* for sha1 in content: if sha1 in self._content_indexes['sha1']: objs = self._content_indexes['sha1'][sha1] # FIXME: rather than selecting one of the objects with that # hash, we should return all of them. See: # https://forge.softwareheritage.org/D645?id=1994#inline-3389 key = random.sample(objs, 1)[0] d = self._contents[key].to_dict() del d['ctime'] yield d else: # FIXME: should really be None yield { 'sha1': sha1, 'sha1_git': None, 'sha256': None, 'blake2s256': None, 'length': None, 'status': None, } def content_find(self, content): if not set(content).intersection(DEFAULT_ALGORITHMS): raise ValueError('content keys must contain at least one of: ' '%s' % ', '.join(sorted(DEFAULT_ALGORITHMS))) found = [] for algo in DEFAULT_ALGORITHMS: hash = content.get(algo) if hash and hash in self._content_indexes[algo]: found.append(self._content_indexes[algo][hash]) if not found: return [] keys = list(set.intersection(*found)) return [self._contents[key].to_dict() for key in keys] def content_missing(self, content, key_hash='sha1'): """List content missing from storage Args: content ([dict]): iterable of dictionaries whose keys are either 'length' or an item of :data:`swh.model.hashutil.ALGORITHMS`; mapped to the corresponding checksum (or length).
key_hash (str): name of the column to use as hash id result (default: 'sha1') Returns: iterable ([bytes]): missing content ids (as per the key_hash column) """ for cont in content: for (algo, hash_) in cont.items(): if algo not in DEFAULT_ALGORITHMS: continue if hash_ not in self._content_indexes.get(algo, []): yield cont[key_hash] break else: for result in self.content_find(cont): if result['status'] == 'missing': yield cont[key_hash] def content_missing_per_sha1(self, contents): """List content missing from storage based only on sha1. Args: contents: Iterable of sha1 to check for absence. Returns: iterable: missing ids Raises: TODO: an exception when we get a hash collision. """ for content in contents: if content not in self._content_indexes['sha1']: yield content def skipped_content_missing(self, contents): """List all skipped_content missing from storage Args: contents: Iterable of sha1 to check for skipped content entry Returns: iterable: dict of skipped content entry """ for content in contents: for (key, algorithm) in self._content_key_algorithm(content): if algorithm == 'blake2s256': continue if key not in self._skipped_content_indexes[algorithm]: # index must contain hashes of algos except blake2s256 # else the content is considered skipped yield content break def directory_add(self, directories): """Add directories to the storage Args: directories (iterable): iterable of dictionaries representing the individual directories to add. Each dict has the following keys: - id (sha1_git): the id of the directory to add - entries (list): list of dicts for each entry in the directory. Each dict has the following keys: - name (bytes) - type (one of 'file', 'dir', 'rev'): type of the directory entry (file, directory, revision) - target (sha1_git): id of the object pointed at by the directory entry - perms (int): entry permissions Returns: Summary dict of keys with associated count as values: directory:add: Number of directories actually added """ if self.journal_writer: self.journal_writer.write_additions('directory', directories) directories = [Directory.from_dict(d) for d in directories] count = 0 for directory in directories: if directory.id not in self._directories: count += 1 self._directories[directory.id] = directory self._objects[directory.id].append( ('directory', directory.id)) return {'directory:add': count} def directory_missing(self, directories): """List directories missing from storage Args: directories (iterable): an iterable of directory ids Yields: missing directory ids """ for id in directories: if id not in self._directories: yield id def _join_dentry_to_content(self, dentry): keys = ( 'status', 'sha1', 'sha1_git', 'sha256', 'length', ) ret = dict.fromkeys(keys) ret.update(dentry) if ret['type'] == 'file': # TODO: Make it able to handle more than one content content = self.content_find({'sha1_git': ret['target']}) if content: content = content[0] for key in keys: ret[key] = content[key] return ret def _directory_ls(self, directory_id, recursive, prefix=b''): if directory_id in self._directories: for entry in self._directories[directory_id].entries: ret = self._join_dentry_to_content(entry.to_dict()) ret['name'] = prefix + ret['name'] ret['dir_id'] = directory_id yield ret if recursive and ret['type'] == 'dir': yield from self._directory_ls( ret['target'], True, prefix + ret['name'] + b'/') def directory_ls(self, directory, recursive=False): """Get entries for one directory. Args: - directory: the directory to list entries from. 
- recursive: if set, recursively list entries under this directory. Returns: List of entries in the directory. If `recursive=True`, names in the path of a dir/file not at the root are concatenated with a slash (`/`). """ yield from self._directory_ls(directory, recursive) def directory_entry_get_by_path(self, directory, paths): """Get the directory entry (either file or dir) from the given directory, following the given path. Args: - directory: sha1 of the top level directory - paths: path components to look up, from the top level directory (left) down to the entry (right). Returns: The corresponding directory entry if found, None otherwise. """ return self._directory_entry_get_by_path(directory, paths, b'') def _directory_entry_get_by_path(self, directory, paths, prefix): if not paths: return contents = list(self.directory_ls(directory)) if not contents: return def _get_entry(entries, name): for entry in entries: if entry['name'] == name: entry = entry.copy() entry['name'] = prefix + entry['name'] return entry first_item = _get_entry(contents, paths[0]) if len(paths) == 1: return first_item if not first_item or first_item['type'] != 'dir': return return self._directory_entry_get_by_path( first_item['target'], paths[1:], prefix + paths[0] + b'/') def revision_add(self, revisions): """Add revisions to the storage Args: revisions (Iterable[dict]): iterable of dictionaries representing the individual revisions to add. Each dict has the following keys: - **id** (:class:`sha1_git`): id of the revision to add - **date** (:class:`dict`): date the revision was written - **committer_date** (:class:`dict`): date the revision got added to the origin - **type** (one of 'git', 'tar'): type of the revision added - **directory** (:class:`sha1_git`): the directory the revision points at - **message** (:class:`bytes`): the message associated with the revision - **author** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email - **committer** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email - **metadata** (:class:`jsonb`): extra information as dictionary - **synthetic** (:class:`bool`): revision's nature (tarball and directory imports create synthetic revisions) - **parents** (:class:`list[sha1_git]`): the parents of this revision date dictionaries have the form defined in :mod:`swh.model`.
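As an illustration (hypothetical values; the exact form is defined by :mod:`swh.model`), a normalized date dictionary looks like::

    {
        'timestamp': {'seconds': 1565096932, 'microseconds': 0},
        'offset': 120,           # UTC offset, in minutes
        'negative_utc': False,   # True only for a negative zero offset
    }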
Returns: Summary dict of keys with associated count as values revision:add: New objects actually stored in db """ if self.journal_writer: self.journal_writer.write_additions('revision', revisions) + revisions = [Revision.from_dict(rev) for rev in revisions] + count = 0 for revision in revisions: - if revision['id'] not in self._revisions: - self._revisions[revision['id']] = rev = copy.deepcopy(revision) - self._person_add(rev['committer']) - self._person_add(rev['author']) - rev['date'] = normalize_timestamp(rev.get('date')) - rev['committer_date'] = normalize_timestamp( - rev.get('committer_date')) - self._objects[revision['id']].append( - ('revision', revision['id'])) + if revision.id not in self._revisions: + revision.committer = self._person_add(revision.committer) + revision.author = self._person_add(revision.author) + self._revisions[revision.id] = revision + self._objects[revision.id].append( + ('revision', revision.id)) count += 1 return {'revision:add': count} def revision_missing(self, revisions): """List revisions missing from storage Args: revisions (iterable): revision ids Yields: missing revision ids """ for id in revisions: if id not in self._revisions: yield id def revision_get(self, revisions): for id in revisions: - yield copy.deepcopy(self._revisions.get(id)) + if id in self._revisions: + yield self._revisions.get(id).to_dict() + else: + yield None def _get_parent_revs(self, rev_id, seen, limit): if limit and len(seen) >= limit: return if rev_id in seen or rev_id not in self._revisions: return seen.add(rev_id) - yield self._revisions[rev_id] - for parent in self._revisions[rev_id]['parents']: + yield self._revisions[rev_id].to_dict() + for parent in self._revisions[rev_id].parents: yield from self._get_parent_revs(parent, seen, limit) def revision_log(self, revisions, limit=None): """Fetch revision entries from the given root revisions. Args: revisions: array of root revisions to look up limit: maximum number of revisions to return. Defaults to None. Yields: revision entries reachable from the given roots. """ seen = set() for rev_id in revisions: yield from self._get_parent_revs(rev_id, seen, limit) def revision_shortlog(self, revisions, limit=None): """Fetch the shortlog for the given revisions Args: revisions: list of root revisions to look up limit: depth limitation for the output Yields: a list of (id, parents) tuples. """ yield from ((rev['id'], rev['parents']) for rev in self.revision_log(revisions, limit)) def release_add(self, releases): """Add releases to the storage Args: releases (Iterable[dict]): iterable of dictionaries representing the individual releases to add. Each dict has the following keys: - **id** (:class:`sha1_git`): id of the release to add - **revision** (:class:`sha1_git`): id of the revision the release points to - **date** (:class:`dict`): the date the release was made - **name** (:class:`bytes`): the name of the release - **comment** (:class:`bytes`): the comment associated with the release - **author** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email the date dictionary has the form defined in :mod:`swh.model`.
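Note that the schema now enforces that a release without an author also carries no date (the `release_author_date_check` constraint added in this change). A minimal sketch, with the hypothetical `common_fields` standing in for the other keys above::

    release = {**common_fields, 'author': None, 'date': None}       # accepted
    release = {**common_fields, 'author': None, 'date': some_date}  # rejected by the new check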
Returns: Summary dict of keys with associated count as values release:add: New releases actually stored in db """ if self.journal_writer: self.journal_writer.write_additions('release', releases) + releases = [Release.from_dict(rel) for rel in releases] + count = 0 for rel in releases: - if rel['id'] not in self._releases: - rel = copy.deepcopy(rel) - rel['date'] = normalize_timestamp(rel['date']) - if rel['author']: - self._person_add(rel['author']) - self._objects[rel['id']].append( - ('release', rel['id'])) - self._releases[rel['id']] = rel + if rel.id not in self._releases: + if rel.author: + self._person_add(rel.author) + self._objects[rel.id].append( + ('release', rel.id)) + self._releases[rel.id] = rel count += 1 return {'release:add': count} def release_missing(self, releases): """List releases missing from storage Args: releases: an iterable of release ids Returns: a list of missing release ids """ yield from (rel for rel in releases if rel not in self._releases) def release_get(self, releases): """Given a list of sha1s, return the releases' information Args: releases: list of sha1s Yields: dicts with the same keys as those given to `release_add` (or ``None`` if a release does not exist) """ for rel_id in releases: - yield copy.deepcopy(self._releases.get(rel_id)) + if rel_id in self._releases: + yield self._releases[rel_id].to_dict() + else: + yield None def snapshot_add(self, snapshots): """Add snapshots to the storage Args: snapshots ([dict]): the snapshots to add, containing the following keys: - **id** (:class:`bytes`): id of the snapshot - **branches** (:class:`dict`): branches the snapshot contains, mapping the branch name (:class:`bytes`) to the branch target, itself a :class:`dict` (or ``None`` if the branch points to an unknown object) - **target_type** (:class:`str`): one of ``content``, ``directory``, ``revision``, ``release``, ``snapshot``, ``alias`` - **target** (:class:`bytes`): identifier of the target (currently a ``sha1_git`` for all object kinds, or the name of the target branch for aliases) Returns: Summary dict of keys with associated count as values snapshot:add: Count of objects actually stored in db """ count = 0 for snapshot in snapshots: snapshot_id = snapshot['id'] if snapshot_id not in self._snapshots: if self.journal_writer: self.journal_writer.write_addition('snapshot', snapshot) self._snapshots[snapshot_id] = { 'id': snapshot_id, 'branches': copy.deepcopy(snapshot['branches']), '_sorted_branch_names': sorted(snapshot['branches']) } self._objects[snapshot_id].append(('snapshot', snapshot_id)) count += 1 return {'snapshot:add': count} def snapshot_get(self, snapshot_id): """Get the content, possibly partial, of a snapshot with the given id The branches of the snapshot are iterated in the lexicographical order of their names. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. In order to browse the whole set of branches, the method :meth:`snapshot_get_branches` should be used instead. Args: snapshot_id (bytes): identifier of the snapshot Returns: dict: a dict with three keys: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than 1000 branches.
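For instance, with hypothetical identifiers, a returned snapshot looks like::

    {
        'id': snapshot_id,
        'branches': {
            b'refs/heads/master': {
                'target': revision_id,
                'target_type': 'revision',
            },
        },
        'next_branch': None,
    }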
""" return self.snapshot_get_branches(snapshot_id) def snapshot_get_by_origin_visit(self, origin, visit): """Get the content, possibly partial, of a snapshot for the given origin visit The branches of the snapshot are iterated in the lexicographical order of their names. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. In order to browse the whole set of branches, the method :meth:`snapshot_get_branches` should be used instead. Args: origin (int): the origin's identifier visit (int): the visit's identifier Returns: dict: None if the snapshot does not exist; a dict with three keys otherwise: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than 1000 branches. """ origin_url = self._get_origin_url(origin) if not origin_url: return if origin_url not in self._origins or \ visit > len(self._origin_visits[origin_url]): return None snapshot_id = self._origin_visits[origin_url][visit-1]['snapshot'] if snapshot_id: return self.snapshot_get(snapshot_id) else: return None def snapshot_get_latest(self, origin, allowed_statuses=None): """Get the content, possibly partial, of the latest snapshot for the given origin, optionally only from visits that have one of the given allowed_statuses The branches of the snapshot are iterated in the lexicographical order of their names. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. In order to browse the whole set of branches, the methods :meth:`origin_visit_get_latest` and :meth:`snapshot_get_branches` should be used instead. Args: origin (Union[str,int]): the origin's URL or identifier allowed_statuses (list of str): list of visit statuses considered to find the latest snapshot for the origin. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. Returns: dict: a dict with three keys: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than 1000 branches. """ origin_url = self._get_origin_url(origin) if not origin_url: return visit = self.origin_visit_get_latest( origin_url, allowed_statuses=allowed_statuses, require_snapshot=True) if visit and visit['snapshot']: snapshot = self.snapshot_get(visit['snapshot']) if not snapshot: raise ValueError( 'last origin visit references an unknown snapshot') return snapshot def snapshot_count_branches(self, snapshot_id, db=None, cur=None): """Count the number of branches in the snapshot with the given id Args: snapshot_id (bytes): identifier of the snapshot Returns: dict: A dict whose keys are the target types of branches and values their corresponding amount """ branches = list(self._snapshots[snapshot_id]['branches'].values()) return collections.Counter(branch['target_type'] if branch else None for branch in branches) def snapshot_get_branches(self, snapshot_id, branches_from=b'', branches_count=1000, target_types=None): """Get the content, possibly partial, of a snapshot with the given id The branches of the snapshot are iterated in the lexicographical order of their names. 
Args: snapshot_id (bytes): identifier of the snapshot branches_from (bytes): optional parameter used to skip branches whose name is lexicographically smaller than it before returning them branches_count (int): optional parameter used to limit the number of returned branches target_types (list): optional parameter used to filter the target types of branch to return (possible values that can be contained in that list are `'content', 'directory', 'revision', 'release', 'snapshot', 'alias'`) Returns: dict: None if the snapshot does not exist; a dict with three keys otherwise: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than `branches_count` branches after `branches_from` included. """ snapshot = self._snapshots.get(snapshot_id) if snapshot is None: return None sorted_branch_names = snapshot['_sorted_branch_names'] from_index = bisect.bisect_left( sorted_branch_names, branches_from) if target_types: next_branch = None branches = {} for branch_name in sorted_branch_names[from_index:]: branch = snapshot['branches'][branch_name] if branch and branch['target_type'] in target_types: if len(branches) < branches_count: branches[branch_name] = branch else: next_branch = branch_name break else: # As there is no 'target_types', we can do that much faster to_index = from_index + branches_count returned_branch_names = sorted_branch_names[from_index:to_index] branches = {branch_name: snapshot['branches'][branch_name] for branch_name in returned_branch_names} if to_index >= len(sorted_branch_names): next_branch = None else: next_branch = sorted_branch_names[to_index] return { 'id': snapshot_id, 'branches': branches, 'next_branch': next_branch, } def object_find_by_sha1_git(self, ids, db=None, cur=None): """Return the objects found with the given ids. Args: ids: a generator of sha1_gits Returns: dict: a mapping from id to the list of objects found. Each object found is itself a dict with keys: - sha1_git: the input id - type: the type of object found - id: the id of the object found - object_id: the numeric id of the object found. """ ret = {} for id_ in ids: objs = self._objects.get(id_, []) ret[id_] = [{ 'sha1_git': id_, 'type': obj[0], 'id': obj[1], 'object_id': id_, } for obj in objs] return ret def origin_get(self, origins): """Return origins, either all identified by their ids or all identified by tuples (type, url). If the url is given and the type is omitted, one of the origins with that url is returned. Args: origins: a list of dictionaries representing the individual origins to find. These dicts have either the key url (and optionally type): - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to or the id: - id (int): the origin's identifier Returns: dict: the origin dictionary with the keys: - id: origin's id - type: origin's type - url: origin's url Raises: ValueError: if the keys match neither url (and optionally type) nor id.
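For example, with a hypothetical URL (the ``id`` key is only present when origin ids are enabled)::

    >>> storage.origin_get([{'url': 'https://github.com/user/repo'}])
    [{'id': 42, 'type': 'git', 'url': 'https://github.com/user/repo'}]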
""" if isinstance(origins, dict): # Old API return_single = True origins = [origins] else: return_single = False # Sanity check to be error-compatible with the pgsql backend if any('id' in origin for origin in origins) \ and not all('id' in origin for origin in origins): raise ValueError( 'Either all origins or none at all should have an "id".') if any('url' in origin for origin in origins) \ and not all('url' in origin for origin in origins): raise ValueError( 'Either all origins or none at all should have ' 'an "url" key.') results = [] for origin in origins: result = None if 'id' in origin: assert ENABLE_ORIGIN_IDS, 'origin ids are disabled' if origin['id'] <= len(self._origins_by_id): result = self._origins[self._origins_by_id[origin['id']-1]] elif 'url' in origin: if origin['url'] in self._origins: result = copy.deepcopy(self._origins[origin['url']]) else: raise ValueError( 'Origin must have either id or url.') results.append(result) if return_single: assert len(results) == 1 return results[0] else: return results def origin_get_range(self, origin_from=1, origin_count=100): """Retrieve ``origin_count`` origins whose ids are greater or equal than ``origin_from``. Origins are sorted by id before retrieving them. Args: origin_from (int): the minimum id of origins to retrieve origin_count (int): the maximum number of origins to retrieve Yields: dicts containing origin information as returned by :meth:`swh.storage.in_memory.Storage.origin_get`. """ origin_from = max(origin_from, 1) if origin_from <= len(self._origins_by_id): max_idx = origin_from + origin_count - 1 if max_idx > len(self._origins_by_id): max_idx = len(self._origins_by_id) for idx in range(origin_from-1, max_idx): yield copy.deepcopy(self._origins[self._origins_by_id[idx]]) def origin_search(self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False, db=None, cur=None): """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls offset (int): number of found origins to skip before returning results limit (int): the maximum number of found origins to return regexp (bool): if True, consider the provided pattern as a regular expression and return origins whose urls match it with_visit (bool): if True, filter out origins with no visit Returns: An iterable of dict containing origin information as returned by :meth:`swh.storage.storage.Storage.origin_get`. """ origins = self._origins.values() if regexp: pat = re.compile(url_pattern) origins = [orig for orig in origins if pat.search(orig['url'])] else: origins = [orig for orig in origins if url_pattern in orig['url']] if with_visit: origins = [orig for orig in origins if len(self._origin_visits[orig['url']]) > 0] if ENABLE_ORIGIN_IDS: origins.sort(key=lambda origin: origin['id']) origins = copy.deepcopy(origins[offset:offset+limit]) return origins def origin_count(self, url_pattern, regexp=False, with_visit=False, db=None, cur=None): """Count origins whose urls contain a provided string pattern or match a provided regular expression. The pattern search in origin urls is performed in a case insensitive way. 
Args: url_pattern (str): the string pattern to search for in origin urls regexp (bool): if True, consider the provided pattern as a regular expression and return origins whose urls match it with_visit (bool): if True, filter out origins with no visit Returns: int: The number of origins matching the search criterion. """ return len(self.origin_search(url_pattern, regexp=regexp, with_visit=with_visit, limit=len(self._origins))) def origin_add(self, origins): """Add origins to the storage Args: origins: list of dictionaries representing the individual origins, with the following keys: - type: the origin type ('git', 'svn', 'deb', ...) - url (bytes): the url the origin points to Returns: list: given origins as dict updated with their id """ origins = copy.deepcopy(origins) for origin in origins: if ENABLE_ORIGIN_IDS: origin['id'] = self.origin_add_one(origin) else: self.origin_add_one(origin) return origins def origin_add_one(self, origin): """Add origin to the storage Args: origin: dictionary representing the individual origin to add. This dict has the following keys: - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to Returns: the id of the added origin, or of the identical one that already exists. """ origin = copy.deepcopy(origin) assert 'id' not in origin if origin['url'] in self._origins: if ENABLE_ORIGIN_IDS: origin_id = self._origins[origin['url']]['id'] else: if self.journal_writer: self.journal_writer.write_addition('origin', origin) if ENABLE_ORIGIN_IDS: # origin ids are in the range [1, +inf[ origin_id = len(self._origins) + 1 origin['id'] = origin_id self._origins_by_id.append(origin['url']) assert len(self._origins_by_id) == origin_id self._origins[origin['url']] = origin self._origin_visits[origin['url']] = [] self._objects[origin['url']].append(('origin', origin['url'])) if ENABLE_ORIGIN_IDS: return origin_id else: return origin['url'] def fetch_history_start(self, origin_id): """Add an entry for origin origin_id in fetch_history. Returns the id of the added fetch_history entry """ assert not ENABLE_ORIGIN_IDS, 'origin ids are disabled' pass def fetch_history_end(self, fetch_history_id, data): """Close the fetch_history entry with id `fetch_history_id`, replacing its data with `data`. """ pass def fetch_history_get(self, fetch_history_id): """Get the fetch_history entry with id `fetch_history_id`. """ raise NotImplementedError('fetch_history_get is deprecated, use ' 'origin_visit_get instead.') def origin_visit_add(self, origin, date=None, type=None, *, ts=None): """Add an origin_visit for the origin at date with status 'ongoing'. For backward compatibility, `type` is optional and defaults to the origin's type. Args: origin (Union[int,str]): visited origin's identifier or URL date: timestamp of such visit type (str): the type of loader used for the visit (hg, git, ...) 
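A minimal sketch of a call, assuming a hypothetical origin URL that was added beforehand::

    storage.origin_visit_add(
        'https://github.com/user/repo',
        date=datetime.datetime.now(tz=datetime.timezone.utc),
        type='git')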
Returns: dict: dictionary with keys origin and visit where: - origin: origin's identifier - visit: the visit's identifier for the new visit occurrence """ if ts is None: if date is None: raise TypeError('origin_visit_add expected 2 arguments.') else: assert date is None warnings.warn("argument 'ts' of origin_visit_add was renamed " "to 'date' in v0.0.109.", DeprecationWarning) date = ts origin_url = self._get_origin_url(origin) if origin_url is None: raise ValueError('Unknown origin.') if isinstance(date, str): date = dateutil.parser.parse(date) visit_ret = None if origin_url in self._origins: origin = self._origins[origin_url] # visit ids are in the range [1, +inf[ visit_id = len(self._origin_visits[origin_url]) + 1 status = 'ongoing' visit = { 'origin': {'url': origin['url']}, 'date': date, 'type': type or origin['type'], 'status': status, 'snapshot': None, 'metadata': None, 'visit': visit_id } self._origin_visits[origin_url].append(visit) visit_ret = { 'origin': origin['id'] if ENABLE_ORIGIN_IDS else origin['url'], 'visit': visit_id, } self._objects[(origin_url, visit_id)].append( ('origin_visit', None)) if self.journal_writer: origin = self._origins[origin_url].copy() if 'id' in origin: del origin['id'] self.journal_writer.write_addition('origin_visit', { **visit, 'origin': origin}) return visit_ret def origin_visit_update(self, origin, visit_id, status=None, metadata=None, snapshot=None): """Update an origin_visit's status. Args: origin (Union[int,str]): visited origin's identifier or URL visit_id (int): visit's identifier status: visit's new status metadata: data associated to the visit snapshot (sha1_git): identifier of the snapshot to add to the visit Returns: None """ origin_url = self._get_origin_url(origin) if origin_url is None: raise ValueError('Unknown origin.') try: visit = self._origin_visits[origin_url][visit_id-1] except IndexError: raise ValueError('Unknown visit_id for this origin') \ from None if self.journal_writer: origin = self._origins[origin_url].copy() if 'id' in origin: del origin['id'] self.journal_writer.write_update('origin_visit', { 'origin': origin, 'type': origin['type'], 'visit': visit_id, 'status': status or visit['status'], 'date': visit['date'], 'metadata': metadata or visit['metadata'], 'snapshot': snapshot or visit['snapshot']}) if origin_url not in self._origin_visits or \ visit_id > len(self._origin_visits[origin_url]): return if status: visit['status'] = status if metadata: visit['metadata'] = metadata if snapshot: visit['snapshot'] = snapshot def origin_visit_upsert(self, visits): """Add a origin_visits with a specific id and with all its data. If there is already an origin_visit with the same `(origin_url, visit_id)`, updates it instead of inserting a new one. 
Args: visits: iterable of dicts with keys: origin: dict with keys either `id` or `url` visit: origin visit id type: type of loader used for the visit date: timestamp of such visit status: Visit's new status metadata: Data associated to the visit snapshot (sha1_git): identifier of the snapshot to add to the visit """ visits = copy.deepcopy(visits) for visit in visits: if isinstance(visit['date'], str): visit['date'] = dateutil.parser.parse(visit['date']) if self.journal_writer: for visit in visits: visit = visit.copy() visit['origin'] = self._origins[visit['origin']['url']].copy() if 'id' in visit['origin']: del visit['origin']['id'] self.journal_writer.write_addition('origin_visit', visit) for visit in visits: visit_id = visit['visit'] origin_url = visit['origin']['url'] self._objects[(origin_url, visit_id)].append( ('origin_visit', None)) while len(self._origin_visits[origin_url]) <= visit_id: self._origin_visits[origin_url].append(None) visit = visit.copy() visit['origin'] = {'url': visit['origin']['url']} visit = self._origin_visits[origin_url][visit_id-1] = visit def _convert_visit(self, visit): if visit is None: return visit = visit.copy() origin = self._origins[visit['origin']['url']] if ENABLE_ORIGIN_IDS: visit['origin'] = origin['id'] else: visit['origin'] = origin['url'] return visit def origin_visit_get(self, origin, last_visit=None, limit=None): """Retrieve all the origin's visit's information. Args: origin (int): the origin's identifier last_visit (int): visit's id from which listing the next ones, default to None limit (int): maximum number of results to return, default to None Yields: List of visits. """ origin_url = self._get_origin_url(origin) if origin_url in self._origin_visits: visits = self._origin_visits[origin_url] if last_visit is not None: visits = visits[last_visit:] if limit is not None: visits = visits[:limit] for visit in visits: if not visit: continue visit_id = visit['visit'] yield self._convert_visit( self._origin_visits[origin_url][visit_id-1]) def origin_visit_find_by_date(self, origin, visit_date): """Retrieves the origin visit whose date is closest to the provided timestamp. In case of a tie, the visit with largest id is selected. Args: origin (str): The occurrence's origin (URL). target (datetime): target timestamp Returns: A visit. """ origin_url = self._get_origin_url(origin) if origin_url in self._origin_visits: visits = self._origin_visits[origin_url] visit = min( visits, key=lambda v: (abs(v['date'] - visit_date), -v['visit'])) return self._convert_visit(visit) def origin_visit_get_by(self, origin, visit): """Retrieve origin visit's information. Args: origin (int): the origin's identifier Returns: The information on that particular (origin, visit) or None if it does not exist """ origin_url = self._get_origin_url(origin) if origin_url in self._origin_visits and \ visit <= len(self._origin_visits[origin_url]): return self._convert_visit( self._origin_visits[origin_url][visit-1]) def origin_visit_get_latest( self, origin, allowed_statuses=None, require_snapshot=False): """Get the latest origin visit for the given origin, optionally looking only for those with one of the given allowed_statuses or for those with a known snapshot. Args: origin (str): the origin's URL allowed_statuses (list of str): list of visit statuses considered to find the latest visit. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. require_snapshot (bool): If True, only a visit with a snapshot will be returned. 
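For instance, a hypothetical lookup of the latest complete visit that references a snapshot::

    visit = storage.origin_visit_get_latest(
        'https://github.com/user/repo',
        allowed_statuses=['full'],
        require_snapshot=True)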
Returns: dict: a dict with the following keys: origin: the URL of the origin visit: origin visit id type: type of loader used for the visit date: timestamp of such visit status: Visit's new status metadata: Data associated to the visit snapshot (Optional[sha1_git]): identifier of the snapshot associated to the visit """ origin = self._origins.get(origin) if not origin: return visits = self._origin_visits[origin['url']] if allowed_statuses is not None: visits = [visit for visit in visits if visit['status'] in allowed_statuses] if require_snapshot: visits = [visit for visit in visits if visit['snapshot']] visit = max( visits, key=lambda v: (v['date'], v['visit']), default=None) return self._convert_visit(visit) def stat_counters(self): """compute statistics about the number of tuples in various tables Returns: dict: a dictionary mapping textual labels (e.g., content) to integer values (e.g., the number of tuples in table content) """ keys = ( 'content', 'directory', 'origin', 'origin_visit', 'person', 'release', 'revision', 'skipped_content', 'snapshot' ) stats = {key: 0 for key in keys} stats.update(collections.Counter( obj_type for (obj_type, obj_id) in itertools.chain(*self._objects.values()))) return stats def refresh_stat_counters(self): """Recomputes the statistics for `stat_counters`.""" pass def origin_metadata_add(self, origin_id, ts, provider, tool, metadata, db=None, cur=None): """ Add an origin_metadata for the origin at ts with provenance and metadata. Args: origin_id (int): the origin's id for which the metadata is added ts (datetime): timestamp of the found metadata provider: id of the provider of metadata (ex:'hal') tool: id of the tool used to extract metadata metadata (jsonb): the metadata retrieved at the time and location """ if isinstance(ts, str): ts = dateutil.parser.parse(ts) origin_metadata = { 'origin_id': origin_id, 'discovery_date': ts, 'tool_id': tool, 'metadata': metadata, 'provider_id': provider, } self._origin_metadata[origin_id].append(origin_metadata) return None def origin_metadata_get_by(self, origin_id, provider_type=None, db=None, cur=None): """Retrieve list of all origin_metadata entries for the origin_id Args: origin_id (int): the unique origin's identifier provider_type (str): (optional) type of provider Returns: list of dicts: the origin_metadata dictionary with the keys: - origin_id (int): origin's identifier - discovery_date (datetime): timestamp of discovery - tool_id (int): metadata's extracting tool - metadata (jsonb) - provider_id (int): metadata's provider - provider_name (str) - provider_type (str) - provider_url (str) """ metadata = [] for item in self._origin_metadata[origin_id]: item = copy.deepcopy(item) provider = self.metadata_provider_get(item['provider_id']) for attr_name in ('name', 'type', 'url'): item['provider_' + attr_name] = \ provider['provider_' + attr_name] metadata.append(item) return metadata def tool_add(self, tools): """Add new tools to the storage. Args: tools (iterable of :class:`dict`): Tool information to add to storage. Each tool is a :class:`dict` with the following keys: - name (:class:`str`): name of the tool - version (:class:`str`): version of the tool - configuration (:class:`dict`): configuration of the tool, must be json-encodable Returns: :class:`dict`: All the tools inserted in storage (including the internal ``id``). The order of the list is not guaranteed to match the order of the initial list. 
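For example, registering a hypothetical metadata-detection tool::

    storage.tool_add([{
        'name': 'swh-metadata-detector',
        'version': '0.0.1',
        'configuration': {'type': 'local', 'context': 'NpmMapping'},
    }])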
""" inserted = [] for tool in tools: key = self._tool_key(tool) assert 'id' not in tool record = copy.deepcopy(tool) record['id'] = key # TODO: remove this if key not in self._tools: self._tools[key] = record inserted.append(copy.deepcopy(self._tools[key])) return inserted def tool_get(self, tool): """Retrieve tool information. Args: tool (dict): Tool information we want to retrieve from storage. The dicts have the same keys as those used in :func:`tool_add`. Returns: dict: The full tool information if it exists (``id`` included), None otherwise. """ return self._tools.get(self._tool_key(tool)) def metadata_provider_add(self, provider_name, provider_type, provider_url, metadata): """Add a metadata provider. Args: provider_name (str): Its name provider_type (str): Its type provider_url (str): Its URL metadata: JSON-encodable object Returns: an identifier of the provider """ provider = { 'provider_name': provider_name, 'provider_type': provider_type, 'provider_url': provider_url, 'metadata': metadata, } key = self._metadata_provider_key(provider) provider['id'] = key self._metadata_providers[key] = provider return key def metadata_provider_get(self, provider_id, db=None, cur=None): """Get a metadata provider Args: provider_id: Its identifier, as given by `metadata_provider_add`. Returns: dict: same as `metadata_provider_add`; or None if it does not exist. """ return self._metadata_providers.get(provider_id) def metadata_provider_get_by(self, provider, db=None, cur=None): """Get a metadata provider Args: provider_name: Its name provider_url: Its URL Returns: dict: same as `metadata_provider_add`; or None if it does not exist. """ key = self._metadata_provider_key(provider) return self._metadata_providers.get(key) def _get_origin_url(self, origin): if isinstance(origin, str): return origin elif isinstance(origin, int): if origin <= len(self._origins_by_id): return self._origins_by_id[origin-1] else: return None else: raise TypeError('origin must be a string or an integer.') def _person_add(self, person): """Add a person in storage. Note: Private method, do not use outside of this class. Args: person: dictionary with keys fullname, name and email. 
""" - key = ('person', person['fullname']) + key = ('person', person.fullname) if key not in self._objects: person_id = len(self._persons) + 1 - self._persons.append(dict(person)) + self._persons.append(person) self._objects[key].append(('person', person_id)) else: person_id = self._objects[key][0][1] - p = self._persons[person_id-1] - person.update(p.items()) + person = self._persons[person_id-1] + return person @staticmethod def _content_key(content): """A stable key for a content""" return tuple(getattr(content, key) for key in sorted(DEFAULT_ALGORITHMS)) @staticmethod def _content_key_algorithm(content): """ A stable key and the algorithm for a content""" if isinstance(content, Content): content = content.to_dict() return tuple((content.get(key), key) for key in sorted(DEFAULT_ALGORITHMS)) @staticmethod def _tool_key(tool): return '%r %r %r' % (tool['name'], tool['version'], tuple(sorted(tool['configuration'].items()))) @staticmethod def _metadata_provider_key(provider): return '%r %r' % (provider['provider_name'], provider['provider_url']) diff --git a/swh/storage/sql/30-swh-schema.sql b/swh/storage/sql/30-swh-schema.sql index 41ecf277..b8b0260c 100644 --- a/swh/storage/sql/30-swh-schema.sql +++ b/swh/storage/sql/30-swh-schema.sql @@ -1,496 +1,496 @@ --- --- SQL implementation of the Software Heritage data model --- -- schema versions create table dbversion ( version int primary key, release timestamptz, description text ); comment on table dbversion is 'Details of current db version'; comment on column dbversion.version is 'SQL schema version'; comment on column dbversion.release is 'Version deployment timestamp'; comment on column dbversion.description is 'Release description'; -- latest schema version insert into dbversion(version, release, description) - values(139, now(), 'Work In Progress'); + values(140, now(), 'Work In Progress'); -- a SHA1 checksum create domain sha1 as bytea check (length(value) = 20); -- a Git object ID, i.e., a Git-style salted SHA1 checksum create domain sha1_git as bytea check (length(value) = 20); -- a SHA256 checksum create domain sha256 as bytea check (length(value) = 32); -- a blake2 checksum create domain blake2s256 as bytea check (length(value) = 32); -- UNIX path (absolute, relative, individual path component, etc.) create domain unix_path as bytea; -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod create domain file_perms as int; -- Checksums about actual file content. Note that the content itself is not -- stored in the DB, but on external (key-value) storage. A single checksum is -- used as key there, but the other can be used to verify that we do not inject -- content collisions not knowingly. create table content ( sha1 sha1 not null, sha1_git sha1_git not null, sha256 sha256 not null, blake2s256 blake2s256, length bigint not null, ctime timestamptz not null default now(), -- creation time, i.e. 
time of (first) injection into the storage status content_status not null default 'visible', object_id bigserial ); comment on table content is 'Checksums of file content which is actually stored externally'; comment on column content.sha1 is 'Content sha1 hash'; comment on column content.sha1_git is 'Git object sha1 hash'; comment on column content.sha256 is 'Content Sha256 hash'; comment on column content.blake2s256 is 'Content blake2s hash'; comment on column content.length is 'Content length'; comment on column content.ctime is 'First seen time'; comment on column content.status is 'Content status (absent, visible, hidden)'; comment on column content.object_id is 'Content identifier'; -- An origin is a place, identified by an URL, where software source code -- artifacts can be found. We support different kinds of origins, e.g., git and -- other VCS repositories, web pages that list tarballs URLs (e.g., -- http://www.kernel.org), indirect tarball URLs (e.g., -- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is -- that it can be *fetched* from (wget, git clone, svn checkout, etc.) to -- retrieve all the contained software. create table origin ( id bigserial not null, type text, -- TODO use an enum here (?) url text not null ); comment on column origin.id is 'Artifact origin id'; comment on column origin.type is 'Type of origin'; comment on column origin.url is 'URL of origin'; -- Content blobs observed somewhere, but not ingested into the archive for -- whatever reason. This table is separate from the content table as we might -- not have the sha1 checksum of skipped contents (for instance when we inject -- git repositories, objects that are too big will be skipped here, and we will -- only know their sha1_git). 'reason' contains the reason the content was -- skipped. origin is a nullable column allowing to find out which origin -- contains that skipped content. create table skipped_content ( sha1 sha1, sha1_git sha1_git, sha256 sha256, blake2s256 blake2s256, length bigint not null, ctime timestamptz not null default now(), status content_status not null default 'absent', reason text not null, origin bigint, object_id bigserial ); comment on table skipped_content is 'Content blobs observed, but not ingested in the archive'; comment on column skipped_content.sha1 is 'Skipped content sha1 hash'; comment on column skipped_content.sha1_git is 'Git object sha1 hash'; comment on column skipped_content.sha256 is 'Skipped content sha256 hash'; comment on column skipped_content.blake2s256 is 'Skipped content blake2s hash'; comment on column skipped_content.length is 'Skipped content length'; comment on column skipped_content.ctime is 'First seen time'; comment on column skipped_content.status is 'Skipped content status (absent, visible, hidden)'; comment on column skipped_content.reason is 'Reason for skipping'; comment on column skipped_content.origin is 'Origin table identifier'; comment on column skipped_content.object_id is 'Skipped content identifier'; -- Log of all origin fetches (i.e., origin crawling) that have been done in the -- past, or are still ongoing. Similar to list_history, but for origins. create table fetch_history ( id bigserial, origin bigint, date timestamptz not null, status boolean, -- true if and only if the fetch has been successful result jsonb, -- more detailed returned values, times, etc... 
stdout text, stderr text, -- null when status is true, filled otherwise duration interval -- fetch duration, or NULL if still ongoing ); comment on table fetch_history is 'Log of all origin fetches'; comment on column fetch_history.id is 'Identifier for fetch history'; comment on column fetch_history.origin is 'Origin table identifier'; comment on column fetch_history.date is 'Fetch start time'; comment on column fetch_history.status is 'True indicates successful fetch'; comment on column fetch_history.result is 'Detailed return values, times etc'; comment on column fetch_history.stdout is 'Standard output of fetch operation'; comment on column fetch_history.stderr is 'Standard error of fetch operation'; comment on column fetch_history.duration is 'Time taken to complete fetch, NULL if ongoing'; -- A file-system directory. A directory is a list of directory entries (see -- tables: directory_entry_{dir,file}). -- -- To list the contents of a directory: -- 1. list the contained directory_entry_dir using array dir_entries -- 2. list the contained directory_entry_file using array file_entries -- 3. list the contained directory_entry_rev using array rev_entries -- 4. UNION -- -- Synonyms/mappings: -- * git: tree create table directory ( id sha1_git not null, dir_entries bigint[], -- sub-directories, reference directory_entry_dir file_entries bigint[], -- contained files, reference directory_entry_file rev_entries bigint[], -- mounted revisions, reference directory_entry_rev object_id bigserial -- short object identifier ); comment on table directory is 'Contents of a directory, synonymous to tree (git)'; comment on column directory.id is 'Git object sha1 hash'; comment on column directory.dir_entries is 'Sub-directories, reference directory_entry_dir'; comment on column directory.file_entries is 'Contained files, reference directory_entry_file'; comment on column directory.rev_entries is 'Mounted revisions, reference directory_entry_rev'; comment on column directory.object_id is 'Short object identifier'; -- A directory entry pointing to a (sub-)directory. create table directory_entry_dir ( id bigserial, target sha1_git not null, -- id of target directory name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_dir is 'Directory entry for directory'; comment on column directory_entry_dir.id is 'Directory identifier'; comment on column directory_entry_dir.target is 'Target directory identifier'; comment on column directory_entry_dir.name is 'Path name, relative to containing directory'; comment on column directory_entry_dir.perms is 'Unix-like permissions'; -- A directory entry pointing to a file content. create table directory_entry_file ( id bigserial, target sha1_git not null, -- id of target file name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_file is 'Directory entry for file'; comment on column directory_entry_file.id is 'File identifier'; comment on column directory_entry_file.target is 'Target file identifier'; comment on column directory_entry_file.name is 'Path name, relative to containing directory'; comment on column directory_entry_file.perms is 'Unix-like permissions'; -- A directory entry pointing to a revision.
create table directory_entry_rev ( id bigserial, target sha1_git not null, -- id of target revision name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_rev is 'Directory entry for revision'; comment on column directory_entry_rev.id is 'Revision identifier'; comment on column directory_entry_rev.target is 'Target revision identifier'; comment on column directory_entry_rev.name is 'Path name, relative to containing directory'; comment on column directory_entry_rev.perms is 'Unix-like permissions'; -- A person referenced by some source code artifacts, e.g., a VCS revision or -- release metadata. create table person ( id bigserial, name bytea, -- advisory: not null if we managed to parse a name email bytea, -- advisory: not null if we managed to parse an email fullname bytea not null -- freeform specification; what is actually used in the checksums -- will usually be of the form 'name <email>' ); comment on table person is 'Person referenced in code artifact release metadata'; comment on column person.id is 'Person identifier'; comment on column person.name is 'Name'; comment on column person.email is 'Email'; comment on column person.fullname is 'Full name (raw name)'; -- The state of a source code tree at a specific point in time. -- -- Synonyms/mappings: -- * git / subversion / etc: commit -- * tarball: a specific tarball -- -- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in -- case of merges) parent revisions. Each revision points to a directory, i.e., -- a file-system tree containing files and directories. create table revision ( id sha1_git not null, date timestamptz, date_offset smallint, committer_date timestamptz, committer_date_offset smallint, type revision_type not null, directory sha1_git, -- source code 'root' directory message bytea, author bigint, committer bigint, synthetic boolean not null default false, -- true iff revision has been created by Software Heritage metadata jsonb, -- extra metadata (tarball checksums, extra commit information, etc...)
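-- The remaining columns are storage bookkeeping: object_id is an internal identifier, and the *_neg_utc_offset flags record a negative zero ('-0000') UTC offset, which a plain smallint offset cannot distinguish from '+0000' even though the distinction changes the computed git identifier.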
object_id bigserial, date_neg_utc_offset boolean, committer_date_neg_utc_offset boolean ); comment on table revision is 'Revision represents the state of a source code tree at a specific point in time'; comment on column revision.id is 'Git object sha1 hash'; comment on column revision.date is 'Timestamp when revision was authored'; comment on column revision.date_offset is 'Authored timestamp offset from UTC'; comment on column revision.committer_date is 'Timestamp when revision was committed'; comment on column revision.committer_date_offset is 'Committed timestamp offset from UTC'; comment on column revision.type is 'Possible revision types (''git'', ''tar'', ''dsc'', ''svn'', ''hg'')'; comment on column revision.directory is 'Directory identifier'; comment on column revision.message is 'Revision message'; comment on column revision.author is 'Author identifier'; comment on column revision.committer is 'Committer identifier'; comment on column revision.synthetic is 'true iff revision has been created by Software Heritage'; comment on column revision.metadata is 'extra metadata (tarball checksums, extra commit information, etc...)'; comment on column revision.object_id is 'Object identifier'; comment on column revision.date_neg_utc_offset is 'True indicates -0 UTC offset for author timestamp'; comment on column revision.committer_date_neg_utc_offset is 'True indicates -0 UTC offset for committer timestamp'; -- Parent revisions, one row per parent (the alternative design would have been a sha1_git[] column on the revision table) create table revision_history ( id sha1_git not null, parent_id sha1_git not null, parent_rank int not null default 0 -- parent position in merge commits, 0-based ); comment on table revision_history is 'Parent revisions and their rank (position in history), for each revision'; comment on column revision_history.id is 'Revision git object sha1 checksum'; comment on column revision_history.parent_id is 'Parent revision git object identifier'; comment on column revision_history.parent_rank is 'Parent position in merge commits, 0-based'; -- Crawling history of software origins visited by Software Heritage. Each -- visit is a 3-way mapping between a software origin, a timestamp, and a -- snapshot object capturing the full state of the origin at visit time. create table origin_visit ( origin bigint not null, visit bigint not null, date timestamptz not null, type text not null, status origin_visit_status not null, metadata jsonb, snapshot sha1_git ); comment on column origin_visit.origin is 'Visited origin'; comment on column origin_visit.visit is 'Sequential visit number for the origin'; comment on column origin_visit.date is 'Visit timestamp'; comment on column origin_visit.type is 'Type of loader that did the visit (hg, git, ...)'; comment on column origin_visit.status is 'Visit result'; comment on column origin_visit.metadata is 'Origin metadata at visit time'; comment on column origin_visit.snapshot is 'Origin snapshot at visit time'; -- A snapshot represents the entire state of a software origin as crawled by -- Software Heritage. This table is a simple mapping between (public) intrinsic -- snapshot identifiers and (private) numeric sequential identifiers.
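-- For instance, resolving an intrinsic snapshot identifier to its internal counterpart is a plain lookup (illustrative query only, using a sample id): -- select object_id from snapshot -- where id = '\x1a8893e6a86f444e8be8e7bda6cb34fb1735a00e';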
create table snapshot ( object_id bigserial not null, -- PK internal object identifier id sha1_git not null -- snapshot intrinsic identifier ); comment on table snapshot is 'State of a software origin as crawled by Software Heritage'; comment on column snapshot.object_id is 'Internal object identifier'; comment on column snapshot.id is 'Intrinsic snapshot identifier'; -- Each snapshot associates "branch" names with other objects in the Software -- Heritage Merkle DAG. This table describes branches as mappings between names -- and target typed objects. create table snapshot_branch ( object_id bigserial not null, -- PK internal object identifier name bytea not null, -- branch name, e.g., "master" or "feature/drag-n-drop" target bytea, -- target object identifier, e.g., a revision identifier target_type snapshot_target -- target object type, e.g., "revision" ); comment on table snapshot_branch is 'Associates branches with objects in the Software Heritage Merkle DAG'; comment on column snapshot_branch.object_id is 'Internal object identifier'; comment on column snapshot_branch.name is 'Branch name'; comment on column snapshot_branch.target is 'Target object identifier'; comment on column snapshot_branch.target_type is 'Target object type'; -- Mapping between snapshots and their branches. create table snapshot_branches ( snapshot_id bigint not null, -- snapshot identifier, ref. snapshot.object_id branch_id bigint not null -- branch identifier, ref. snapshot_branch.object_id ); comment on table snapshot_branches is 'Mapping between snapshots and their branches'; comment on column snapshot_branches.snapshot_id is 'Snapshot identifier'; comment on column snapshot_branches.branch_id is 'Branch identifier'; -- A "memorable" point in time in the development history of a software -- project.
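-- (Typically a named, and possibly signed, pointer to a revision, carrying its own author, date and message.)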
-- -- Synonyms/mappings: -- * git: tag (of the annotated kind, otherwise they are just references) -- * tarball: the release version number create table release ( id sha1_git not null, target sha1_git, date timestamptz, date_offset smallint, name bytea, comment bytea, author bigint, synthetic boolean not null default false, -- true iff release has been created by Software Heritage object_id bigserial, target_type object_type not null, date_neg_utc_offset boolean ); comment on table release is 'Details of a software release, synonymous with a tag (git) or version number (tarball)'; comment on column release.id is 'Release git identifier'; comment on column release.target is 'Target git identifier'; comment on column release.date is 'Release timestamp'; comment on column release.date_offset is 'Timestamp offset from UTC'; comment on column release.name is 'Name'; comment on column release.comment is 'Comment'; comment on column release.author is 'Author'; comment on column release.synthetic is 'Indicates if created by Software Heritage'; comment on column release.object_id is 'Object identifier'; comment on column release.target_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')'; comment on column release.date_neg_utc_offset is 'True indicates -0 UTC offset for release timestamp'; -- Tools create table tool ( id serial not null, name text not null, version text not null, configuration jsonb ); comment on table tool is 'Tool information'; comment on column tool.id is 'Tool identifier'; comment on column tool.name is 'Tool name'; comment on column tool.version is 'Tool version'; comment on column tool.configuration is 'Tool configuration: command line, flags, etc...'; create table metadata_provider ( id serial not null, provider_name text not null, provider_type text not null, provider_url text, metadata jsonb ); comment on table metadata_provider is 'Metadata provider information'; comment on column metadata_provider.id is 'Provider''s identifier'; comment on column metadata_provider.provider_name is 'Provider''s name'; comment on column metadata_provider.provider_type is 'Provider''s type'; comment on column metadata_provider.provider_url is 'Provider''s url'; comment on column metadata_provider.metadata is 'Other metadata about provider'; -- Metadata discovered on an origin during a listing, loading, deposit or -- external_catalog crawl of that origin, together with its translation to a defined json schema using a translation tool (tool_id) create table origin_metadata ( id bigserial not null, -- PK internal object identifier origin_id bigint not null, -- references origin(id) discovery_date timestamptz not null, -- when it was extracted provider_id bigint not null, -- ex: 'hal', 'lister-github', 'loader-github' tool_id bigint not null, metadata jsonb not null ); comment on table origin_metadata is 'keeps all metadata found concerning an origin'; comment on column origin_metadata.id is 'the origin_metadata object''s id'; comment on column origin_metadata.origin_id is 'the origin id for which the metadata was found'; comment on column origin_metadata.discovery_date is 'the date of retrieval'; comment on column origin_metadata.provider_id is 'the metadata provider: github, openhub, deposit, etc.'; comment on column origin_metadata.tool_id is 'the tool used for extracting metadata: lister-github, etc.'; comment on column origin_metadata.metadata is 'metadata in json format but with original terms'; -- Keep a cache of object counts create table object_counts ( object_type text, -- table for which we're counting objects (PK) value bigint, --
count of objects in the table last_update timestamptz, -- last update for the object count in this table single_update boolean -- whether we update this table standalone (true) or through bucketed counts (false) ); comment on table object_counts is 'Cache of object counts'; comment on column object_counts.object_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')'; comment on column object_counts.value is 'Count of objects in the table'; comment on column object_counts.last_update is 'Last update for object count'; comment on column object_counts.single_update is 'standalone (true) or bucketed counts (false)'; create table object_counts_bucketed ( line serial not null, -- PK object_type text not null, -- table for which we're counting objects identifier text not null, -- identifier across which we're bucketing objects bucket_start bytea, -- lower bound (inclusive) for the bucket bucket_end bytea, -- upper bound (exclusive) for the bucket value bigint, -- count of objects in the bucket last_update timestamptz -- last update for the object count in this bucket ); comment on table object_counts_bucketed is 'Bucketed count for objects ordered by type'; comment on column object_counts_bucketed.line is 'Auto incremented identifier value'; comment on column object_counts_bucketed.object_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')'; comment on column object_counts_bucketed.identifier is 'Common identifier for bucketed objects'; comment on column object_counts_bucketed.bucket_start is 'Lower bound (inclusive) for the bucket'; comment on column object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket'; comment on column object_counts_bucketed.value is 'Count of objects in the bucket'; comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket'; diff --git a/swh/storage/sql/60-swh-indexes.sql b/swh/storage/sql/60-swh-indexes.sql index ef3ebcee..7ae186ce 100644 --- a/swh/storage/sql/60-swh-indexes.sql +++ b/swh/storage/sql/60-swh-indexes.sql @@ -1,182 +1,186 @@ -- content create unique index concurrently content_pkey on content(sha1); create unique index concurrently on content(sha1_git); create index concurrently on content(sha256); create index concurrently on content(blake2s256); create index concurrently on content(ctime); -- TODO use a BRIN index here (postgres >= 9.5) create unique index concurrently on content(object_id); alter table content add primary key using index content_pkey; -- origin create unique index concurrently origin_pkey on origin(id); alter table origin add primary key using index origin_pkey; create index concurrently on origin using gin (url gin_trgm_ops); create index concurrently on origin using hash (url); -- skipped_content alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256); create index concurrently on skipped_content(sha1); create index concurrently on skipped_content(sha1_git); create index concurrently on skipped_content(sha256); create index concurrently on skipped_content(blake2s256); create unique index concurrently on skipped_content(object_id); alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid; alter table skipped_content validate constraint skipped_content_origin_fkey; -- fetch_history create unique index concurrently fetch_history_pkey on fetch_history(id); alter table
fetch_history add primary key using index fetch_history_pkey; alter table fetch_history add constraint fetch_history_origin_fkey foreign key (origin) references origin(id) not valid; alter table fetch_history validate constraint fetch_history_origin_fkey; -- directory create unique index concurrently directory_pkey on directory(id); alter table directory add primary key using index directory_pkey; create index concurrently on directory using gin (dir_entries); create index concurrently on directory using gin (file_entries); create index concurrently on directory using gin (rev_entries); create unique index concurrently on directory(object_id); -- directory_entry_dir create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id); alter table directory_entry_dir add primary key using index directory_entry_dir_pkey; create unique index concurrently on directory_entry_dir(target, name, perms); -- directory_entry_file create unique index concurrently directory_entry_file_pkey on directory_entry_file(id); alter table directory_entry_file add primary key using index directory_entry_file_pkey; create unique index concurrently on directory_entry_file(target, name, perms); -- directory_entry_rev create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id); alter table directory_entry_rev add primary key using index directory_entry_rev_pkey; create unique index concurrently on directory_entry_rev(target, name, perms); -- person create unique index concurrently person_pkey on person(id); alter table person add primary key using index person_pkey; create unique index concurrently on person(fullname); create index concurrently on person(name); create index concurrently on person(email); -- revision create unique index concurrently revision_pkey on revision(id); alter table revision add primary key using index revision_pkey; alter table revision add constraint revision_author_fkey foreign key (author) references person(id) not valid; alter table revision validate constraint revision_author_fkey; alter table revision add constraint revision_committer_fkey foreign key (committer) references person(id) not valid; alter table revision validate constraint revision_committer_fkey; create index concurrently on revision(directory); create unique index concurrently on revision(object_id); -- revision_history create unique index concurrently revision_history_pkey on revision_history(id, parent_rank); alter table revision_history add primary key using index revision_history_pkey; create index concurrently on revision_history(parent_id); alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid; alter table revision_history validate constraint revision_history_id_fkey; -- snapshot create unique index concurrently snapshot_pkey on snapshot(object_id); alter table snapshot add primary key using index snapshot_pkey; create unique index concurrently on snapshot(id); -- snapshot_branch create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id); alter table snapshot_branch add primary key using index snapshot_branch_pkey; create unique index concurrently on snapshot_branch (target_type, target, name); alter table snapshot_branch add constraint snapshot_branch_target_check check ((target_type is null) = (target is null)) not valid; alter table snapshot_branch validate constraint snapshot_branch_target_check; alter table snapshot_branch add constraint snapshot_target_check check (target_type 
not in ('content', 'directory', 'revision', 'release', 'snapshot') or length(target) = 20) not valid; alter table snapshot_branch validate constraint snapshot_target_check; create unique index concurrently on snapshot_branch (name) where target_type is null and target is null; -- snapshot_branches create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id); alter table snapshot_branches add primary key using index snapshot_branches_pkey; alter table snapshot_branches add constraint snapshot_branches_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid; alter table snapshot_branches validate constraint snapshot_branches_snapshot_id_fkey; alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid; alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey; -- origin_visit create unique index concurrently origin_visit_pkey on origin_visit(origin, visit); alter table origin_visit add primary key using index origin_visit_pkey; create index concurrently on origin_visit(date); alter table origin_visit add constraint origin_visit_origin_fkey foreign key (origin) references origin(id) not valid; alter table origin_visit validate constraint origin_visit_origin_fkey; -- release create unique index concurrently release_pkey on release(id); alter table release add primary key using index release_pkey; create index concurrently on release(target, target_type); create unique index concurrently on release(object_id); alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid; alter table release validate constraint release_author_fkey; +-- if the author is null, then the date must be null +alter table release add constraint release_author_date_check check ((date is null) or (author is not null)) not valid; +alter table release validate constraint release_author_date_check; + -- tool create unique index tool_pkey on tool(id); alter table tool add primary key using index tool_pkey; create unique index on tool(name, version, configuration); -- metadata_provider create unique index concurrently metadata_provider_pkey on metadata_provider(id); alter table metadata_provider add primary key using index metadata_provider_pkey; create index concurrently on metadata_provider(provider_name, provider_url); -- origin_metadata create unique index concurrently origin_metadata_pkey on origin_metadata(id); alter table origin_metadata add primary key using index origin_metadata_pkey; create index concurrently on origin_metadata(origin_id, provider_id, tool_id); alter table origin_metadata add constraint origin_metadata_origin_fkey foreign key (origin_id) references origin(id) not valid; alter table origin_metadata validate constraint origin_metadata_origin_fkey; alter table origin_metadata add constraint origin_metadata_provider_fkey foreign key (provider_id) references metadata_provider(id) not valid; alter table origin_metadata validate constraint origin_metadata_provider_fkey; alter table origin_metadata add constraint origin_metadata_tool_fkey foreign key (tool_id) references tool(id) not valid; alter table origin_metadata validate constraint origin_metadata_tool_fkey; -- object_counts create unique index concurrently object_counts_pkey on object_counts(object_type); alter table object_counts add primary key using index object_counts_pkey; -- object_counts_bucketed create unique index 
concurrently object_counts_bucketed_pkey on object_counts_bucketed(line); alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey; diff --git a/swh/storage/tests/test_storage.py b/swh/storage/tests/test_storage.py index e0861f8c..4585d7c1 100644 --- a/swh/storage/tests/test_storage.py +++ b/swh/storage/tests/test_storage.py @@ -1,3930 +1,3972 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import copy import datetime import itertools import random import unittest from collections import defaultdict from unittest.mock import Mock, patch import psycopg2.errors import pytest from hypothesis import given, strategies, settings, HealthCheck from swh.model import from_disk, identifiers from swh.model.hashutil import hash_to_bytes from swh.model.hypothesis_strategies import origins, objects from swh.storage.tests.storage_testing import StorageTestFixture from swh.storage import HashCollision from .generate_data_test import gen_contents @pytest.mark.db class StorageTestDbFixture(StorageTestFixture): def setUp(self): super().setUp() db = self.test_db[self.TEST_DB_NAME] self.conn = db.conn self.cursor = db.cursor self.maxDiff = None def tearDown(self): self.reset_storage() super().tearDown() class TestStorageData: def setUp(self): super().setUp() self.cont = { 'data': b'42\n', 'length': 3, 'sha1': hash_to_bytes( '34973274ccef6ab4dfaaf86599792fa9c3fe4689'), 'sha1_git': hash_to_bytes( 'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'), 'sha256': hash_to_bytes( '673650f936cb3b0a2f93ce09d81be107' '48b1b203c19e8176b4eefc1964a0cf3a'), 'blake2s256': hash_to_bytes('d5fe1939576527e42cfd76a9455a2' '432fe7f56669564577dd93c4280e76d661d'), 'status': 'visible', } self.cont2 = { 'data': b'4242\n', 'length': 5, 'sha1': hash_to_bytes( '61c2b3a30496d329e21af70dd2d7e097046d07b7'), 'sha1_git': hash_to_bytes( '36fade77193cb6d2bd826161a0979d64c28ab4fa'), 'sha256': hash_to_bytes( '859f0b154fdb2d630f45e1ecae4a8629' '15435e663248bb8461d914696fc047cd'), 'blake2s256': hash_to_bytes('849c20fad132b7c2d62c15de310adfe87be' '94a379941bed295e8141c6219810d'), 'status': 'visible', } self.cont3 = { 'data': b'424242\n', 'length': 7, 'sha1': hash_to_bytes( '3e21cc4942a4234c9e5edd8a9cacd1670fe59f13'), 'sha1_git': hash_to_bytes( 'c932c7649c6dfa4b82327d121215116909eb3bea'), 'sha256': hash_to_bytes( '92fb72daf8c6818288a35137b72155f5' '07e5de8d892712ab96277aaed8cf8a36'), 'blake2s256': hash_to_bytes('76d0346f44e5a27f6bafdd9c2befd304af' 'f83780f93121d801ab6a1d4769db11'), 'status': 'visible', } self.missing_cont = { 'data': b'missing\n', 'length': 8, 'sha1': hash_to_bytes( 'f9c24e2abb82063a3ba2c44efd2d3c797f28ac90'), 'sha1_git': hash_to_bytes( '33e45d56f88993aae6a0198013efa80716fd8919'), 'sha256': hash_to_bytes( '6bbd052ab054ef222c1c87be60cd191a' 'ddedd24cc882d1f5f7f7be61dc61bb3a'), 'blake2s256': hash_to_bytes('306856b8fd879edb7b6f1aeaaf8db9bbecc9' '93cd7f776c333ac3a782fa5c6eba'), 'status': 'absent', } self.skipped_cont = { 'length': 1024 * 1024 * 200, 'sha1_git': hash_to_bytes( '33e45d56f88993aae6a0198013efa80716fd8920'), 'sha1': hash_to_bytes( '43e45d56f88993aae6a0198013efa80716fd8920'), 'sha256': hash_to_bytes( '7bbd052ab054ef222c1c87be60cd191a' 'ddedd24cc882d1f5f7f7be61dc61bb3a'), 'blake2s256': hash_to_bytes( 'ade18b1adecb33f891ca36664da676e1' '2c772cc193778aac9a137b8dc5834b9b'), 'reason': 'Content too long', 
'status': 'absent', } self.skipped_cont2 = { 'length': 1024 * 1024 * 300, 'sha1_git': hash_to_bytes( '44e45d56f88993aae6a0198013efa80716fd8921'), 'sha1': hash_to_bytes( '54e45d56f88993aae6a0198013efa80716fd8920'), 'sha256': hash_to_bytes( '8cbd052ab054ef222c1c87be60cd191a' 'ddedd24cc882d1f5f7f7be61dc61bb3a'), 'blake2s256': hash_to_bytes( '9ce18b1adecb33f891ca36664da676e1' '2c772cc193778aac9a137b8dc5834b9b'), 'reason': 'Content too long', 'status': 'absent', } self.dir = { 'id': b'4\x013\x422\x531\x000\xf51\xe62\xa73\xff7\xc3\xa90', 'entries': [ { 'name': b'foo', 'type': 'file', 'target': self.cont['sha1_git'], 'perms': from_disk.DentryPerms.content, }, { 'name': b'bar\xc3', 'type': 'dir', 'target': b'12345678901234567890', 'perms': from_disk.DentryPerms.directory, }, ], } self.dir2 = { 'id': b'4\x013\x422\x531\x000\xf51\xe62\xa73\xff7\xc3\xa95', 'entries': [ { 'name': b'oof', 'type': 'file', 'target': self.cont2['sha1_git'], 'perms': from_disk.DentryPerms.content, } ], } self.dir3 = { 'id': hash_to_bytes('33e45d56f88993aae6a0198013efa80716fd8921'), 'entries': [ { 'name': b'foo', 'type': 'file', 'target': self.cont['sha1_git'], 'perms': from_disk.DentryPerms.content, }, { 'name': b'subdir', 'type': 'dir', 'target': self.dir['id'], 'perms': from_disk.DentryPerms.directory, }, { 'name': b'hello', 'type': 'file', 'target': b'12345678901234567890', 'perms': from_disk.DentryPerms.content, }, ], } self.dir4 = { 'id': hash_to_bytes('33e45d56f88993aae6a0198013efa80716fd8922'), 'entries': [ { 'name': b'subdir1', 'type': 'dir', 'target': self.dir3['id'], 'perms': from_disk.DentryPerms.directory, }, ] } self.minus_offset = datetime.timezone(datetime.timedelta(minutes=-120)) self.plus_offset = datetime.timezone(datetime.timedelta(minutes=120)) self.revision = { 'id': b'56789012345678901234', 'message': b'hello', 'author': { 'name': b'Nicolas Dandrimont', 'email': b'nicolas@example.com', 'fullname': b'Nicolas Dandrimont <nicolas@example.com>', }, 'date': { 'timestamp': 1234567890, 'offset': 120, 'negative_utc': None, }, 'committer': { 'name': b'St\xc3fano Zacchiroli', 'email': b'stefano@example.com', 'fullname': b'St\xc3fano Zacchiroli <stefano@example.com>' }, 'committer_date': { 'timestamp': 1123456789, 'offset': 0, 'negative_utc': True, }, 'parents': [b'01234567890123456789', b'23434512345123456789'], 'type': 'git', 'directory': self.dir['id'], 'metadata': { 'checksums': { 'sha1': 'tarball-sha1', 'sha256': 'tarball-sha256', }, 'signed-off-by': 'some-dude', 'extra_headers': [ ['gpgsig', b'test123'], ['mergetags', [b'foo\\bar', b'\x22\xaf\x89\x80\x01\x00']], ], }, 'synthetic': True } self.revision2 = { 'id': b'87659012345678904321', 'message': b'hello again', 'author': { 'name': b'Roberto Dicosmo', 'email': b'roberto@example.com', 'fullname': b'Roberto Dicosmo <roberto@example.com>', }, 'date': { 'timestamp': { 'seconds': 1234567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'committer': { 'name': b'tony', 'email': b'ar@dumont.fr', 'fullname': b'tony <ar@dumont.fr>', }, 'committer_date': { 'timestamp': 1123456789, 'offset': 0, 'negative_utc': False, }, 'parents': [b'01234567890123456789'], 'type': 'git', 'directory': self.dir2['id'], 'metadata': None, 'synthetic': False } self.revision3 = { 'id': hash_to_bytes('7026b7c1a2af56521e951c01ed20f255fa054238'), 'message': b'a simple revision with no parents this time', 'author': { 'name': b'Roberto Dicosmo', 'email': b'roberto@example.com', 'fullname': b'Roberto Dicosmo <roberto@example.com>', }, 'date': { 'timestamp': { 'seconds': 1234567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'committer': {
'name': b'tony', 'email': b'ar@dumont.fr', 'fullname': b'tony <ar@dumont.fr>', }, 'committer_date': { 'timestamp': 1127351742, 'offset': 0, 'negative_utc': False, }, 'parents': [], 'type': 'git', 'directory': self.dir2['id'], 'metadata': None, 'synthetic': True } self.revision4 = { 'id': hash_to_bytes('368a48fe15b7db2383775f97c6b247011b3f14f4'), 'message': b'parent of self.revision2', 'author': { 'name': b'me', 'email': b'me@soft.heri', 'fullname': b'me <me@soft.heri>', }, 'date': { 'timestamp': { 'seconds': 1244567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'committer': { 'name': b'committer-dude', 'email': b'committer@dude.com', 'fullname': b'committer-dude <committer@dude.com>', }, 'committer_date': { 'timestamp': { 'seconds': 1244567843, 'microseconds': 220000, }, 'offset': -720, 'negative_utc': None, }, 'parents': [self.revision3['id']], 'type': 'git', 'directory': self.dir['id'], 'metadata': None, 'synthetic': False } self.origin = { 'url': 'file:///dev/null', 'type': 'git', } self.origin2 = { 'url': 'file:///dev/zero', 'type': 'hg', } self.provider = { 'name': 'hal', 'type': 'deposit-client', 'url': 'http:///hal/inria', 'metadata': { 'location': 'France' } } self.metadata_tool = { 'name': 'swh-deposit', 'version': '0.0.1', 'configuration': { 'sword_version': '2' } } self.origin_metadata = { 'origin': self.origin, 'discovery_date': datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc), 'provider': self.provider, 'tool': 'swh-deposit', 'metadata': { 'name': 'test_origin_metadata', 'version': '0.0.1' } } self.origin_metadata2 = { 'origin': self.origin, 'discovery_date': datetime.datetime(2017, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc), 'provider': self.provider, 'tool': 'swh-deposit', 'metadata': { 'name': 'test_origin_metadata', 'version': '0.0.1' } } self.date_visit1 = datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) self.date_visit2 = datetime.datetime(2017, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) self.date_visit3 = datetime.datetime(2018, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc) self.release = { 'id': b'87659012345678901234', 'name': b'v0.0.1', 'author': { 'name': b'olasd', 'email': b'nic@olasd.fr', 'fullname': b'olasd <nic@olasd.fr>', }, 'date': { 'timestamp': 1234567890, 'offset': 42, 'negative_utc': None, }, 'target': b'43210987654321098765', 'target_type': 'revision', 'message': b'synthetic release', 'synthetic': True, } self.release2 = { 'id': b'56789012348765901234', 'name': b'v0.0.2', 'author': { 'name': b'tony', 'email': b'ar@dumont.fr', 'fullname': b'tony <ar@dumont.fr>', }, 'date': { 'timestamp': 1634366813, 'offset': -120, 'negative_utc': None, }, 'target': b'432109\xa9765432\xc309\x00765', 'target_type': 'revision', 'message': b'v0.0.2\nMisc performance improvements + bug fixes', 'synthetic': False } self.release3 = { 'id': b'87659012345678904321', 'name': b'v0.0.2', 'author': { 'name': b'tony', 'email': b'tony@ardumont.fr', 'fullname': b'tony <tony@ardumont.fr>', }, 'date': { 'timestamp': 1634336813, 'offset': 0, 'negative_utc': False, }, 'target': self.revision2['id'], 'target_type': 'revision', 'message': b'yet another synthetic release', 'synthetic': True, } self.fetch_history_date = datetime.datetime( 2015, 1, 2, 21, 0, 0, tzinfo=datetime.timezone.utc) self.fetch_history_end = datetime.datetime( 2015, 1, 2, 23, 0, 0, tzinfo=datetime.timezone.utc) self.fetch_history_duration = (self.fetch_history_end - self.fetch_history_date) self.fetch_history_data = { 'status': True, 'result': {'foo': 'bar'}, 'stdout': 'blabla', 'stderr': 'blablabla', } self.snapshot = { 'id':
hash_to_bytes('2498dbf535f882bc7f9a18fb16c9ad27fda7bab7'), 'branches': { b'master': { 'target': self.revision['id'], 'target_type': 'revision', }, }, } self.empty_snapshot = { 'id': hash_to_bytes('1a8893e6a86f444e8be8e7bda6cb34fb1735a00e'), 'branches': {}, } self.complete_snapshot = { 'id': hash_to_bytes('6e65b86363953b780d92b0a928f3e8fcdd10db36'), 'branches': { b'directory': { 'target': hash_to_bytes( '1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8'), 'target_type': 'directory', }, b'directory2': { 'target': hash_to_bytes( '1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8'), 'target_type': 'directory', }, b'content': { 'target': hash_to_bytes( 'fe95a46679d128ff167b7c55df5d02356c5a1ae1'), 'target_type': 'content', }, b'alias': { 'target': b'revision', 'target_type': 'alias', }, b'revision': { 'target': hash_to_bytes( 'aafb16d69fd30ff58afdd69036a26047f3aebdc6'), 'target_type': 'revision', }, b'release': { 'target': hash_to_bytes( '7045404f3d1c54e6473c71bbb716529fbad4be24'), 'target_type': 'release', }, b'snapshot': { 'target': hash_to_bytes( '1a8893e6a86f444e8be8e7bda6cb34fb1735a00e'), 'target_type': 'snapshot', }, b'dangling': None, }, } class CommonTestStorage(TestStorageData): """Base class for Storage testing. This class is used as-is to test local storage (see TestLocalStorage below) and remote storage (see TestRemoteStorage in test_remote_storage.py). We need to have the two classes inherit from this base class separately to avoid nosetests running the tests from the base class twice. """ maxDiff = None _test_origin_ids = True @staticmethod def normalize_entity(entity): entity = copy.deepcopy(entity) for key in ('date', 'committer_date'): if key in entity: entity[key] = identifiers.normalize_timestamp(entity[key]) return entity def test_check_config(self): self.assertTrue(self.storage.check_config(check_write=True)) self.assertTrue(self.storage.check_config(check_write=False)) def test_content_add(self): cont = self.cont insertion_start_time = datetime.datetime.now(tz=datetime.timezone.utc) actual_result = self.storage.content_add([cont]) insertion_end_time = datetime.datetime.now(tz=datetime.timezone.utc) self.assertEqual(actual_result, { 'content:add': 1, 'content:add:bytes': cont['length'], 'skipped_content:add': 0 }) self.assertEqual(list(self.storage.content_get([cont['sha1']])), [{'sha1': cont['sha1'], 'data': cont['data']}]) expected_cont = cont.copy() del expected_cont['data'] journal_objects = list(self.journal_writer.objects) for (obj_type, obj) in journal_objects: self.assertLessEqual(insertion_start_time, obj['ctime']) self.assertLessEqual(obj['ctime'], insertion_end_time) del obj['ctime'] self.assertEqual(journal_objects, [('content', expected_cont)]) def test_content_add_validation(self): cont = self.cont with self.assertRaisesRegex(ValueError, 'status'): self.storage.content_add([{**cont, 'status': 'foobar'}]) with self.assertRaisesRegex(ValueError, "(?i)length"): self.storage.content_add([{**cont, 'length': -2}]) with self.assertRaisesRegex( (ValueError, psycopg2.errors.NotNullViolation), "reason"): self.storage.content_add([{**cont, 'status': 'absent'}]) with self.assertRaisesRegex( ValueError, "^Must not provide a reason if content is not absent.$"): self.storage.content_add([{**cont, 'reason': 'foobar'}]) def test_content_get_missing(self): cont = self.cont self.storage.content_add([cont]) # Query a single missing content results = list(self.storage.content_get( [self.cont2['sha1']])) self.assertEqual(results, [None]) # Check content_get does not abort after finding a missing
content results = list(self.storage.content_get( [self.cont['sha1'], self.cont2['sha1']])) self.assertEqual(results, [{'sha1': cont['sha1'], 'data': cont['data']}, None]) # Check content_get does not discard found content when it finds # a missing content. results = list(self.storage.content_get( [self.cont2['sha1'], self.cont['sha1']])) self.assertEqual(results, [None, {'sha1': cont['sha1'], 'data': cont['data']}]) def test_content_add_same_input(self): cont = self.cont actual_result = self.storage.content_add([cont, cont]) self.assertEqual(actual_result, { 'content:add': 1, 'content:add:bytes': cont['length'], 'skipped_content:add': 0 }) def test_content_add_different_input(self): cont = self.cont cont2 = self.cont2 actual_result = self.storage.content_add([cont, cont2]) self.assertEqual(actual_result, { 'content:add': 2, 'content:add:bytes': cont['length'] + cont2['length'], 'skipped_content:add': 0 }) def test_content_add_again(self): actual_result = self.storage.content_add([self.cont]) self.assertEqual(actual_result, { 'content:add': 1, 'content:add:bytes': self.cont['length'], 'skipped_content:add': 0 }) actual_result = self.storage.content_add([self.cont, self.cont2]) self.assertEqual(actual_result, { 'content:add': 1, 'content:add:bytes': self.cont2['length'], 'skipped_content:add': 0 }) self.assertEqual(len(self.storage.content_find(self.cont)), 1) self.assertEqual(len(self.storage.content_find(self.cont2)), 1) def test_content_add_db(self): cont = self.cont actual_result = self.storage.content_add([cont]) self.assertEqual(actual_result, { 'content:add': 1, 'content:add:bytes': cont['length'], 'skipped_content:add': 0 }) if hasattr(self.storage, 'objstorage'): self.assertIn(cont['sha1'], self.storage.objstorage) self.cursor.execute('SELECT sha1, sha1_git, sha256, length, status' ' FROM content WHERE sha1 = %s', (cont['sha1'],)) datum = self.cursor.fetchone() self.assertEqual( (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(), datum[3], datum[4]), (cont['sha1'], cont['sha1_git'], cont['sha256'], cont['length'], 'visible')) expected_cont = cont.copy() del expected_cont['data'] journal_objects = list(self.journal_writer.objects) for (obj_type, obj) in journal_objects: del obj['ctime'] self.assertEqual(journal_objects, [('content', expected_cont)]) def test_content_add_collision(self): cont1 = self.cont # create (corrupted) content with same sha1{,_git} but != sha256 cont1b = cont1.copy() sha256_array = bytearray(cont1b['sha256']) sha256_array[0] += 1 cont1b['sha256'] = bytes(sha256_array) with self.assertRaises(HashCollision) as cm: self.storage.content_add([cont1, cont1b]) self.assertIn(cm.exception.args[0], ['sha1', 'sha1_git', 'blake2s256']) def test_content_add_metadata(self): cont = self.cont.copy() del cont['data'] cont['ctime'] = datetime.datetime.now() actual_result = self.storage.content_add_metadata([cont]) self.assertEqual(actual_result, { 'content:add': 1, 'skipped_content:add': 0 }) expected_cont = cont.copy() del expected_cont['ctime'] self.assertEqual( list(self.storage.content_get_metadata([cont['sha1']])), [expected_cont]) self.assertEqual(list(self.journal_writer.objects), [('content', cont)]) def test_content_add_metadata_same_input(self): cont = self.cont.copy() del cont['data'] cont['ctime'] = datetime.datetime.now() actual_result = self.storage.content_add_metadata([cont, cont]) self.assertEqual(actual_result, { 'content:add': 1, 'skipped_content:add': 0 }) def test_content_add_metadata_different_input(self): cont = self.cont.copy() del cont['data']
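# content_add_metadata stores contents without their data; the caller supplies # the insertion time (ctime) itself, unlike content_add which sets it server-side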
cont['ctime'] = datetime.datetime.now() cont2 = self.cont2.copy() del cont2['data'] cont2['ctime'] = datetime.datetime.now() actual_result = self.storage.content_add_metadata([cont, cont2]) self.assertEqual(actual_result, { 'content:add': 2, 'skipped_content:add': 0 }) def test_content_add_metadata_db(self): cont = self.cont.copy() del cont['data'] cont['ctime'] = datetime.datetime.now() actual_result = self.storage.content_add_metadata([cont]) self.assertEqual(actual_result, { 'content:add': 1, 'skipped_content:add': 0 }) if hasattr(self.storage, 'objstorage'): self.assertNotIn(cont['sha1'], self.storage.objstorage) self.cursor.execute('SELECT sha1, sha1_git, sha256, length, status' ' FROM content WHERE sha1 = %s', (cont['sha1'],)) datum = self.cursor.fetchone() self.assertEqual( (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(), datum[3], datum[4]), (cont['sha1'], cont['sha1_git'], cont['sha256'], cont['length'], 'visible')) self.assertEqual(list(self.journal_writer.objects), [('content', cont)]) def test_content_add_metadata_collision(self): cont1 = self.cont.copy() del cont1['data'] cont1['ctime'] = datetime.datetime.now() # create (corrupted) content with same sha1{,_git} but != sha256 cont1b = cont1.copy() sha256_array = bytearray(cont1b['sha256']) sha256_array[0] += 1 cont1b['sha256'] = bytes(sha256_array) with self.assertRaises(HashCollision) as cm: self.storage.content_add_metadata([cont1, cont1b]) self.assertIn(cm.exception.args[0], ['sha1', 'sha1_git', 'blake2s256']) def test_skipped_content_add_db(self): cont = self.skipped_cont.copy() cont2 = self.skipped_cont2.copy() cont2['blake2s256'] = None actual_result = self.storage.content_add([cont, cont, cont2]) self.assertEqual(actual_result, { 'content:add': 0, 'content:add:bytes': 0, 'skipped_content:add': 2, }) self.cursor.execute('SELECT sha1, sha1_git, sha256, blake2s256, ' 'length, status, reason ' 'FROM skipped_content ORDER BY sha1_git') datums = self.cursor.fetchall() self.assertEqual(2, len(datums)) datum = datums[0] self.assertEqual( (datum[0].tobytes(), datum[1].tobytes(), datum[2].tobytes(), datum[3].tobytes(), datum[4], datum[5], datum[6]), (cont['sha1'], cont['sha1_git'], cont['sha256'], cont['blake2s256'], cont['length'], 'absent', 'Content too long') ) datum2 = datums[1] self.assertEqual( (datum2[0].tobytes(), datum2[1].tobytes(), datum2[2].tobytes(), datum2[3], datum2[4], datum2[5], datum2[6]), (cont2['sha1'], cont2['sha1_git'], cont2['sha256'], cont2['blake2s256'], cont2['length'], 'absent', 'Content too long') ) def test_skipped_content_add(self): cont = self.skipped_cont.copy() cont2 = self.skipped_cont2.copy() cont2['blake2s256'] = None missing = list(self.storage.skipped_content_missing([cont, cont2])) self.assertEqual(len(missing), 2, missing) actual_result = self.storage.content_add([cont, cont, cont2]) self.assertEqual(actual_result, { 'content:add': 0, 'content:add:bytes': 0, 'skipped_content:add': 2, }) missing = list(self.storage.skipped_content_missing([cont, cont2])) self.assertEqual(missing, []) @pytest.mark.property_based @settings(deadline=None) # this test is very slow @given(strategies.sets( elements=strategies.sampled_from( ['sha256', 'sha1_git', 'blake2s256']), min_size=0)) def test_content_missing(self, algos): algos |= {'sha1'} cont2 = self.cont2 missing_cont = self.missing_cont self.storage.content_add([cont2]) test_contents = [cont2] missing_per_hash = defaultdict(list) for i in range(256): test_content = missing_cont.copy() for hash in algos: test_content[hash] = bytes([i]) + 
test_content[hash][1:] missing_per_hash[hash].append(test_content[hash]) test_contents.append(test_content) self.assertCountEqual( self.storage.content_missing(test_contents), missing_per_hash['sha1'] ) for hash in algos: self.assertCountEqual( self.storage.content_missing(test_contents, key_hash=hash), missing_per_hash[hash] ) @pytest.mark.property_based @given(strategies.sets( elements=strategies.sampled_from( ['sha256', 'sha1_git', 'blake2s256']), min_size=0)) def test_content_missing_unknown_algo(self, algos): algos |= {'sha1'} cont2 = self.cont2 missing_cont = self.missing_cont self.storage.content_add([cont2]) test_contents = [cont2] missing_per_hash = defaultdict(list) for i in range(16): test_content = missing_cont.copy() for hash in algos: test_content[hash] = bytes([i]) + test_content[hash][1:] missing_per_hash[hash].append(test_content[hash]) test_content['nonexisting_algo'] = b'\x00' test_contents.append(test_content) self.assertCountEqual( self.storage.content_missing(test_contents), missing_per_hash['sha1'] ) for hash in algos: self.assertCountEqual( self.storage.content_missing(test_contents, key_hash=hash), missing_per_hash[hash] ) def test_content_missing_per_sha1(self): # given cont2 = self.cont2 missing_cont = self.missing_cont self.storage.content_add([cont2]) # when gen = self.storage.content_missing_per_sha1([cont2['sha1'], missing_cont['sha1']]) # then self.assertEqual(list(gen), [missing_cont['sha1']]) def test_content_get_metadata(self): cont1 = self.cont.copy() cont2 = self.cont2.copy() self.storage.content_add([cont1, cont2]) gen = self.storage.content_get_metadata([cont1['sha1'], cont2['sha1']]) # we only retrieve the metadata cont1.pop('data') cont2.pop('data') self.assertCountEqual(list(gen), [cont1, cont2]) def test_content_get_metadata_missing_sha1(self): cont1 = self.cont.copy() cont2 = self.cont2.copy() missing_cont = self.missing_cont.copy() self.storage.content_add([cont1, cont2]) gen = self.storage.content_get_metadata([missing_cont['sha1']]) # All the metadata keys are None missing_cont.pop('data') for key in list(missing_cont): if key != 'sha1': missing_cont[key] = None self.assertEqual(list(gen), [missing_cont]) @staticmethod def _transform_entries(dir_, *, prefix=b''): for ent in dir_['entries']: yield { 'dir_id': dir_['id'], 'type': ent['type'], 'target': ent['target'], 'name': prefix + ent['name'], 'perms': ent['perms'], 'status': None, 'sha1': None, 'sha1_git': None, 'sha256': None, 'length': None, } def test_directory_add(self): init_missing = list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([self.dir['id']], init_missing) actual_result = self.storage.directory_add([self.dir]) self.assertEqual(actual_result, {'directory:add': 1}) self.assertEqual(list(self.journal_writer.objects), [('directory', self.dir)]) actual_data = list(self.storage.directory_ls(self.dir['id'])) expected_data = list(self._transform_entries(self.dir)) self.assertCountEqual(expected_data, actual_data) after_missing = list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([], after_missing) def test_directory_add_validation(self): dir_ = copy.deepcopy(self.dir) dir_['entries'][0]['type'] = 'foobar' with self.assertRaisesRegex(ValueError, 'type.*foobar'): self.storage.directory_add([dir_]) dir_ = copy.deepcopy(self.dir) del dir_['entries'][0]['target'] with self.assertRaisesRegex( (TypeError, psycopg2.errors.NotNullViolation), 'target'): self.storage.directory_add([dir_]) def test_directory_get_recursive(self): init_missing = 
list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([self.dir['id']], init_missing) actual_result = self.storage.directory_add( [self.dir, self.dir2, self.dir3]) self.assertEqual(actual_result, {'directory:add': 3}) self.assertEqual(list(self.journal_writer.objects), [('directory', self.dir), ('directory', self.dir2), ('directory', self.dir3)]) # List directory containing a file and an unknown subdirectory actual_data = list(self.storage.directory_ls( self.dir['id'], recursive=True)) expected_data = list(self._transform_entries(self.dir)) self.assertCountEqual(expected_data, actual_data) # List directory containing a single file actual_data = list(self.storage.directory_ls( self.dir2['id'], recursive=True)) expected_data = list(self._transform_entries(self.dir2)) self.assertCountEqual(expected_data, actual_data) # List directory containing a known subdirectory, entries should # be both those of the directory and of the subdir actual_data = list(self.storage.directory_ls( self.dir3['id'], recursive=True)) expected_data = list(itertools.chain( self._transform_entries(self.dir3), self._transform_entries(self.dir, prefix=b'subdir/'))) self.assertCountEqual(expected_data, actual_data) def test_directory_get_non_recursive(self): init_missing = list(self.storage.directory_missing([self.dir['id']])) self.assertEqual([self.dir['id']], init_missing) actual_result = self.storage.directory_add( [self.dir, self.dir2, self.dir3]) self.assertEqual(actual_result, {'directory:add': 3}) self.assertEqual(list(self.journal_writer.objects), [('directory', self.dir), ('directory', self.dir2), ('directory', self.dir3)]) # List directory containing a file and an unknown subdirectory actual_data = list(self.storage.directory_ls(self.dir['id'])) expected_data = list(self._transform_entries(self.dir)) self.assertCountEqual(expected_data, actual_data) # List directory containing a single file actual_data = list(self.storage.directory_ls(self.dir2['id'])) expected_data = list(self._transform_entries(self.dir2)) self.assertCountEqual(expected_data, actual_data) # List directory containing a known subdirectory, entries should # only be those of the parent directory, not of the subdir actual_data = list(self.storage.directory_ls(self.dir3['id'])) expected_data = list(self._transform_entries(self.dir3)) self.assertCountEqual(expected_data, actual_data) def test_directory_entry_get_by_path(self): # given init_missing = list(self.storage.directory_missing([self.dir3['id']])) self.assertEqual([self.dir3['id']], init_missing) actual_result = self.storage.directory_add([self.dir3, self.dir4]) self.assertEqual(actual_result, {'directory:add': 2}) expected_entries = [ { 'dir_id': self.dir3['id'], 'name': b'foo', 'type': 'file', 'target': self.cont['sha1_git'], 'sha1': None, 'sha1_git': None, 'sha256': None, 'status': None, 'perms': from_disk.DentryPerms.content, 'length': None, }, { 'dir_id': self.dir3['id'], 'name': b'subdir', 'type': 'dir', 'target': self.dir['id'], 'sha1': None, 'sha1_git': None, 'sha256': None, 'status': None, 'perms': from_disk.DentryPerms.directory, 'length': None, }, { 'dir_id': self.dir3['id'], 'name': b'hello', 'type': 'file', 'target': b'12345678901234567890', 'sha1': None, 'sha1_git': None, 'sha256': None, 'status': None, 'perms': from_disk.DentryPerms.content, 'length': None, }, ] # when (all must be found here) for entry, expected_entry in zip(self.dir3['entries'], expected_entries): actual_entry = self.storage.directory_entry_get_by_path( self.dir3['id'],
[entry['name']]) self.assertEqual(actual_entry, expected_entry) # same, but deeper for entry, expected_entry in zip(self.dir3['entries'], expected_entries): actual_entry = self.storage.directory_entry_get_by_path( self.dir4['id'], [b'subdir1', entry['name']]) expected_entry = expected_entry.copy() expected_entry['name'] = b'subdir1/' + expected_entry['name'] self.assertEqual(actual_entry, expected_entry) # when (nothing should be found here since self.dir is not persisted.) for entry in self.dir['entries']: actual_entry = self.storage.directory_entry_get_by_path( self.dir['id'], [entry['name']]) self.assertIsNone(actual_entry) def test_revision_add(self): init_missing = self.storage.revision_missing([self.revision['id']]) self.assertEqual([self.revision['id']], list(init_missing)) actual_result = self.storage.revision_add([self.revision]) self.assertEqual(actual_result, {'revision:add': 1}) end_missing = self.storage.revision_missing([self.revision['id']]) self.assertEqual([], list(end_missing)) self.assertEqual(list(self.journal_writer.objects), [('revision', self.revision)]) # already there so nothing added actual_result = self.storage.revision_add([self.revision]) self.assertEqual(actual_result, {'revision:add': 0}) + def test_revision_add_validation(self): + rev = copy.deepcopy(self.revision) + rev['date']['offset'] = 2**16 + + with self.assertRaisesRegex( + (ValueError, psycopg2.errors.NumericValueOutOfRange), + 'offset'): + self.storage.revision_add([rev]) + + rev = copy.deepcopy(self.revision) + rev['committer_date']['offset'] = 2**16 + + with self.assertRaisesRegex( + (ValueError, psycopg2.errors.NumericValueOutOfRange), + 'offset'): + self.storage.revision_add([rev]) + + rev = copy.deepcopy(self.revision) + rev['type'] = 'foobar' + + with self.assertRaisesRegex( + (ValueError, psycopg2.errors.InvalidTextRepresentation), + '(?i)type'): + self.storage.revision_add([rev]) + def test_revision_add_name_clash(self): revision1 = self.revision.copy() revision2 = self.revision2.copy() revision1['author'] = { 'fullname': b'John Doe <john.doe@example.com>', 'name': b'John Doe', 'email': b'john.doe@example.com' } revision2['author'] = { 'fullname': b'John Doe <john.doe@example.com>', 'name': b'John Doe ', 'email': b'john.doe@example.com ' } actual_result = self.storage.revision_add([revision1, revision2]) self.assertEqual(actual_result, {'revision:add': 2}) def test_revision_log(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) # when actual_results = list(self.storage.revision_log( [self.revision4['id']])) # hack: ids generated for actual_result in actual_results: if 'id' in actual_result['author']: del actual_result['author']['id'] if 'id' in actual_result['committer']: del actual_result['committer']['id'] self.assertEqual(len(actual_results), 2) # rev4 -child-> rev3 self.assertEqual(actual_results[0], self.normalize_entity(self.revision4)) self.assertEqual(actual_results[1], self.normalize_entity(self.revision3)) self.assertEqual(list(self.journal_writer.objects), [('revision', self.revision3), ('revision', self.revision4)]) def test_revision_log_with_limit(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) actual_results = list(self.storage.revision_log( [self.revision4['id']], 1)) # hack: ids generated for actual_result in actual_results: if 'id' in actual_result['author']: del actual_result['author']['id'] if 'id' in actual_result['committer']: del actual_result['committer']['id']
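# with limit=1, only the most recent revision of the log (revision4) is returned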
self.assertEqual(len(actual_results), 1) self.assertEqual(actual_results[0], self.revision4) def test_revision_log_unknown_revision(self): rev_log = list(self.storage.revision_log([self.revision['id']])) self.assertEqual(rev_log, []) @staticmethod def _short_revision(revision): return [revision['id'], revision['parents']] def test_revision_shortlog(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) # when actual_results = list(self.storage.revision_shortlog( [self.revision4['id']])) self.assertEqual(len(actual_results), 2) # rev4 -child-> rev3 self.assertEqual(list(actual_results[0]), self._short_revision(self.revision4)) self.assertEqual(list(actual_results[1]), self._short_revision(self.revision3)) def test_revision_shortlog_with_limit(self): # given # self.revision4 -is-child-of-> self.revision3 self.storage.revision_add([self.revision3, self.revision4]) actual_results = list(self.storage.revision_shortlog( [self.revision4['id']], 1)) self.assertEqual(len(actual_results), 1) self.assertEqual(list(actual_results[0]), self._short_revision(self.revision4)) def test_revision_get(self): self.storage.revision_add([self.revision]) actual_revisions = list(self.storage.revision_get( [self.revision['id'], self.revision2['id']])) # when if 'id' in actual_revisions[0]['author']: del actual_revisions[0]['author']['id'] # hack: ids are generated if 'id' in actual_revisions[0]['committer']: del actual_revisions[0]['committer']['id'] self.assertEqual(len(actual_revisions), 2) self.assertEqual(actual_revisions[0], self.normalize_entity(self.revision)) self.assertIsNone(actual_revisions[1]) def test_revision_get_no_parents(self): self.storage.revision_add([self.revision3]) get = list(self.storage.revision_get([self.revision3['id']])) self.assertEqual(len(get), 1) self.assertEqual(get[0]['parents'], []) # no parents on this one def test_release_add(self): init_missing = self.storage.release_missing([self.release['id'], self.release2['id']]) self.assertEqual([self.release['id'], self.release2['id']], list(init_missing)) actual_result = self.storage.release_add([self.release, self.release2]) self.assertEqual(actual_result, {'release:add': 2}) end_missing = self.storage.release_missing([self.release['id'], self.release2['id']]) self.assertEqual([], list(end_missing)) self.assertEqual(list(self.journal_writer.objects), [('release', self.release), ('release', self.release2)]) # already present so nothing added actual_result = self.storage.release_add([self.release, self.release2]) self.assertEqual(actual_result, {'release:add': 0}) def test_release_add_no_author_date(self): release = self.release.copy() release['author'] = None release['date'] = None actual_result = self.storage.release_add([release]) self.assertEqual(actual_result, {'release:add': 1}) end_missing = self.storage.release_missing([self.release['id']]) self.assertEqual([], list(end_missing)) self.assertEqual(list(self.journal_writer.objects), [('release', release)]) + def test_release_add_validation(self): + rel = copy.deepcopy(self.release) + rel['date']['offset'] = 2**16 + + with self.assertRaisesRegex( + (ValueError, psycopg2.errors.NumericValueOutOfRange), + 'offset'): + self.storage.release_add([rel]) + + rel = copy.deepcopy(self.release) + rel['author'] = None + + with self.assertRaisesRegex( + (ValueError, psycopg2.errors.CheckViolation), + 'date'): + self.storage.release_add([rel]) + def test_release_add_name_clash(self): release1 = self.release.copy() release2 = 
self.release2.copy() release1['author'] = { 'fullname': b'John Doe <john.doe@example.com>', 'name': b'John Doe', 'email': b'john.doe@example.com' } release2['author'] = { 'fullname': b'John Doe <john.doe@example.com>', 'name': b'John Doe ', 'email': b'john.doe@example.com ' } actual_result = self.storage.release_add([release1, release2]) self.assertEqual(actual_result, {'release:add': 2}) def test_release_get(self): # given self.storage.release_add([self.release, self.release2]) # when actual_releases = list(self.storage.release_get([self.release['id'], self.release2['id']])) # then for actual_release in actual_releases: if 'id' in actual_release['author']: del actual_release['author']['id'] # hack: ids are generated self.assertEqual([self.normalize_entity(self.release), self.normalize_entity(self.release2)], [actual_releases[0], actual_releases[1]]) unknown_releases = \ list(self.storage.release_get([self.release3['id']])) self.assertIsNone(unknown_releases[0]) def test_origin_add_one(self): origin0 = self.storage.origin_get(self.origin) self.assertIsNone(origin0) id = self.storage.origin_add_one(self.origin) actual_origin = self.storage.origin_get({'url': self.origin['url']}) if self._test_origin_ids: self.assertEqual(actual_origin['id'], id) self.assertEqual(actual_origin['url'], self.origin['url']) id2 = self.storage.origin_add_one(self.origin) self.assertEqual(id, id2) def test_origin_add(self): origin0 = self.storage.origin_get([self.origin])[0] self.assertIsNone(origin0) origin1, origin2 = self.storage.origin_add([self.origin, self.origin2]) actual_origin = self.storage.origin_get([{ 'url': self.origin['url'], }])[0] if self._test_origin_ids: self.assertEqual(actual_origin['id'], origin1['id']) self.assertEqual(actual_origin['url'], origin1['url']) actual_origin2 = self.storage.origin_get([{ 'url': self.origin2['url'], }])[0] if self._test_origin_ids: self.assertEqual(actual_origin2['id'], origin2['id']) self.assertEqual(actual_origin2['url'], origin2['url']) if 'id' in actual_origin: del actual_origin['id'] del actual_origin2['id'] self.assertEqual(list(self.journal_writer.objects), [('origin', actual_origin), ('origin', actual_origin2)]) def test_origin_add_twice(self): add1 = self.storage.origin_add([self.origin, self.origin2]) add2 = self.storage.origin_add([self.origin, self.origin2]) self.assertEqual(add1, add2) def test_origin_get_legacy(self): self.assertIsNone(self.storage.origin_get(self.origin)) id = self.storage.origin_add_one(self.origin) # lookup per url (returns id) actual_origin0 = self.storage.origin_get( {'url': self.origin['url']}) if self._test_origin_ids: self.assertEqual(actual_origin0['id'], id) self.assertEqual(actual_origin0['url'], self.origin['url']) # lookup per id (returns dict) if self._test_origin_ids: actual_origin1 = self.storage.origin_get({'id': id}) self.assertEqual(actual_origin1, {'id': id, 'type': self.origin['type'], 'url': self.origin['url']}) def test_origin_get(self): self.assertIsNone(self.storage.origin_get(self.origin)) origin_id = self.storage.origin_add_one(self.origin) # lookup per url (returns id) actual_origin0 = self.storage.origin_get( [{'url': self.origin['url']}]) self.assertEqual(len(actual_origin0), 1, actual_origin0) if self._test_origin_ids: self.assertEqual(actual_origin0[0]['id'], origin_id) self.assertEqual(actual_origin0[0]['url'], self.origin['url']) if self._test_origin_ids: # lookup per id (returns dict) actual_origin1 = self.storage.origin_get([{'id': origin_id}]) self.assertEqual(len(actual_origin1), 1, actual_origin1) self.assertEqual(actual_origin1[0],
{'id': origin_id, 'type': self.origin['type'], 'url': self.origin['url']}) def test_origin_get_consistency(self): self.assertIsNone(self.storage.origin_get(self.origin)) id = self.storage.origin_add_one(self.origin) with self.assertRaises(ValueError): self.storage.origin_get([ {'url': self.origin['url']}, {'id': id}]) def test_origin_search_single_result(self): found_origins = list(self.storage.origin_search(self.origin['url'])) self.assertEqual(len(found_origins), 0) found_origins = list(self.storage.origin_search(self.origin['url'], regexp=True)) self.assertEqual(len(found_origins), 0) self.storage.origin_add_one(self.origin) origin_data = { 'type': self.origin['type'], 'url': self.origin['url']} found_origins = list(self.storage.origin_search(self.origin['url'])) self.assertEqual(len(found_origins), 1) if 'id' in found_origins[0]: del found_origins[0]['id'] self.assertEqual(found_origins[0], origin_data) found_origins = list(self.storage.origin_search( '.' + self.origin['url'][1:-1] + '.', regexp=True)) self.assertEqual(len(found_origins), 1) if 'id' in found_origins[0]: del found_origins[0]['id'] self.assertEqual(found_origins[0], origin_data) self.storage.origin_add_one(self.origin2) origin2_data = { 'type': self.origin2['type'], 'url': self.origin2['url']} found_origins = list(self.storage.origin_search(self.origin2['url'])) self.assertEqual(len(found_origins), 1) if 'id' in found_origins[0]: del found_origins[0]['id'] self.assertEqual(found_origins[0], origin2_data) found_origins = list(self.storage.origin_search( '.' + self.origin2['url'][1:-1] + '.', regexp=True)) self.assertEqual(len(found_origins), 1) if 'id' in found_origins[0]: del found_origins[0]['id'] self.assertEqual(found_origins[0], origin2_data) def test_origin_search_no_regexp(self): self.storage.origin_add_one(self.origin) self.storage.origin_add_one(self.origin2) origin = self.storage.origin_get({'url': self.origin['url']}) origin2 = self.storage.origin_get({'url': self.origin2['url']}) # no pagination found_origins = list(self.storage.origin_search('/')) self.assertEqual(len(found_origins), 2) # offset=0 found_origins0 = list(self.storage.origin_search('/', offset=0, limit=1)) # noqa self.assertEqual(len(found_origins0), 1) self.assertIn(found_origins0[0], [origin, origin2]) # offset=1 found_origins1 = list(self.storage.origin_search('/', offset=1, limit=1)) # noqa self.assertEqual(len(found_origins1), 1) self.assertIn(found_origins1[0], [origin, origin2]) # check both origins were returned self.assertCountEqual(found_origins0 + found_origins1, [origin, origin2]) def test_origin_search_regexp_substring(self): self.storage.origin_add_one(self.origin) self.storage.origin_add_one(self.origin2) origin = self.storage.origin_get({'url': self.origin['url']}) origin2 = self.storage.origin_get({'url': self.origin2['url']}) # no pagination found_origins = list(self.storage.origin_search('/', regexp=True)) self.assertEqual(len(found_origins), 2) # offset=0 found_origins0 = list(self.storage.origin_search('/', offset=0, limit=1, regexp=True)) # noqa self.assertEqual(len(found_origins0), 1) self.assertIn(found_origins0[0], [origin, origin2]) # offset=1 found_origins1 = list(self.storage.origin_search('/', offset=1, limit=1, regexp=True)) # noqa self.assertEqual(len(found_origins1), 1) self.assertIn(found_origins1[0], [origin, origin2]) # check both origins were returned self.assertCountEqual(found_origins0 + found_origins1, [origin, origin2]) def test_origin_search_regexp_fullstring(self): 
self.storage.origin_add_one(self.origin) self.storage.origin_add_one(self.origin2) origin = self.storage.origin_get({'url': self.origin['url']}) origin2 = self.storage.origin_get({'url': self.origin2['url']}) # no pagination found_origins = list(self.storage.origin_search('.*/.*', regexp=True)) self.assertEqual(len(found_origins), 2) # offset=0 found_origins0 = list(self.storage.origin_search('.*/.*', offset=0, limit=1, regexp=True)) # noqa self.assertEqual(len(found_origins0), 1) self.assertIn(found_origins0[0], [origin, origin2]) # offset=1 found_origins1 = list(self.storage.origin_search('.*/.*', offset=1, limit=1, regexp=True)) # noqa self.assertEqual(len(found_origins1), 1) self.assertIn(found_origins1[0], [origin, origin2]) # check both origins were returned self.assertCountEqual( found_origins0 + found_origins1, [origin, origin2]) @given(strategies.booleans()) def test_origin_visit_add(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() # given self.assertIsNone(self.storage.origin_get([self.origin2])[0]) origin_id = self.storage.origin_add_one(self.origin2) self.assertIsNotNone(origin_id) origin_id_or_url = self.origin2['url'] if use_url else origin_id # when origin_visit1 = self.storage.origin_visit_add( origin_id_or_url, type='git', date=self.date_visit2) actual_origin_visits = list(self.storage.origin_visit_get( origin_id_or_url)) self.assertEqual(actual_origin_visits, [{ 'origin': origin_id, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': 'git', 'status': 'ongoing', 'metadata': None, 'snapshot': None, }]) expected_origin = self.origin2.copy() data = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': 'git', 'status': 'ongoing', 'metadata': None, 'snapshot': None, } self.assertEqual(list(self.journal_writer.objects), [('origin', expected_origin), ('origin_visit', data)]) def test_origin_visit_get__unknown_origin(self): self.assertEqual([], list(self.storage.origin_visit_get('foo'))) if self._test_origin_ids: self.assertEqual([], list(self.storage.origin_visit_get(10))) @given(strategies.booleans()) def test_origin_visit_add_default_type(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() # given self.assertIsNone(self.storage.origin_get([self.origin2])[0]) origin_id = self.storage.origin_add_one(self.origin2) origin_id_or_url = self.origin2['url'] if use_url else origin_id self.assertIsNotNone(origin_id) # when origin_visit1 = self.storage.origin_visit_add( origin_id_or_url, date=self.date_visit2) origin_visit2 = self.storage.origin_visit_add( origin_id_or_url, date='2018-01-01 23:00:00+00') # then self.assertEqual(origin_visit1['origin'], origin_id) self.assertIsNotNone(origin_visit1['visit']) actual_origin_visits = list(self.storage.origin_visit_get( origin_id_or_url)) self.assertEqual(actual_origin_visits, [ { 'origin': origin_id, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': 'hg', 'status': 'ongoing', 'metadata': None, 'snapshot': None, }, { 'origin': origin_id, 'date': self.date_visit3, 'visit': origin_visit2['visit'], 'type': 'hg', 'status': 'ongoing', 'metadata': None, 'snapshot': None, }, ]) expected_origin = self.origin2.copy() data1 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': 'hg', 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data2 = { 'origin': expected_origin, 'date': self.date_visit3, 'visit': origin_visit2['visit'], 'type': 'hg', 'status': 
'ongoing', 'metadata': None, 'snapshot': None, } self.assertEqual(list(self.journal_writer.objects), [('origin', expected_origin), ('origin_visit', data1), ('origin_visit', data2)]) @given(strategies.booleans()) def test_origin_visit_update(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() # given origin_id = self.storage.origin_add_one(self.origin) origin_id2 = self.storage.origin_add_one(self.origin2) origin2_id_or_url = self.origin2['url'] if use_url else origin_id2 origin_id_or_url = self.origin['url'] if use_url else origin_id origin_visit1 = self.storage.origin_visit_add( origin_id_or_url, date=self.date_visit2) origin_visit2 = self.storage.origin_visit_add( origin_id_or_url, date=self.date_visit3) origin_visit3 = self.storage.origin_visit_add( origin2_id_or_url, date=self.date_visit3) # when visit1_metadata = { 'contents': 42, 'directories': 22, } self.storage.origin_visit_update( origin_id_or_url, origin_visit1['visit'], status='full', metadata=visit1_metadata) self.storage.origin_visit_update( origin2_id_or_url, origin_visit3['visit'], status='partial') # then actual_origin_visits = list(self.storage.origin_visit_get( origin_id_or_url)) self.assertEqual(actual_origin_visits, [{ 'origin': origin_visit2['origin'], 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'status': 'full', 'metadata': visit1_metadata, 'snapshot': None, }, { 'origin': origin_visit2['origin'], 'date': self.date_visit3, 'visit': origin_visit2['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, }]) actual_origin_visits_bis = list(self.storage.origin_visit_get( origin_id_or_url, limit=1)) self.assertEqual(actual_origin_visits_bis, [{ 'origin': origin_visit2['origin'], 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'status': 'full', 'metadata': visit1_metadata, 'snapshot': None, }]) actual_origin_visits_ter = list(self.storage.origin_visit_get( origin_id_or_url, last_visit=origin_visit1['visit'])) self.assertEqual(actual_origin_visits_ter, [{ 'origin': origin_visit2['origin'], 'date': self.date_visit3, 'visit': origin_visit2['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, }]) actual_origin_visits2 = list(self.storage.origin_visit_get( origin2_id_or_url)) self.assertEqual(actual_origin_visits2, [{ 'origin': origin_visit3['origin'], 'date': self.date_visit3, 'visit': origin_visit3['visit'], 'type': self.origin2['type'], 'status': 'partial', 'metadata': None, 'snapshot': None, }]) expected_origin = self.origin.copy() expected_origin2 = self.origin2.copy() data1 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data2 = { 'origin': expected_origin, 'date': self.date_visit3, 'visit': origin_visit2['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data3 = { 'origin': expected_origin2, 'date': self.date_visit3, 'visit': origin_visit3['visit'], 'type': self.origin2['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data4 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'metadata': visit1_metadata, 'status': 'full', 'snapshot': None, } data5 = { 'origin': expected_origin2, 'date': self.date_visit3, 'visit': origin_visit3['visit'], 'type': 
self.origin2['type'], 'status': 'partial', 'metadata': None, 'snapshot': None, } self.assertEqual(list(self.journal_writer.objects), [('origin', expected_origin), ('origin', expected_origin2), ('origin_visit', data1), ('origin_visit', data2), ('origin_visit', data3), ('origin_visit', data4), ('origin_visit', data5)]) def test_origin_visit_find_by_date(self): # given self.storage.origin_add_one(self.origin) self.storage.origin_visit_add( self.origin['url'], date=self.date_visit2) origin_visit2 = self.storage.origin_visit_add( self.origin['url'], date=self.date_visit3) origin_visit3 = self.storage.origin_visit_add( self.origin['url'], date=self.date_visit2) # Simple case visit = self.storage.origin_visit_find_by_date( self.origin['url'], self.date_visit3) self.assertEqual(visit['visit'], origin_visit2['visit']) # There are two visits at the same date, the latest must be returned visit = self.storage.origin_visit_find_by_date( self.origin['url'], self.date_visit2) self.assertEqual(visit['visit'], origin_visit3['visit']) def test_origin_visit_find_by_date__unknown_origin(self): self.storage.origin_visit_find_by_date('foo', self.date_visit2) @settings(deadline=None) @given(strategies.booleans()) def test_origin_visit_update_missing_snapshot(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() # given origin_id = self.storage.origin_add_one(self.origin) origin_id_or_url = self.origin['url'] if use_url else origin_id origin_visit = self.storage.origin_visit_add( origin_id_or_url, date=self.date_visit1) # when self.storage.origin_visit_update( origin_id_or_url, origin_visit['visit'], snapshot=self.snapshot['id']) # then actual_origin_visit = self.storage.origin_visit_get_by( origin_id_or_url, origin_visit['visit']) self.assertEqual(actual_origin_visit['snapshot'], self.snapshot['id']) # when self.storage.snapshot_add([self.snapshot]) self.assertEqual(actual_origin_visit['snapshot'], self.snapshot['id']) @settings(deadline=None) @given(strategies.booleans()) def test_origin_visit_get_by(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() origin_id = self.storage.origin_add_one(self.origin) origin_id2 = self.storage.origin_add_one(self.origin2) origin_id_or_url = self.origin['url'] if use_url else origin_id origin2_id_or_url = self.origin2['url'] if use_url else origin_id2 origin_visit1 = self.storage.origin_visit_add( origin_id_or_url, date=self.date_visit2) self.storage.snapshot_add([self.snapshot]) self.storage.origin_visit_update( origin_id_or_url, origin_visit1['visit'], snapshot=self.snapshot['id']) # Add some other {origin, visit} entries self.storage.origin_visit_add( origin_id_or_url, date=self.date_visit3) self.storage.origin_visit_add( origin2_id_or_url, date=self.date_visit3) # when visit1_metadata = { 'contents': 42, 'directories': 22, } self.storage.origin_visit_update( origin_id_or_url, origin_visit1['visit'], status='full', metadata=visit1_metadata) expected_origin_visit = origin_visit1.copy() expected_origin_visit.update({ 'origin': origin_id, 'visit': origin_visit1['visit'], 'date': self.date_visit2, 'type': self.origin['type'], 'metadata': visit1_metadata, 'status': 'full', 'snapshot': self.snapshot['id'], }) # when actual_origin_visit1 = self.storage.origin_visit_get_by( origin_id_or_url, origin_visit1['visit']) # then self.assertEqual(actual_origin_visit1, expected_origin_visit) def test_origin_visit_get_by__unknown_origin(self): if self._test_origin_ids: 
self.assertIsNone(self.storage.origin_visit_get_by(2, 10)) self.assertIsNone(self.storage.origin_visit_get_by('foo', 10)) @given(strategies.booleans()) def test_origin_visit_upsert_new(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() # given self.assertIsNone(self.storage.origin_get([self.origin2])[0]) origin_id = self.storage.origin_add_one(self.origin2) origin_id_or_url = self.origin2['url'] if use_url else origin_id self.assertIsNotNone(origin_id) # when self.storage.origin_visit_upsert([ { 'origin': self.origin2, 'date': self.date_visit2, 'visit': 123, 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, }, { 'origin': self.origin2, 'date': '2018-01-01 23:00:00+00', 'visit': 1234, 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, }, ]) # then actual_origin_visits = list(self.storage.origin_visit_get( origin_id_or_url)) self.assertEqual(actual_origin_visits, [ { 'origin': origin_id, 'date': self.date_visit2, 'visit': 123, 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, }, { 'origin': origin_id, 'date': self.date_visit3, 'visit': 1234, 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, }, ]) expected_origin = self.origin2.copy() data1 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': 123, 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, } data2 = { 'origin': expected_origin, 'date': self.date_visit3, 'visit': 1234, 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, } self.assertEqual(list(self.journal_writer.objects), [('origin', expected_origin), ('origin_visit', data1), ('origin_visit', data2)]) @settings(deadline=None) @given(strategies.booleans()) def test_origin_visit_upsert_existing(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() # given self.assertIsNone(self.storage.origin_get([self.origin2])[0]) origin_id = self.storage.origin_add_one(self.origin2) origin_id_or_url = self.origin2['url'] if use_url else origin_id self.assertIsNotNone(origin_id) # when origin_visit1 = self.storage.origin_visit_add( origin_id_or_url, date=self.date_visit2) self.storage.origin_visit_upsert([{ 'origin': self.origin2, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, }]) # then self.assertEqual(origin_visit1['origin'], origin_id) self.assertIsNotNone(origin_visit1['visit']) actual_origin_visits = list(self.storage.origin_visit_get( origin_id_or_url)) self.assertEqual(actual_origin_visits, [{ 'origin': origin_id, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, }]) expected_origin = self.origin2.copy() data1 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin2['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data2 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit1['visit'], 'type': self.origin2['type'], 'status': 'full', 'metadata': None, 'snapshot': None, } self.assertEqual(list(self.journal_writer.objects), [('origin', expected_origin), ('origin_visit', data1), ('origin_visit', data2)]) def test_origin_visit_get_by_no_result(self): if self._test_origin_ids: actual_origin_visit = self.storage.origin_visit_get_by( 10, 
999) self.assertIsNone(actual_origin_visit) self.storage.origin_add([self.origin]) actual_origin_visit = self.storage.origin_visit_get_by( self.origin['url'], 999) self.assertIsNone(actual_origin_visit) @settings(deadline=None) # this test is very slow @given(strategies.booleans()) def test_origin_visit_get_latest(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() origin_id = self.storage.origin_add_one(self.origin) origin_id_or_url = self.origin['url'] if use_url else origin_id origin_url = self.origin['url'] origin_visit1 = self.storage.origin_visit_add( origin_id_or_url, self.date_visit1) visit1_id = origin_visit1['visit'] origin_visit2 = self.storage.origin_visit_add( origin_id_or_url, self.date_visit2) visit2_id = origin_visit2['visit'] # Add a visit with the same date as the previous one origin_visit3 = self.storage.origin_visit_add( origin_id_or_url, self.date_visit2) visit3_id = origin_visit3['visit'] origin_visit1 = self.storage.origin_visit_get_by(origin_url, visit1_id) origin_visit2 = self.storage.origin_visit_get_by(origin_url, visit2_id) origin_visit3 = self.storage.origin_visit_get_by(origin_url, visit3_id) # Three visits, none with a snapshot self.assertEqual( origin_visit3, self.storage.origin_visit_get_latest(origin_url)) self.assertIsNone( self.storage.origin_visit_get_latest(origin_url, require_snapshot=True)) # Add snapshot to visit1; require_snapshot=True makes it return # visit1 and require_snapshot=False still returns visit3 self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id_or_url, visit1_id, snapshot=self.complete_snapshot['id']) self.assertEqual( {**origin_visit1, 'snapshot': self.complete_snapshot['id']}, self.storage.origin_visit_get_latest( origin_url, require_snapshot=True) ) self.assertEqual( origin_visit3, self.storage.origin_visit_get_latest(origin_url) ) # Status filter: all three visits are status=ongoing, so no visit # returned self.assertIsNone( self.storage.origin_visit_get_latest( origin_url, allowed_statuses=['full']) ) # Mark the first visit as completed and check status filter again self.storage.origin_visit_update( origin_id_or_url, visit1_id, status='full') self.assertEqual( { **origin_visit1, 'snapshot': self.complete_snapshot['id'], 'status': 'full'}, self.storage.origin_visit_get_latest( origin_url, allowed_statuses=['full']), ) self.assertEqual( origin_visit3, self.storage.origin_visit_get_latest(origin_url), ) # Add snapshot to visit2 and check that the new snapshot is returned self.storage.snapshot_add([self.empty_snapshot]) self.storage.origin_visit_update( origin_id_or_url, visit2_id, snapshot=self.empty_snapshot['id']) self.assertEqual( {**origin_visit2, 'snapshot': self.empty_snapshot['id']}, self.storage.origin_visit_get_latest( origin_url, require_snapshot=True), ) self.assertEqual( origin_visit3, self.storage.origin_visit_get_latest(origin_url), ) # Check that the status filter is still working self.assertEqual( { **origin_visit1, 'snapshot': self.complete_snapshot['id'], 'status': 'full'}, self.storage.origin_visit_get_latest( origin_url, allowed_statuses=['full']), ) # Add snapshot to visit3 (same date as visit2) self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id_or_url, visit3_id, snapshot=self.complete_snapshot['id']) self.assertEqual( { **origin_visit1, 'snapshot': self.complete_snapshot['id'], 'status': 'full'}, self.storage.origin_visit_get_latest( origin_url, allowed_statuses=['full']), )
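        # Aside: a minimal sketch of the selection rule the assertions in
        # this test encode. The helper below is illustrative only (it is
        # not the actual storage implementation) and assumes `visits` is
        # a list of visit dicts as returned by origin_visit_get().
        def _latest_visit_sketch(visits, allowed_statuses=None,
                                 require_snapshot=False):
            candidates = [
                v for v in visits
                if (allowed_statuses is None
                    or v['status'] in allowed_statuses)
                and (not require_snapshot or v['snapshot'] is not None)]
            if not candidates:
                return None
            # Same-date ties are broken by the visit counter, so the
            # visit added last wins (visit3 over visit2 here).
            return max(candidates,
                       key=lambda v: (v['date'], v['visit']))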
self.assertEqual( { **origin_visit1, 'snapshot': self.complete_snapshot['id'], 'status': 'full'}, self.storage.origin_visit_get_latest( origin_url, allowed_statuses=['full'], require_snapshot=True), ) self.assertEqual( {**origin_visit3, 'snapshot': self.complete_snapshot['id']}, self.storage.origin_visit_get_latest( origin_url), ) self.assertEqual( {**origin_visit3, 'snapshot': self.complete_snapshot['id']}, self.storage.origin_visit_get_latest( origin_url, require_snapshot=True), ) def test_person_fullname_unicity(self): # given (person injection through revisions for example) revision = self.revision # create a revision with same committer fullname but wo name and email revision2 = copy.deepcopy(self.revision2) revision2['committer'] = dict(revision['committer']) revision2['committer']['email'] = None revision2['committer']['name'] = None self.storage.revision_add([revision]) self.storage.revision_add([revision2]) # when getting added revisions revisions = list( self.storage.revision_get([revision['id'], revision2['id']])) # then # check committers are the same self.assertEqual(revisions[0]['committer'], revisions[1]['committer']) def test_snapshot_add_get_empty(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit_id = origin_visit1['visit'] actual_result = self.storage.snapshot_add([self.empty_snapshot]) self.assertEqual(actual_result, {'snapshot:add': 1}) self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.empty_snapshot['id']) by_id = self.storage.snapshot_get(self.empty_snapshot['id']) self.assertEqual(by_id, {**self.empty_snapshot, 'next_branch': None}) by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id) self.assertEqual(by_ov, {**self.empty_snapshot, 'next_branch': None}) expected_origin = self.origin.copy() data1 = { 'origin': expected_origin, 'date': self.date_visit1, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data2 = { 'origin': expected_origin, 'date': self.date_visit1, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': self.empty_snapshot['id'], } self.assertEqual(list(self.journal_writer.objects), [('origin', expected_origin), ('origin_visit', data1), ('snapshot', self.empty_snapshot), ('origin_visit', data2)]) def test_snapshot_add_get_complete(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit_id = origin_visit1['visit'] actual_result = self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.complete_snapshot['id']) self.assertEqual(actual_result, {'snapshot:add': 1}) by_id = self.storage.snapshot_get(self.complete_snapshot['id']) self.assertEqual(by_id, {**self.complete_snapshot, 'next_branch': None}) by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id) self.assertEqual(by_ov, {**self.complete_snapshot, 'next_branch': None}) def test_snapshot_add_many(self): actual_result = self.storage.snapshot_add( [self.snapshot, self.complete_snapshot]) self.assertEqual(actual_result, {'snapshot:add': 2}) self.assertEqual( {**self.complete_snapshot, 'next_branch': None}, self.storage.snapshot_get(self.complete_snapshot['id'])) self.assertEqual( {**self.snapshot, 'next_branch': None}, self.storage.snapshot_get(self.snapshot['id'])) def 
test_snapshot_add_many_incremental(self): actual_result = self.storage.snapshot_add([self.complete_snapshot]) self.assertEqual(actual_result, {'snapshot:add': 1}) actual_result2 = self.storage.snapshot_add( [self.snapshot, self.complete_snapshot]) self.assertEqual(actual_result2, {'snapshot:add': 1}) self.assertEqual( {**self.complete_snapshot, 'next_branch': None}, self.storage.snapshot_get(self.complete_snapshot['id'])) self.assertEqual( {**self.snapshot, 'next_branch': None}, self.storage.snapshot_get(self.snapshot['id'])) def test_snapshot_add_count_branches(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit_id = origin_visit1['visit'] actual_result = self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.complete_snapshot['id']) self.assertEqual(actual_result, {'snapshot:add': 1}) snp_id = self.complete_snapshot['id'] snp_size = self.storage.snapshot_count_branches(snp_id) expected_snp_size = { 'alias': 1, 'content': 1, 'directory': 2, 'release': 1, 'revision': 1, 'snapshot': 1, None: 1 } self.assertEqual(snp_size, expected_snp_size) def test_snapshot_add_get_paginated(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit_id = origin_visit1['visit'] self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.complete_snapshot['id']) snp_id = self.complete_snapshot['id'] branches = self.complete_snapshot['branches'] branch_names = list(sorted(branches)) # Test branch_from snapshot = self.storage.snapshot_get_branches(snp_id, branches_from=b'release') rel_idx = branch_names.index(b'release') expected_snapshot = { 'id': snp_id, 'branches': { name: branches[name] for name in branch_names[rel_idx:] }, 'next_branch': None, } self.assertEqual(snapshot, expected_snapshot) # Test branches_count snapshot = self.storage.snapshot_get_branches(snp_id, branches_count=1) expected_snapshot = { 'id': snp_id, 'branches': { branch_names[0]: branches[branch_names[0]], }, 'next_branch': b'content', } self.assertEqual(snapshot, expected_snapshot) # test branch_from + branches_count snapshot = self.storage.snapshot_get_branches( snp_id, branches_from=b'directory', branches_count=3) dir_idx = branch_names.index(b'directory') expected_snapshot = { 'id': snp_id, 'branches': { name: branches[name] for name in branch_names[dir_idx:dir_idx + 3] }, 'next_branch': branch_names[dir_idx + 3], } self.assertEqual(snapshot, expected_snapshot) def test_snapshot_add_get_filtered(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit_id = origin_visit1['visit'] self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.complete_snapshot['id']) snp_id = self.complete_snapshot['id'] branches = self.complete_snapshot['branches'] snapshot = self.storage.snapshot_get_branches( snp_id, target_types=['release', 'revision']) expected_snapshot = { 'id': snp_id, 'branches': { name: tgt for name, tgt in branches.items() if tgt and tgt['target_type'] in ['release', 'revision'] }, 'next_branch': None, } self.assertEqual(snapshot, expected_snapshot) snapshot = self.storage.snapshot_get_branches(snp_id, target_types=['alias']) expected_snapshot = { 'id': snp_id, 'branches': { name: tgt for 
name, tgt in branches.items() if tgt and tgt['target_type'] == 'alias' }, 'next_branch': None, } self.assertEqual(snapshot, expected_snapshot) def test_snapshot_add_get_filtered_and_paginated(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit_id = origin_visit1['visit'] self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.complete_snapshot['id']) snp_id = self.complete_snapshot['id'] branches = self.complete_snapshot['branches'] branch_names = list(sorted(branches)) # Test branch_from snapshot = self.storage.snapshot_get_branches( snp_id, target_types=['directory', 'release'], branches_from=b'directory2') expected_snapshot = { 'id': snp_id, 'branches': { name: branches[name] for name in (b'directory2', b'release') }, 'next_branch': None, } self.assertEqual(snapshot, expected_snapshot) # Test branches_count snapshot = self.storage.snapshot_get_branches( snp_id, target_types=['directory', 'release'], branches_count=1) expected_snapshot = { 'id': snp_id, 'branches': { b'directory': branches[b'directory'] }, 'next_branch': b'directory2', } self.assertEqual(snapshot, expected_snapshot) # Test branches_count snapshot = self.storage.snapshot_get_branches( snp_id, target_types=['directory', 'release'], branches_count=2) expected_snapshot = { 'id': snp_id, 'branches': { name: branches[name] for name in (b'directory', b'directory2') }, 'next_branch': b'release', } self.assertEqual(snapshot, expected_snapshot) # test branch_from + branches_count snapshot = self.storage.snapshot_get_branches( snp_id, target_types=['directory', 'release'], branches_from=b'directory2', branches_count=1) dir_idx = branch_names.index(b'directory2') expected_snapshot = { 'id': snp_id, 'branches': { branch_names[dir_idx]: branches[branch_names[dir_idx]], }, 'next_branch': b'release', } self.assertEqual(snapshot, expected_snapshot) def test_snapshot_add_get(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit_id = origin_visit1['visit'] self.storage.snapshot_add([self.snapshot]) self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.snapshot['id']) by_id = self.storage.snapshot_get(self.snapshot['id']) self.assertEqual(by_id, {**self.snapshot, 'next_branch': None}) by_ov = self.storage.snapshot_get_by_origin_visit(origin_id, visit_id) self.assertEqual(by_ov, {**self.snapshot, 'next_branch': None}) origin_visit_info = self.storage.origin_visit_get_by(origin_id, visit_id) self.assertEqual(origin_visit_info['snapshot'], self.snapshot['id']) def test_snapshot_add_nonexistent_visit(self): origin_id = self.storage.origin_add_one(self.origin) visit_id = 54164461156 self.journal_writer.objects[:] = [] self.storage.snapshot_add([self.snapshot]) with self.assertRaises(ValueError): self.storage.origin_visit_update( origin_id, visit_id, snapshot=self.snapshot['id']) self.assertEqual(list(self.journal_writer.objects), [ ('snapshot', self.snapshot)]) def test_snapshot_add_twice(self): origin_id = self.storage.origin_add_one(self.origin) origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit1_id = origin_visit1['visit'] self.storage.snapshot_add([self.snapshot]) self.storage.origin_visit_update( origin_id, visit1_id, snapshot=self.snapshot['id']) by_ov1 = self.storage.snapshot_get_by_origin_visit(origin_id, visit1_id) self.assertEqual(by_ov1, 
{**self.snapshot, 'next_branch': None}) origin_visit2 = self.storage.origin_visit_add(origin_id, self.date_visit2) visit2_id = origin_visit2['visit'] self.storage.snapshot_add([self.snapshot]) self.storage.origin_visit_update( origin_id, visit2_id, snapshot=self.snapshot['id']) by_ov2 = self.storage.snapshot_get_by_origin_visit(origin_id, visit2_id) self.assertEqual(by_ov2, {**self.snapshot, 'next_branch': None}) expected_origin = self.origin.copy() data1 = { 'origin': expected_origin, 'date': self.date_visit1, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data2 = { 'origin': expected_origin, 'date': self.date_visit1, 'visit': origin_visit1['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': self.snapshot['id'], } data3 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit2['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': None, } data4 = { 'origin': expected_origin, 'date': self.date_visit2, 'visit': origin_visit2['visit'], 'type': self.origin['type'], 'status': 'ongoing', 'metadata': None, 'snapshot': self.snapshot['id'], } self.assertEqual(list(self.journal_writer.objects), [('origin', expected_origin), ('origin_visit', data1), ('snapshot', self.snapshot), ('origin_visit', data2), ('origin_visit', data3), ('origin_visit', data4)]) @settings(deadline=None) # this test is very slow @given(strategies.booleans()) def test_snapshot_get_latest(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() origin_id = self.storage.origin_add_one(self.origin) origin_id_or_url = self.origin['url'] if use_url else origin_id origin_visit1 = self.storage.origin_visit_add(origin_id, self.date_visit1) visit1_id = origin_visit1['visit'] origin_visit2 = self.storage.origin_visit_add(origin_id, self.date_visit2) visit2_id = origin_visit2['visit'] # Add a visit with the same date as the previous one origin_visit3 = self.storage.origin_visit_add(origin_id, self.date_visit2) visit3_id = origin_visit3['visit'] # Three visits, none with a snapshot: latest snapshot is None self.assertIsNone(self.storage.snapshot_get_latest( origin_id_or_url)) # Add snapshot to visit1, latest snapshot = visit 1 snapshot self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id, visit1_id, snapshot=self.complete_snapshot['id']) self.assertEqual({**self.complete_snapshot, 'next_branch': None}, self.storage.snapshot_get_latest( origin_id_or_url)) # Status filter: all three visits are status=ongoing, so no snapshot # returned self.assertIsNone( self.storage.snapshot_get_latest( origin_id_or_url, allowed_statuses=['full']) ) # Mark the first visit as completed and check status filter again self.storage.origin_visit_update(origin_id, visit1_id, status='full') self.assertEqual( {**self.complete_snapshot, 'next_branch': None}, self.storage.snapshot_get_latest( origin_id_or_url, allowed_statuses=['full']), ) # Add snapshot to visit2 and check that the new snapshot is returned self.storage.snapshot_add([self.empty_snapshot]) self.storage.origin_visit_update( origin_id, visit2_id, snapshot=self.empty_snapshot['id']) self.assertEqual({**self.empty_snapshot, 'next_branch': None}, self.storage.snapshot_get_latest(origin_id)) # Check that the status filter is still working self.assertEqual( {**self.complete_snapshot, 'next_branch': None}, self.storage.snapshot_get_latest( origin_id_or_url, allowed_statuses=['full']), ) # Add snapshot to visit3 (same date as visit2) and check that # the new snapshot is returned self.storage.snapshot_add([self.complete_snapshot]) self.storage.origin_visit_update( origin_id, visit3_id, snapshot=self.complete_snapshot['id']) self.assertEqual({**self.complete_snapshot, 'next_branch': None}, self.storage.snapshot_get_latest( origin_id_or_url)) @given(strategies.booleans()) def test_snapshot_get_latest__missing_snapshot(self, use_url): if not self._test_origin_ids and not use_url: return self.reset_storage() # Origin does not exist self.assertIsNone(self.storage.snapshot_get_latest( self.origin['url'] if use_url else 999)) origin_id = self.storage.origin_add_one(self.origin) origin_id_or_url = self.origin['url'] if use_url else origin_id origin_visit1 = self.storage.origin_visit_add( origin_id_or_url, self.date_visit1) visit1_id = origin_visit1['visit'] origin_visit2 = self.storage.origin_visit_add( origin_id_or_url, self.date_visit2) visit2_id = origin_visit2['visit'] # Two visits, both with no snapshot: latest snapshot is None self.assertIsNone(self.storage.snapshot_get_latest( origin_id_or_url)) # Add unknown snapshot to visit1, check that the inconsistency is # detected self.storage.origin_visit_update( origin_id_or_url, visit1_id, snapshot=self.complete_snapshot['id']) with self.assertRaises(ValueError): self.storage.snapshot_get_latest( origin_id_or_url) # Status filter: both visits are status=ongoing, so no snapshot # returned self.assertIsNone( self.storage.snapshot_get_latest( origin_id_or_url, allowed_statuses=['full']) ) # Mark the first visit as completed and check status filter again self.storage.origin_visit_update( origin_id_or_url, visit1_id, status='full') with self.assertRaises(ValueError): self.storage.snapshot_get_latest( origin_id_or_url, allowed_statuses=['full']) # Actually add the snapshot and check that it is now returned self.storage.snapshot_add([self.complete_snapshot]) self.assertEqual( {**self.complete_snapshot, 'next_branch': None}, self.storage.snapshot_get_latest( origin_id_or_url) ) # Add unknown snapshot to visit2 and check that the inconsistency # is detected self.storage.origin_visit_update( origin_id_or_url, visit2_id, snapshot=self.snapshot['id']) with self.assertRaises(ValueError): self.storage.snapshot_get_latest( origin_id_or_url) # Actually add that snapshot and check that the new one is returned self.storage.snapshot_add([self.snapshot]) self.assertEqual( {**self.snapshot, 'next_branch': None}, self.storage.snapshot_get_latest( origin_id_or_url) ) def test_stat_counters(self): expected_keys = ['content', 'directory', 'origin', 'revision'] # Initially, all counters are 0 self.storage.refresh_stat_counters() counters = self.storage.stat_counters() self.assertTrue(set(expected_keys) <= set(counters)) for key in expected_keys: self.assertEqual(counters[key], 0) # Add a content. Only the content counter should increase. self.storage.content_add([self.cont]) self.storage.refresh_stat_counters() counters = self.storage.stat_counters() self.assertTrue(set(expected_keys) <= set(counters)) for key in expected_keys: if key != 'content': self.assertEqual(counters[key], 0) self.assertEqual(counters['content'], 1) # Add other objects. Check their counter increased as well.
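        # Aside: an illustrative sketch of the invariant the counter
        # checks in this test rely on: one counter per object type, equal
        # to the number of objects of that type in storage. The helper is
        # hypothetical, not the actual refresh_stat_counters() /
        # stat_counters() implementation.
        def _expected_counters_sketch(objects_by_type):
            # objects_by_type: assumed mapping such as
            # {'content': [...], 'directory': [...], 'revision': [...]}
            return {obj_type: len(objs)
                    for obj_type, objs in objects_by_type.items()}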
self.storage.origin_add_one(self.origin2) origin_visit1 = self.storage.origin_visit_add( self.origin2['url'], date=self.date_visit2) self.storage.snapshot_add([self.snapshot]) self.storage.origin_visit_update( self.origin2['url'], origin_visit1['visit'], snapshot=self.snapshot['id']) self.storage.directory_add([self.dir]) self.storage.revision_add([self.revision]) self.storage.release_add([self.release]) self.storage.refresh_stat_counters() counters = self.storage.stat_counters() self.assertEqual(counters['content'], 1) self.assertEqual(counters['directory'], 1) self.assertEqual(counters['snapshot'], 1) self.assertEqual(counters['origin'], 1) self.assertEqual(counters['origin_visit'], 1) self.assertEqual(counters['revision'], 1) self.assertEqual(counters['release'], 1) self.assertEqual(counters['snapshot'], 1) if 'person' in counters: self.assertEqual(counters['person'], 3) def test_content_find_ctime(self): cont = self.cont.copy() del cont['data'] now = datetime.datetime.now(tz=datetime.timezone.utc) cont['ctime'] = now self.storage.content_add_metadata([cont]) actually_present = self.storage.content_find({'sha1': cont['sha1']}) # check ctime up to one second dt = actually_present[0]['ctime'] - now self.assertLessEqual(abs(dt.total_seconds()), 1, dt) del actually_present[0]['ctime'] self.assertEqual(actually_present[0], { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'blake2s256': cont['blake2s256'], 'length': cont['length'], 'status': 'visible' }) def test_content_find_with_present_content(self): # 1. with something to find cont = self.cont self.storage.content_add([cont, self.cont2]) actually_present = self.storage.content_find( {'sha1': cont['sha1']} ) self.assertEqual(1, len(actually_present)) actually_present[0].pop('ctime') self.assertEqual(actually_present[0], { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'blake2s256': cont['blake2s256'], 'length': cont['length'], 'status': 'visible' }) # 2. with something to find actually_present = self.storage.content_find( {'sha1_git': cont['sha1_git']}) self.assertEqual(1, len(actually_present)) actually_present[0].pop('ctime') self.assertEqual(actually_present[0], { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'blake2s256': cont['blake2s256'], 'length': cont['length'], 'status': 'visible' }) # 3. with something to find actually_present = self.storage.content_find( {'sha256': cont['sha256']}) self.assertEqual(1, len(actually_present)) actually_present[0].pop('ctime') self.assertEqual(actually_present[0], { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'blake2s256': cont['blake2s256'], 'length': cont['length'], 'status': 'visible' }) # 4. with something to find actually_present = self.storage.content_find({ 'sha1': cont['sha1'], 'sha1_git': cont['sha1_git'], 'sha256': cont['sha256'], 'blake2s256': cont['blake2s256'], }) self.assertEqual(1, len(actually_present)) actually_present[0].pop('ctime') self.assertEqual(actually_present[0], { 'sha1': cont['sha1'], 'sha256': cont['sha256'], 'sha1_git': cont['sha1_git'], 'blake2s256': cont['blake2s256'], 'length': cont['length'], 'status': 'visible' }) def test_content_find_with_non_present_content(self): # 1. with something that does not exist missing_cont = self.missing_cont actually_present = self.storage.content_find( {'sha1': missing_cont['sha1']}) self.assertEqual(actually_present, []) # 2. 
with something that does not exist actually_present = self.storage.content_find( {'sha1_git': missing_cont['sha1_git']}) self.assertEqual(actually_present, []) # 3. with something that does not exist actually_present = self.storage.content_find( {'sha256': missing_cont['sha256']}) self.assertEqual(actually_present, []) def test_content_find_with_duplicate_input(self): cont1 = self.cont duplicate_cont = cont1.copy() # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(duplicate_cont['sha1']) sha1_array[0] += 1 duplicate_cont['sha1'] = bytes(sha1_array) sha1git_array = bytearray(duplicate_cont['sha1_git']) sha1git_array[0] += 1 duplicate_cont['sha1_git'] = bytes(sha1git_array) # Inject the data self.storage.content_add([cont1, duplicate_cont]) finder = {'blake2s256': duplicate_cont['blake2s256'], 'sha256': duplicate_cont['sha256']} actual_result = list(self.storage.content_find(finder)) cont1.pop('data') duplicate_cont.pop('data') actual_result[0].pop('ctime') actual_result[1].pop('ctime') expected_result = [ cont1, duplicate_cont ] self.assertCountEqual(expected_result, actual_result) def test_content_find_with_duplicate_sha256(self): cont1 = self.cont duplicate_cont = cont1.copy() # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(duplicate_cont['sha1']) sha1_array[0] += 1 duplicate_cont['sha1'] = bytes(sha1_array) sha1git_array = bytearray(duplicate_cont['sha1_git']) sha1git_array[0] += 1 duplicate_cont['sha1_git'] = bytes(sha1git_array) blake2s256_array = bytearray(duplicate_cont['blake2s256']) blake2s256_array[0] += 1 duplicate_cont['blake2s256'] = bytes(blake2s256_array) self.storage.content_add([cont1, duplicate_cont]) finder = { 'sha256': duplicate_cont['sha256'] } actual_result = list(self.storage.content_find(finder)) cont1.pop('data') duplicate_cont.pop('data') actual_result[0].pop('ctime') actual_result[1].pop('ctime') expected_result = [ cont1, duplicate_cont ] self.assertCountEqual(expected_result, actual_result) # Find with both sha256 and blake2s256 finder = { 'sha256': duplicate_cont['sha256'], 'blake2s256': duplicate_cont['blake2s256'] } actual_result = list(self.storage.content_find(finder)) actual_result[0].pop('ctime') expected_result = [ duplicate_cont ] self.assertCountEqual(expected_result, actual_result) def test_content_find_with_duplicate_blake2s256(self): cont1 = self.cont duplicate_cont = cont1.copy() # Create fake data with colliding sha256 and blake2s256 sha1_array = bytearray(duplicate_cont['sha1']) sha1_array[0] += 1 duplicate_cont['sha1'] = bytes(sha1_array) sha1git_array = bytearray(duplicate_cont['sha1_git']) sha1git_array[0] += 1 duplicate_cont['sha1_git'] = bytes(sha1git_array) sha256_array = bytearray(duplicate_cont['sha256']) sha256_array[0] += 1 duplicate_cont['sha256'] = bytes(sha256_array) self.storage.content_add([cont1, duplicate_cont]) finder = { 'blake2s256': duplicate_cont['blake2s256'] } actual_result = list(self.storage.content_find(finder)) cont1.pop('data') duplicate_cont.pop('data') actual_result[0].pop('ctime') actual_result[1].pop('ctime') expected_result = [ cont1, duplicate_cont ] self.assertCountEqual(expected_result, actual_result) # Find with both sha256 and blake2s256 finder = { 'sha256': duplicate_cont['sha256'], 'blake2s256': duplicate_cont['blake2s256'] } actual_result = list(self.storage.content_find(finder)) actual_result[0].pop('ctime') expected_result = [ duplicate_cont ] self.assertCountEqual(expected_result, actual_result) def test_content_find_bad_input(self): # 1. 
with bad input with self.assertRaises(ValueError): self.storage.content_find({}) # empty is bad # 2. with bad input with self.assertRaises(ValueError): self.storage.content_find( {'unknown-sha1': 'something'}) # not the right key def test_object_find_by_sha1_git(self): sha1_gits = [b'00000000000000000000'] expected = { b'00000000000000000000': [], } self.storage.content_add([self.cont]) sha1_gits.append(self.cont['sha1_git']) expected[self.cont['sha1_git']] = [{ 'sha1_git': self.cont['sha1_git'], 'type': 'content', 'id': self.cont['sha1'], }] self.storage.directory_add([self.dir]) sha1_gits.append(self.dir['id']) expected[self.dir['id']] = [{ 'sha1_git': self.dir['id'], 'type': 'directory', 'id': self.dir['id'], }] self.storage.revision_add([self.revision]) sha1_gits.append(self.revision['id']) expected[self.revision['id']] = [{ 'sha1_git': self.revision['id'], 'type': 'revision', 'id': self.revision['id'], }] self.storage.release_add([self.release]) sha1_gits.append(self.release['id']) expected[self.release['id']] = [{ 'sha1_git': self.release['id'], 'type': 'release', 'id': self.release['id'], }] ret = self.storage.object_find_by_sha1_git(sha1_gits) for val in ret.values(): for obj in val: if 'object_id' in obj: del obj['object_id'] self.assertEqual(expected, ret) def test_tool_add(self): tool = { 'name': 'some-unknown-tool', 'version': 'some-version', 'configuration': {"debian-package": "some-package"}, } actual_tool = self.storage.tool_get(tool) self.assertIsNone(actual_tool) # does not exist # add it actual_tools = self.storage.tool_add([tool]) self.assertEqual(len(actual_tools), 1) actual_tool = actual_tools[0] self.assertIsNotNone(actual_tool) # now it exists new_id = actual_tool.pop('id') self.assertEqual(actual_tool, tool) actual_tools2 = self.storage.tool_add([tool]) actual_tool2 = actual_tools2[0] self.assertIsNotNone(actual_tool2) # now it exists new_id2 = actual_tool2.pop('id') self.assertEqual(new_id, new_id2) self.assertEqual(actual_tool, actual_tool2) def test_tool_add_multiple(self): tool = { 'name': 'some-unknown-tool', 'version': 'some-version', 'configuration': {"debian-package": "some-package"}, } actual_tools = list(self.storage.tool_add([tool])) self.assertEqual(len(actual_tools), 1) new_tools = [tool, { 'name': 'yet-another-tool', 'version': 'version', 'configuration': {}, }] actual_tools = self.storage.tool_add(new_tools) self.assertEqual(len(actual_tools), 2) # order not guaranteed, so we iterate over results to check for tool in actual_tools: _id = tool.pop('id') self.assertIsNotNone(_id) self.assertIn(tool, new_tools) def test_tool_get_missing(self): tool = { 'name': 'unknown-tool', 'version': '3.1.0rc2-31-ga2cbb8c', 'configuration': {"command_line": "nomossa "}, } actual_tool = self.storage.tool_get(tool) self.assertIsNone(actual_tool) def test_tool_metadata_get_missing_context(self): tool = { 'name': 'swh-metadata-translator', 'version': '0.0.1', 'configuration': {"context": "unknown-context"}, } actual_tool = self.storage.tool_get(tool) self.assertIsNone(actual_tool) def test_tool_metadata_get(self): tool = { 'name': 'swh-metadata-translator', 'version': '0.0.1', 'configuration': {"type": "local", "context": "npm"}, } tools = self.storage.tool_add([tool]) expected_tool = tools[0] # when actual_tool = self.storage.tool_get(tool) # then self.assertEqual(expected_tool, actual_tool) def test_metadata_provider_get(self): # given no_provider = self.storage.metadata_provider_get(6459456445615) self.assertIsNone(no_provider) # when provider_id = 
self.storage.metadata_provider_add( self.provider['name'], self.provider['type'], self.provider['url'], self.provider['metadata']) actual_provider = self.storage.metadata_provider_get(provider_id) expected_provider = { 'provider_name': self.provider['name'], 'provider_type': self.provider['type'], 'provider_url': self.provider['url'], 'metadata': self.provider['metadata'] } # then del actual_provider['id'] self.assertEqual(actual_provider, expected_provider) def test_metadata_provider_get_by(self): # given no_provider = self.storage.metadata_provider_get_by({ 'provider_name': self.provider['name'], 'provider_url': self.provider['url'] }) self.assertIsNone(no_provider) # when provider_id = self.storage.metadata_provider_add( self.provider['name'], self.provider['type'], self.provider['url'], self.provider['metadata']) actual_provider = self.storage.metadata_provider_get_by({ 'provider_name': self.provider['name'], 'provider_url': self.provider['url'] }) # then self.assertEqual(provider_id, actual_provider['id']) def test_origin_metadata_add(self): # given origin_id = self.storage.origin_add([self.origin])[0]['id'] origin_metadata0 = list(self.storage.origin_metadata_get_by( origin_id)) self.assertEqual(len(origin_metadata0), 0) tools = self.storage.tool_add([self.metadata_tool]) tool = tools[0] self.storage.metadata_provider_add( self.provider['name'], self.provider['type'], self.provider['url'], self.provider['metadata']) provider = self.storage.metadata_provider_get_by({ 'provider_name': self.provider['name'], 'provider_url': self.provider['url'] }) # when adding two metadata entries for the same origin self.storage.origin_metadata_add( origin_id, self.origin_metadata['discovery_date'], provider['id'], tool['id'], self.origin_metadata['metadata']) self.storage.origin_metadata_add( origin_id, '2015-01-01 23:00:00+00', provider['id'], tool['id'], self.origin_metadata2['metadata']) actual_om = list(self.storage.origin_metadata_get_by( origin_id)) # then self.assertCountEqual( [item['origin_id'] for item in actual_om], [origin_id, origin_id]) def test_origin_metadata_get(self): # given origin_id = self.storage.origin_add([self.origin])[0]['id'] origin_id2 = self.storage.origin_add([self.origin2])[0]['id'] self.storage.metadata_provider_add(self.provider['name'], self.provider['type'], self.provider['url'], self.provider['metadata']) provider = self.storage.metadata_provider_get_by({ 'provider_name': self.provider['name'], 'provider_url': self.provider['url'] }) tool = self.storage.tool_add([self.metadata_tool])[0] # when adding two metadata entries for the first origin and one for the second self.storage.origin_metadata_add( origin_id, self.origin_metadata['discovery_date'], provider['id'], tool['id'], self.origin_metadata['metadata']) self.storage.origin_metadata_add( origin_id2, self.origin_metadata2['discovery_date'], provider['id'], tool['id'], self.origin_metadata2['metadata']) self.storage.origin_metadata_add( origin_id, self.origin_metadata2['discovery_date'], provider['id'], tool['id'], self.origin_metadata2['metadata']) all_metadatas = list(self.storage.origin_metadata_get_by( origin_id)) metadatas_for_origin2 = list(self.storage.origin_metadata_get_by( origin_id2)) expected_results = [{ 'origin_id': origin_id, 'discovery_date': datetime.datetime( 2017, 1, 1, 23, 0, tzinfo=datetime.timezone.utc), 'metadata': { 'name': 'test_origin_metadata', 'version': '0.0.1' }, 'provider_id': provider['id'], 'provider_name': 'hal', 'provider_type': 'deposit-client', 'provider_url': 'http:///hal/inria', 'tool_id': tool['id'] }, { 'origin_id': origin_id, 'discovery_date': datetime.datetime( 2015, 1, 1, 23, 0,
tzinfo=datetime.timezone.utc), 'metadata': { 'name': 'test_origin_metadata', 'version': '0.0.1' }, 'provider_id': provider['id'], 'provider_name': 'hal', 'provider_type': 'deposit-client', 'provider_url': 'http:///hal/inria', 'tool_id': tool['id'] }] # then self.assertEqual(len(all_metadatas), 2) self.assertEqual(len(metadatas_for_origin2), 1) self.assertCountEqual(all_metadatas, expected_results) def test_metadata_provider_add(self): provider = { 'provider_name': 'swMATH', 'provider_type': 'registry', 'provider_url': 'http://www.swmath.org/', 'metadata': { 'email': 'contact@swmath.org', 'license': 'All rights reserved' } } provider['id'] = provider_id = self.storage.metadata_provider_add( **provider) self.assertEqual( provider, self.storage.metadata_provider_get_by({ 'provider_name': 'swMATH', 'provider_url': 'http://www.swmath.org/' })) self.assertEqual( provider, self.storage.metadata_provider_get(provider_id)) def test_origin_metadata_get_by_provider_type(self): # given origin_id = self.storage.origin_add([self.origin])[0]['id'] origin_id2 = self.storage.origin_add([self.origin2])[0]['id'] provider1_id = self.storage.metadata_provider_add( self.provider['name'], self.provider['type'], self.provider['url'], self.provider['metadata']) provider1 = self.storage.metadata_provider_get_by({ 'provider_name': self.provider['name'], 'provider_url': self.provider['url'] }) self.assertEqual(provider1, self.storage.metadata_provider_get(provider1_id)) provider2_id = self.storage.metadata_provider_add( 'swMATH', 'registry', 'http://www.swmath.org/', {'email': 'contact@swmath.org', 'license': 'All rights reserved'}) provider2 = self.storage.metadata_provider_get_by({ 'provider_name': 'swMATH', 'provider_url': 'http://www.swmath.org/' }) self.assertEqual(provider2, self.storage.metadata_provider_get(provider2_id)) # using the only tool now inserted in the data.sql, but for this # provider should be a crawler tool (not yet implemented) tool = self.storage.tool_add([self.metadata_tool])[0] # when adding for the same origin 2 metadatas self.storage.origin_metadata_add( origin_id, self.origin_metadata['discovery_date'], provider1['id'], tool['id'], self.origin_metadata['metadata']) self.storage.origin_metadata_add( origin_id2, self.origin_metadata2['discovery_date'], provider2['id'], tool['id'], self.origin_metadata2['metadata']) provider_type = 'registry' m_by_provider = list(self.storage.origin_metadata_get_by( origin_id2, provider_type)) for item in m_by_provider: if 'id' in item: del item['id'] expected_results = [{ 'origin_id': origin_id2, 'discovery_date': datetime.datetime( 2017, 1, 1, 23, 0, tzinfo=datetime.timezone.utc), 'metadata': { 'name': 'test_origin_metadata', 'version': '0.0.1' }, 'provider_id': provider2['id'], 'provider_name': 'swMATH', 'provider_type': provider_type, 'provider_url': 'http://www.swmath.org/', 'tool_id': tool['id'] }] # then self.assertEqual(len(m_by_provider), 1) self.assertEqual(m_by_provider, expected_results) class CommonPropTestStorage: _test_origin_ids = True def assert_contents_ok(self, expected_contents, actual_contents, keys_to_check={'sha1', 'data'}): """Assert that a given list of contents matches on a given set of keys. 
""" for k in keys_to_check: expected_list = sorted([c[k] for c in expected_contents]) actual_list = sorted([c[k] for c in actual_contents]) self.assertEqual(actual_list, expected_list) @given(gen_contents(min_size=1, max_size=4)) def test_generate_content_get(self, contents): self.reset_storage() # add contents to storage self.storage.content_add(contents) # input the list of sha1s we want from storage get_sha1s = [c['sha1'] for c in contents] # retrieve contents actual_contents = list(self.storage.content_get(get_sha1s)) self.assert_contents_ok(contents, actual_contents) @given(gen_contents(min_size=1, max_size=4)) def test_generate_content_get_metadata(self, contents): self.reset_storage() # add contents to storage self.storage.content_add(contents) # input the list of sha1s we want from storage get_sha1s = [c['sha1'] for c in contents] # retrieve contents actual_contents = list(self.storage.content_get_metadata(get_sha1s)) self.assertEqual(len(actual_contents), len(contents)) # will check that all contents are retrieved correctly one_content = contents[0] # content_get_metadata does not return data keys_to_check = set(one_content.keys()) - {'data'} self.assert_contents_ok(contents, actual_contents, keys_to_check=keys_to_check) @given(gen_contents(), strategies.binary(min_size=20, max_size=20), strategies.binary(min_size=20, max_size=20)) def test_generate_content_get_range(self, contents, start, end): """content_get_range paginates results if limit exceeded""" self.reset_storage() # add contents to storage self.storage.content_add(contents) actual_result = self.storage.content_get_range(start, end) actual_contents = actual_result['contents'] actual_next = actual_result['next'] self.assertEqual(actual_next, None) expected_contents = [c for c in contents if start <= c['sha1'] <= end] if expected_contents: keys_to_check = set(contents[0].keys()) - {'data'} self.assert_contents_ok(expected_contents, actual_contents, keys_to_check) else: self.assertEqual(actual_contents, []) def test_generate_content_get_range_limit_none(self): """content_get_range call with wrong limit input should fail""" with self.assertRaises(ValueError) as e: self.storage.content_get_range(start=None, end=None, limit=None) self.assertEqual(e.exception.args, ( 'Development error: limit should not be None',)) @given(gen_contents(min_size=1, max_size=4)) def test_generate_content_get_range_no_limit(self, contents): """content_get_range returns contents within range provided""" self.reset_storage() # add contents to storage self.storage.content_add(contents) # input the list of sha1s we want from storage get_sha1s = sorted([c['sha1'] for c in contents]) start = get_sha1s[0] end = get_sha1s[-1] # retrieve contents actual_result = self.storage.content_get_range(start, end) actual_contents = actual_result['contents'] actual_next = actual_result['next'] self.assertEqual(len(contents), len(actual_contents)) self.assertIsNone(actual_next) one_content = contents[0] keys_to_check = set(one_content.keys()) - {'data'} self.assert_contents_ok(contents, actual_contents, keys_to_check) @given(gen_contents(min_size=4, max_size=4)) def test_generate_content_get_range_limit(self, contents): """content_get_range paginates results if limit exceeded""" self.reset_storage() contents_map = {c['sha1']: c for c in contents} # add contents to storage self.storage.content_add(contents) # input the list of sha1s we want from storage get_sha1s = sorted([c['sha1'] for c in contents]) start = get_sha1s[0] end = get_sha1s[-1] # retrieve contents limited 
        limited_results = len(contents) - 1
        actual_result = self.storage.content_get_range(
            start, end, limit=limited_results)

        actual_contents = actual_result['contents']
        actual_next = actual_result['next']

        self.assertEqual(limited_results, len(actual_contents))
        self.assertIsNotNone(actual_next)
        self.assertEqual(actual_next, get_sha1s[-1])

        expected_contents = [contents_map[sha1] for sha1 in get_sha1s[:-1]]
        keys_to_check = set(contents[0].keys()) - {'data'}
        self.assert_contents_ok(expected_contents, actual_contents,
                                keys_to_check)

        # retrieve next part
        actual_results2 = self.storage.content_get_range(start=end, end=end)
        actual_contents2 = actual_results2['contents']
        actual_next2 = actual_results2['next']

        self.assertEqual(1, len(actual_contents2))
        self.assertIsNone(actual_next2)

        self.assert_contents_ok([contents_map[actual_next]],
                                actual_contents2, keys_to_check)

    def test_origin_get_invalid_id_legacy(self):
        if self._test_origin_ids:
            invalid_origin_id = 1
            origin_info = self.storage.origin_get({'id': invalid_origin_id})
            self.assertIsNone(origin_info)

            origin_visits = list(self.storage.origin_visit_get(
                invalid_origin_id))
            self.assertEqual(origin_visits, [])

    def test_origin_get_invalid_id(self):
        if self._test_origin_ids:
            origin_info = self.storage.origin_get([{'id': 1}, {'id': 2}])
            self.assertEqual(origin_info, [None, None])

            origin_visits = list(self.storage.origin_visit_get(1))
            self.assertEqual(origin_visits, [])

    @given(strategies.lists(origins().map(lambda x: x.to_dict()),
                            unique_by=lambda x: x['url'],
                            min_size=6, max_size=15))
    def test_origin_get_range(self, new_origins):
        self.reset_storage()
        nb_origins = len(new_origins)

        self.storage.origin_add(new_origins)

        origin_from = random.randint(1, nb_origins - 1)
        origin_count = random.randint(1, nb_origins - origin_from)

        actual_origins = list(
            self.storage.origin_get_range(origin_from=origin_from,
                                          origin_count=origin_count))

        for origin in actual_origins:
            del origin['id']

        for origin in actual_origins:
            self.assertIn(origin, new_origins)

        origin_from = -1
        origin_count = 5
        origins = list(
            self.storage.origin_get_range(origin_from=origin_from,
                                          origin_count=origin_count))
        self.assertEqual(len(origins), origin_count)

        origin_from = 10000
        origins = list(
            self.storage.origin_get_range(origin_from=origin_from,
                                          origin_count=origin_count))
        self.assertEqual(len(origins), 0)

    def test_origin_count(self):
        new_origins = [
            {
                'type': 'git',
                'url': 'https://github.com/user1/repo1'
            },
            {
                'type': 'git',
                'url': 'https://github.com/user2/repo1'
            },
            {
                'type': 'git',
                'url': 'https://github.com/user3/repo1'
            },
            {
                'type': 'git',
                'url': 'https://gitlab.com/user1/repo1'
            },
            {
                'type': 'git',
                'url': 'https://gitlab.com/user2/repo1'
            }
        ]
        self.storage.origin_add(new_origins)

        self.assertEqual(self.storage.origin_count('github'), 3)
        self.assertEqual(self.storage.origin_count('gitlab'), 2)
        self.assertEqual(
            self.storage.origin_count('.*user.*', regexp=True), 5)
        self.assertEqual(
            self.storage.origin_count('.*user.*', regexp=False), 0)
        self.assertEqual(
            self.storage.origin_count('.*user1.*', regexp=True), 2)
        self.assertEqual(
            self.storage.origin_count('.*user1.*', regexp=False), 0)
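    # The assertions above pin down origin_count's matching semantics as
    # exercised here: with regexp=False the pattern appears to be matched
    # as a plain substring of the origin URL (hence '.*user.*' matches
    # nothing when taken literally), while regexp=True treats it as a
    # regular expression over the URL.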
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(strategies.lists(objects(), max_size=2))
    def test_add_arbitrary(self, objects):
        self.reset_storage()
        for (obj_type, obj) in objects:
            obj = obj.to_dict()
            if obj_type == 'origin_visit':
                origin_id = self.storage.origin_add_one(obj.pop('origin'))
                if 'visit' in obj:
                    del obj['visit']
                self.storage.origin_visit_add(
                    origin_id, obj['date'], obj['type'])
            else:
                method = getattr(self.storage, obj_type + '_add')
                try:
                    method([obj])
                except HashCollision:
                    pass


@pytest.mark.db
class TestLocalStorage(CommonTestStorage, StorageTestDbFixture,
                       unittest.TestCase):
    """Test the local storage"""

    # Can only be tested with local storage as you can't mock
    # datetimes for the remote server
    @given(strategies.booleans())
    def test_fetch_history(self, use_url):
        if not self._test_origin_ids and not use_url:
            return
        self.reset_storage()

        origin_id = self.storage.origin_add_one(self.origin)
        origin_id_or_url = self.origin['url'] if use_url else origin_id
        # patch datetime.datetime so that the storage's internal call to
        # datetime.datetime.now() returns a fixed, known timestamp
        with patch('datetime.datetime'):
            datetime.datetime.now.return_value = self.fetch_history_date
            fetch_history_id = self.storage.fetch_history_start(
                origin_id_or_url)
            datetime.datetime.now.assert_called_with(
                tz=datetime.timezone.utc)

        with patch('datetime.datetime'):
            datetime.datetime.now.return_value = self.fetch_history_end
            self.storage.fetch_history_end(fetch_history_id,
                                           self.fetch_history_data)

        fetch_history = self.storage.fetch_history_get(fetch_history_id)
        expected_fetch_history = self.fetch_history_data.copy()

        expected_fetch_history['id'] = fetch_history_id
        expected_fetch_history['origin'] = origin_id
        expected_fetch_history['date'] = self.fetch_history_date
        expected_fetch_history['duration'] = self.fetch_history_duration

        self.assertEqual(expected_fetch_history, fetch_history)

    # This test is only relevant on the local storage, with an actual
    # objstorage raising an exception
    def test_content_add_objstorage_exception(self):
        self.storage.objstorage.add = Mock(
            side_effect=Exception('mocked broken objstorage')
        )

        with self.assertRaises(Exception) as e:
            self.storage.content_add([self.cont])

        self.assertEqual(e.exception.args, ('mocked broken objstorage',))
        missing = list(self.storage.content_missing([self.cont]))
        self.assertEqual(missing, [self.cont['sha1']])


@pytest.mark.db
@pytest.mark.property_based
class PropTestLocalStorage(CommonPropTestStorage, StorageTestDbFixture,
                           unittest.TestCase):
    pass


class AlteringSchemaTest(TestStorageData, StorageTestDbFixture,
                         unittest.TestCase):
    """This class is dedicated to the rare cases where the schema needs to
    be altered dynamically.

    Otherwise, the tests could block when run together.
    """
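    # content_update is assumed here to match the existing row on its sha1
    # and overwrite only the columns listed in `keys`; the raw SQL queries
    # below verify the row as actually stored, bypassing the storage API.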
""" def test_content_update(self): self.storage.journal_writer = None # TODO, not supported cont = copy.deepcopy(self.cont) self.storage.content_add([cont]) # alter the sha1_git for example cont['sha1_git'] = hash_to_bytes( '3a60a5275d0333bf13468e8b3dcab90f4046e654') self.storage.content_update([cont], keys=['sha1_git']) with self.storage.get_db().transaction() as cur: cur.execute('SELECT sha1, sha1_git, sha256, length, status' ' FROM content WHERE sha1 = %s', (cont['sha1'],)) datum = cur.fetchone() self.assertEqual( (datum[0], datum[1], datum[2], datum[3], datum[4]), (cont['sha1'], cont['sha1_git'], cont['sha256'], cont['length'], 'visible')) def test_content_update_with_new_cols(self): self.storage.journal_writer = None # TODO, not supported with self.storage.get_db().transaction() as cur: cur.execute("""alter table content add column test text default null, add column test2 text default null""") cont = copy.deepcopy(self.cont2) self.storage.content_add([cont]) cont['test'] = 'value-1' cont['test2'] = 'value-2' self.storage.content_update([cont], keys=['test', 'test2']) with self.storage.get_db().transaction() as cur: cur.execute( 'SELECT sha1, sha1_git, sha256, length, status, test, test2' ' FROM content WHERE sha1 = %s', (cont['sha1'],)) datum = cur.fetchone() self.assertEqual( (datum[0], datum[1], datum[2], datum[3], datum[4], datum[5], datum[6]), (cont['sha1'], cont['sha1_git'], cont['sha256'], cont['length'], 'visible', cont['test'], cont['test2'])) with self.storage.get_db().transaction() as cur: cur.execute("""alter table content drop column test, drop column test2""")