diff --git a/swh/storage/api/client.py b/swh/storage/api/client.py
index 3b067956..222076f7 100644
--- a/swh/storage/api/client.py
+++ b/swh/storage/api/client.py
@@ -1,24 +1,24 @@
# Copyright (C) 2015-2017 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from swh.core.api import RPCClient
from ..exc import StorageAPIError
-from ..storage import Storage
+from ..interface import StorageInterface
class RemoteStorage(RPCClient):
"""Proxy to a remote storage API"""
api_exception = StorageAPIError
- backend_class = Storage
+ backend_class = StorageInterface
def reset(self):
return self.post('reset', {})
def stat_counters(self):
return self.get('stat/counters')
def refresh_stat_counters(self):
return self.get('stat/refresh')
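With `backend_class` now pointing at `StorageInterface`, `RPCClient` generates one proxy method per `@remote_api_endpoint`-decorated method of the interface, so the client surface tracks the interface automatically. A minimal usage sketch (the server URL is an assumption, not part of this diff):

```python
# Hypothetical usage sketch; the URL is a placeholder for a running server.
from swh.storage.api.client import RemoteStorage

storage = RemoteStorage(url='http://localhost:5002/')
# Proxied over HTTP because content_missing_per_sha1 is declared on
# StorageInterface with @remote_api_endpoint.
missing = list(storage.content_missing_per_sha1([b'\x00' * 20]))
```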
diff --git a/swh/storage/api/server.py b/swh/storage/api/server.py
index cc6634ae..05cf12e0 100644
--- a/swh/storage/api/server.py
+++ b/swh/storage/api/server.py
@@ -1,118 +1,118 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
import logging
from swh.core import config
from swh.storage import get_storage as get_swhstorage
from swh.core.api import (RPCServerApp,
error_handler,
encode_data_server as encode_data)
-from ..storage import Storage
+from ..interface import StorageInterface
from ..metrics import timed
def get_storage():
global storage
if not storage:
storage = get_swhstorage(**app.config['storage'])
return storage
app = RPCServerApp(__name__,
- backend_class=Storage,
+ backend_class=StorageInterface,
backend_factory=get_storage)
storage = None
@app.errorhandler(Exception)
def my_error_handler(exception):
return error_handler(exception, encode_data)
@app.route('/')
@timed
def index():
return '''
Software Heritage storage server
You have reached the
Software Heritage
storage server.
See its
documentation
and API for more information
'''
@app.route('/stat/counters', methods=['GET'])
@timed
def stat_counters():
return encode_data(get_storage().stat_counters())
@app.route('/stat/refresh', methods=['GET'])
@timed
def refresh_stat_counters():
return encode_data(get_storage().refresh_stat_counters())
api_cfg = None
def load_and_check_config(config_file, type='local'):
"""Check the minimal configuration is set to run the api or raise an
error explanation.
Args:
config_file (str): Path to the configuration file to load
type (str): configuration type. For 'local' type, more
checks are done.
Raises:
Error if the setup is not as expected
Returns:
configuration as a dict
"""
if not config_file:
raise EnvironmentError('Configuration file must be defined')
if not os.path.exists(config_file):
raise FileNotFoundError('Configuration file %s does not exist' % (
config_file, ))
cfg = config.read(config_file)
if 'storage' not in cfg:
raise KeyError("Missing '%storage' configuration")
return cfg
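`load_and_check_config` fails fast on the three misconfiguration cases before the server starts. A sketch of the expected failures (paths are illustrative):

```python
# Illustrative sketch of load_and_check_config's error cases; paths are made up.
from swh.storage.api.server import load_and_check_config

try:
    load_and_check_config(None)              # no file given
except EnvironmentError as e:
    print(e)                                 # Configuration file must be defined

try:
    load_and_check_config('/no/such/file.yml')
except FileNotFoundError as e:
    print(e)                                 # Configuration file ... does not exist

# A file whose YAML lacks a top-level 'storage' key raises KeyError.
```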
def make_app_from_configfile():
"""Run the WSGI app from the webserver, loading the configuration from
a configuration file.
SWH_CONFIG_FILENAME environment variable defines the
configuration path to load.
"""
global api_cfg
if not api_cfg:
config_file = os.environ.get('SWH_CONFIG_FILENAME')
api_cfg = load_and_check_config(config_file)
app.config.update(api_cfg)
handler = logging.StreamHandler()
app.logger.addHandler(handler)
return app
if __name__ == '__main__':
print('Deprecated. Use swh-storage')
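`make_app_from_configfile` is the WSGI entry point: it reads `SWH_CONFIG_FILENAME` once, caches the result in the module-level `api_cfg`, and returns the configured app. A development-only sketch (the config path is an assumption):

```python
# Development-only sketch; the config path is an assumption.
import os

os.environ['SWH_CONFIG_FILENAME'] = '/etc/softwareheritage/storage.yml'

from swh.storage.api.server import make_app_from_configfile

app = make_app_from_configfile()
app.run(port=5002, debug=True)  # Flask dev server; use a real WSGI server in production
```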
diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py
index 9cdfa5ad..656f040d 100644
--- a/swh/storage/in_memory.py
+++ b/swh/storage/in_memory.py
@@ -1,1915 +1,1053 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import re
import bisect
import dateutil
import collections
import copy
import datetime
import itertools
import random
from collections import defaultdict
from datetime import timedelta
from typing import Any, Dict, List, Optional
import attr
from swh.model.model import (
Content, Directory, Revision, Release, Snapshot, OriginVisit, Origin,
SHA1_SIZE)
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex
from swh.objstorage import get_objstorage
from swh.objstorage.exc import ObjNotFoundError
from .storage import get_journal_writer
from .converters import origin_url_to_sha1
from .utils import get_partition_bounds_bytes
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
def now():
return datetime.datetime.now(tz=datetime.timezone.utc)
class Storage:
def __init__(self, journal_writer=None):
self._contents = {}
self._content_indexes = defaultdict(lambda: defaultdict(set))
self._skipped_contents = {}
self._skipped_content_indexes = defaultdict(lambda: defaultdict(set))
self.reset()
if journal_writer:
self.journal_writer = get_journal_writer(**journal_writer)
else:
self.journal_writer = None
def reset(self):
self._directories = {}
self._revisions = {}
self._releases = {}
self._snapshots = {}
self._origins = {}
self._origins_by_id = []
self._origins_by_sha1 = {}
self._origin_visits = {}
self._persons = []
self._origin_metadata = defaultdict(list)
self._tools = {}
self._metadata_providers = {}
self._objects = defaultdict(list)
# ideally we would want a skip list for both fast inserts and searches
self._sorted_sha1s = []
self.objstorage = get_objstorage('memory', {})
def check_config(self, *, check_write):
- """Check that the storage is configured and ready to go."""
return True
def _content_add(self, contents, with_data):
content_with_data = []
content_without_data = []
for content in contents:
if content.status is None:
content.status = 'visible'
if content.length is None:
content.length = -1
if content.status != 'absent':
if self._content_key(content) not in self._contents:
content_with_data.append(content)
else:
if self._content_key(content) not in self._skipped_contents:
content_without_data.append(content)
if self.journal_writer:
for content in content_with_data:
content = attr.evolve(content, data=None)
self.journal_writer.write_addition('content', content)
for content in content_without_data:
self.journal_writer.write_addition('content', content)
count_content_added, count_content_bytes_added = \
self._content_add_present(content_with_data, with_data)
count_skipped_content_added = self._content_add_absent(
content_without_data
)
summary = {
'content:add': count_content_added,
'skipped_content:add': count_skipped_content_added,
}
if with_data:
summary['content:add:bytes'] = count_content_bytes_added
return summary
def _content_add_present(self, contents, with_data):
count_content_added = 0
count_content_bytes_added = 0
for content in contents:
key = self._content_key(content)
if key in self._contents:
continue
for algorithm in DEFAULT_ALGORITHMS:
hash_ = content.get_hash(algorithm)
if hash_ in self._content_indexes[algorithm]\
and (algorithm not in {'blake2s256', 'sha256'}):
from . import HashCollision
raise HashCollision(algorithm, hash_, key)
for algorithm in DEFAULT_ALGORITHMS:
hash_ = content.get_hash(algorithm)
self._content_indexes[algorithm][hash_].add(key)
self._objects[content.sha1_git].append(
('content', content.sha1))
self._contents[key] = content
bisect.insort(self._sorted_sha1s, content.sha1)
count_content_added += 1
if with_data:
content_data = self._contents[key].data
self._contents[key] = attr.evolve(
self._contents[key],
data=None)
count_content_bytes_added += len(content_data)
self.objstorage.add(content_data, content.sha1)
return (count_content_added, count_content_bytes_added)
def _content_add_absent(self, contents):
count = 0
skipped_content_missing = self.skipped_content_missing(contents)
for content in skipped_content_missing:
key = self._content_key(content)
for algo in DEFAULT_ALGORITHMS:
self._skipped_content_indexes[algo][content.get_hash(algo)] \
.add(key)
self._skipped_contents[key] = content
count += 1
return count
def _content_to_model(self, contents):
- """Takes a list of content dicts, optionally with an extra 'origin'
- key, and yields tuples (model.Content, origin)."""
for content in contents:
content = content.copy()
content.pop('origin', None)
yield Content.from_dict(content)
def content_add(self, content):
- """Add content blobs to the storage
-
- Args:
- content (iterable): iterable of dictionaries representing
- individual pieces of content to add. Each dictionary has the
- following keys:
-
- - data (bytes): the actual content
- - length (int): content length (default: -1)
- - one key for each checksum algorithm in
- :data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the
- corresponding checksum
- - status (str): one of visible, hidden, absent
- - reason (str): if status = absent, the reason why
- - origin (int): if status = absent, the origin we saw the
- content in
-
- Raises:
- HashCollision in case of collision
-
- Returns:
- Summary dict with the following key and associated values:
-
- content:add: New contents added
- content_bytes:add: Sum of the contents' length data
- skipped_content:add: New skipped contents (no data) added
-
- """
now = datetime.datetime.now(tz=datetime.timezone.utc)
content = [attr.evolve(c, ctime=now)
for c in self._content_to_model(content)]
return self._content_add(content, with_data=True)
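`content_add` stamps each blob with the current `ctime` before delegating to `_content_add(..., with_data=True)`, so the summary includes the byte count. A small sketch against this in-memory backend, assuming `swh.model.hashutil.MultiHash` to compute the default checksums:

```python
# Sketch against the in-memory backend; MultiHash computes the four
# default checksums (sha1, sha1_git, sha256, blake2s256).
from swh.model.hashutil import MultiHash
from swh.storage.in_memory import Storage

data = b'hello'
content = MultiHash.from_data(data).digest()
content.update({'data': data, 'length': len(data), 'status': 'visible'})

storage = Storage()
summary = storage.content_add([content])
# e.g. {'content:add': 1, 'skipped_content:add': 0, 'content:add:bytes': 5}
```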
def content_update(self, content, keys=[]):
- """Update content blobs to the storage. Does nothing for unknown
- contents or skipped ones.
-
- Args:
- content (iterable): iterable of dictionaries representing
- individual pieces of content to update. Each dictionary has the
- following keys:
-
- - data (bytes): the actual content
- - length (int): content length (default: -1)
- - one key for each checksum algorithm in
- :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
- corresponding checksum
- - status (str): one of visible, hidden, absent
-
- keys (list): List of keys (str) whose values needs an update, e.g.,
- new hash column
- """
if self.journal_writer:
raise NotImplementedError(
'content_update is not yet supported with a journal_writer.')
for cont_update in content:
cont_update = cont_update.copy()
sha1 = cont_update.pop('sha1')
for old_key in self._content_indexes['sha1'][sha1]:
old_cont = self._contents.pop(old_key)
for algorithm in DEFAULT_ALGORITHMS:
hash_ = old_cont.get_hash(algorithm)
self._content_indexes[algorithm][hash_].remove(old_key)
new_cont = attr.evolve(old_cont, **cont_update)
new_key = self._content_key(new_cont)
self._contents[new_key] = new_cont
for algorithm in DEFAULT_ALGORITHMS:
hash_ = new_cont.get_hash(algorithm)
self._content_indexes[algorithm][hash_].add(new_key)
def content_add_metadata(self, content):
- """Add content metadata to the storage (like `content_add`, but
- without inserting to the objstorage).
-
- Args:
- content (iterable): iterable of dictionaries representing
- individual pieces of content to add. Each dictionary has the
- following keys:
-
- - length (int): content length (default: -1)
- - one key for each checksum algorithm in
- :data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the
- corresponding checksum
- - status (str): one of visible, hidden, absent
- - reason (str): if status = absent, the reason why
- - origin (int): if status = absent, the origin we saw the
- content in
- - ctime (datetime): time of insertion in the archive
-
- Raises:
- HashCollision in case of collision
-
- Returns:
- Summary dict with the following key and associated values:
-
- content:add: New contents added
- skipped_content:add: New skipped contents (no data) added
-
- """
content = list(self._content_to_model(content))
return self._content_add(content, with_data=False)
def content_get(self, content):
- """Retrieve in bulk contents and their data.
-
- This function may yield more blobs than provided sha1 identifiers,
- in case they collide.
-
- Args:
- content: iterables of sha1
-
- Yields:
- Dict[str, bytes]: Generates streams of contents as dict with their
- raw data:
-
- - sha1 (bytes): content id
- - data (bytes): content's raw data
-
- Raises:
- ValueError in case of too much contents are required.
- cf. BULK_BLOCK_CONTENT_LEN_MAX
-
- """
# FIXME: Make this method support slicing the `data`.
if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
raise ValueError(
"Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX)
for obj_id in content:
try:
data = self.objstorage.get(obj_id)
except ObjNotFoundError:
yield None
continue
yield {'sha1': obj_id, 'data': data}
def content_get_range(self, start, end, limit=1000):
- """Retrieve contents within range [start, end] bound by limit.
-
- Note that this function may return more than one blob per hash. The
- limit is enforced with multiplicity (ie. two blobs with the same hash
- will count twice toward the limit).
-
- Args:
- **start** (bytes): Starting identifier range (expected smaller
- than end)
- **end** (bytes): Ending identifier range (expected larger
- than start)
- **limit** (int): Limit result (default to 1000)
-
- Returns:
- a dict with keys:
- - contents [dict]: iterable of contents in between the range.
- - next (bytes): There remains content in the range
- starting from this next sha1
-
- """
if limit is None:
raise ValueError('Development error: limit should not be None')
from_index = bisect.bisect_left(self._sorted_sha1s, start)
sha1s = itertools.islice(self._sorted_sha1s, from_index, None)
sha1s = ((sha1, content_key)
for sha1 in sha1s
for content_key in self._content_indexes['sha1'][sha1])
matched = []
next_content = None
for sha1, key in sha1s:
if sha1 > end:
break
if len(matched) >= limit:
next_content = sha1
break
matched.append(self._contents[key].to_dict())
return {
'contents': matched,
'next': next_content,
}
def content_get_partition(
self, partition_id: int, nb_partitions: int, limit: int = 1000,
page_token: str = None):
- """Splits contents into nb_partitions, and returns one of these based on
- partition_id (which must be in [0, nb_partitions-1])
-
- There is no guarantee on how the partitioning is done, or the
- result order.
-
- Args:
- partition_id (int): index of the partition to fetch
- nb_partitions (int): total number of partitions to split into
- limit (int): Limit result (default to 1000)
- page_token (Optional[str]): opaque token used for pagination.
-
- Returns:
- a dict with keys:
- - contents (List[dict]): iterable of contents in the partition.
- - **next_page_token** (Optional[str]): opaque token to be used as
- `page_token` for retrieving the next page. if absent, there is
- no more pages to gather.
- """
if limit is None:
raise ValueError('Development error: limit should not be None')
(start, end) = get_partition_bounds_bytes(
partition_id, nb_partitions, SHA1_SIZE)
if page_token:
start = hash_to_bytes(page_token)
if end is None:
end = b'\xff'*SHA1_SIZE
result = self.content_get_range(start, end, limit)
result2 = {
'contents': result['contents'],
'next_page_token': None,
}
if result['next']:
result2['next_page_token'] = hash_to_hex(result['next'])
return result2
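`content_get_partition` layers token pagination on `content_get_range`: the opaque `page_token` is just the hex form of the next sha1 in the range. A sketch draining one partition (`process` is a placeholder):

```python
# Pagination sketch: drain partition 0 of 16; process() is a placeholder.
page_token = None
while True:
    page = storage.content_get_partition(0, 16, limit=1000,
                                         page_token=page_token)
    for content in page['contents']:
        process(content)
    page_token = page['next_page_token']
    if page_token is None:       # no pages left in this partition
        break
```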
def content_get_metadata(
self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
- """Retrieve content metadata in bulk
-
- Args:
- content: iterable of content identifiers (sha1)
-
- Returns:
- a dict with keys the content's sha1 and the associated value
- either the existing content's metadata or None if the content does
- not exist.
-
- """
result: Dict = {sha1: [] for sha1 in contents}
for sha1 in contents:
if sha1 in self._content_indexes['sha1']:
objs = self._content_indexes['sha1'][sha1]
# only 1 element as content_add_metadata would have raised a
# hash collision otherwise
for key in objs:
d = self._contents[key].to_dict()
del d['ctime']
if 'data' in d:
del d['data']
result[sha1].append(d)
return result
def content_find(self, content):
if not set(content).intersection(DEFAULT_ALGORITHMS):
raise ValueError('content keys must contain at least one of: '
'%s' % ', '.join(sorted(DEFAULT_ALGORITHMS)))
found = []
for algo in DEFAULT_ALGORITHMS:
hash = content.get(algo)
if hash and hash in self._content_indexes[algo]:
found.append(self._content_indexes[algo][hash])
if not found:
return []
keys = list(set.intersection(*found))
return [self._contents[key].to_dict() for key in keys]
def content_missing(self, content, key_hash='sha1'):
- """List content missing from storage
-
- Args:
- contents ([dict]): iterable of dictionaries whose keys are
- either 'length' or an item of
- :data:`swh.model.hashutil.ALGORITHMS`;
- mapped to the corresponding checksum
- (or length).
-
- key_hash (str): name of the column to use as hash id
- result (default: 'sha1')
-
- Returns:
- iterable ([bytes]): missing content ids (as per the
- key_hash column)
- """
for cont in content:
for (algo, hash_) in cont.items():
if algo not in DEFAULT_ALGORITHMS:
continue
if hash_ not in self._content_indexes.get(algo, []):
yield cont[key_hash]
break
else:
for result in self.content_find(cont):
if result['status'] == 'missing':
yield cont[key_hash]
def content_missing_per_sha1(self, contents):
- """List content missing from storage based only on sha1.
-
- Args:
- contents: Iterable of sha1 to check for absence.
-
- Returns:
- iterable: missing ids
-
- Raises:
- TODO: an exception when we get a hash collision.
-
- """
for content in contents:
if content not in self._content_indexes['sha1']:
yield content
def content_missing_per_sha1_git(self, contents):
- """List content missing from storage based only on sha1_git.
-
- Args:
- contents: An iterable of content id (sha1_git)
-
- Yields:
- missing contents sha1_git
- """
for content in contents:
if content not in self._content_indexes['sha1_git']:
yield content
def skipped_content_missing(self, contents):
- """List all skipped_content missing from storage
-
- Args:
- contents: Iterable of sha1 to check for skipped content entry
-
- Returns:
- iterable: dict of skipped content entry
- """
-
for content in contents:
for (key, algorithm) in self._content_key_algorithm(content):
if algorithm == 'blake2s256':
continue
if key not in self._skipped_content_indexes[algorithm]:
# index must contain hashes of algos except blake2s256
# else the content is considered skipped
yield content
break
def content_get_random(self):
- """Finds a random content id.
-
- Returns:
- a sha1_git
- """
return random.choice(list(self._content_indexes['sha1_git']))
def directory_add(self, directories):
- """Add directories to the storage
-
- Args:
- directories (iterable): iterable of dictionaries representing the
- individual directories to add. Each dict has the following
- keys:
-
- - id (sha1_git): the id of the directory to add
- - entries (list): list of dicts for each entry in the
- directory. Each dict has the following keys:
-
- - name (bytes)
- - type (one of 'file', 'dir', 'rev'): type of the
- directory entry (file, directory, revision)
- - target (sha1_git): id of the object pointed at by the
- directory entry
- - perms (int): entry permissions
- Returns:
- Summary dict of keys with associated count as values:
-
- directory:add: Number of directories actually added
-
- """
directories = list(directories)
if self.journal_writer:
self.journal_writer.write_additions(
'directory',
(dir_ for dir_ in directories
if dir_['id'] not in self._directories))
directories = [Directory.from_dict(d) for d in directories]
count = 0
for directory in directories:
if directory.id not in self._directories:
count += 1
self._directories[directory.id] = directory
self._objects[directory.id].append(
('directory', directory.id))
return {'directory:add': count}
def directory_missing(self, directories):
- """List directories missing from storage
-
- Args:
- directories (iterable): an iterable of directory ids
-
- Yields:
- missing directory ids
-
- """
for id in directories:
if id not in self._directories:
yield id
def _join_dentry_to_content(self, dentry):
keys = (
'status',
'sha1',
'sha1_git',
'sha256',
'length',
)
ret = dict.fromkeys(keys)
ret.update(dentry)
if ret['type'] == 'file':
# TODO: Make it able to handle more than one content
content = self.content_find({'sha1_git': ret['target']})
if content:
content = content[0]
for key in keys:
ret[key] = content[key]
return ret
def _directory_ls(self, directory_id, recursive, prefix=b''):
if directory_id in self._directories:
for entry in self._directories[directory_id].entries:
ret = self._join_dentry_to_content(entry.to_dict())
ret['name'] = prefix + ret['name']
ret['dir_id'] = directory_id
yield ret
if recursive and ret['type'] == 'dir':
yield from self._directory_ls(
ret['target'], True, prefix + ret['name'] + b'/')
def directory_ls(self, directory, recursive=False):
- """Get entries for one directory.
-
- Args:
- - directory: the directory to list entries from.
- - recursive: if flag on, this list recursively from this directory.
-
- Returns:
- List of entries for such directory.
-
- If `recursive=True`, names in the path of a dir/file not at the
- root are concatenated with a slash (`/`).
- """
yield from self._directory_ls(directory, recursive)
def directory_entry_get_by_path(self, directory, paths):
- """Get the directory entry (either file or dir) from directory with path.
-
- Args:
- - directory: sha1 of the top level directory
- - paths: path to lookup from the top level directory. From left
- (top) to right (bottom).
-
- Returns:
- The corresponding directory entry if found, None otherwise.
-
- """
return self._directory_entry_get_by_path(directory, paths, b'')
def directory_get_random(self):
- """Finds a random directory id.
-
- Returns:
- a sha1_git if any
-
- """
if not self._directories:
return None
return random.choice(list(self._directories))
def _directory_entry_get_by_path(self, directory, paths, prefix):
if not paths:
return
contents = list(self.directory_ls(directory))
if not contents:
return
def _get_entry(entries, name):
for entry in entries:
if entry['name'] == name:
entry = entry.copy()
entry['name'] = prefix + entry['name']
return entry
first_item = _get_entry(contents, paths[0])
if len(paths) == 1:
return first_item
if not first_item or first_item['type'] != 'dir':
return
return self._directory_entry_get_by_path(
first_item['target'], paths[1:], prefix + paths[0] + b'/')
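The path lookup recurses one component at a time, listing the current directory level at each step. For example (the root id is a placeholder):

```python
# Sketch: resolve b'src/main.py' under a top-level directory (placeholder id).
entry = storage.directory_entry_get_by_path(
    root_dir_id,              # sha1_git of the top-level directory
    [b'src', b'main.py'])     # path components, left (top) to right (bottom)
if entry is not None:
    print(entry['type'], entry['name'])   # e.g. 'file' b'src/main.py'
```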
def revision_add(self, revisions):
- """Add revisions to the storage
-
- Args:
- revisions (Iterable[dict]): iterable of dictionaries representing
- the individual revisions to add. Each dict has the following
- keys:
-
- - **id** (:class:`sha1_git`): id of the revision to add
- - **date** (:class:`dict`): date the revision was written
- - **committer_date** (:class:`dict`): date the revision got
- added to the origin
- - **type** (one of 'git', 'tar'): type of the
- revision added
- - **directory** (:class:`sha1_git`): the directory the
- revision points at
- - **message** (:class:`bytes`): the message associated with
- the revision
- - **author** (:class:`Dict[str, bytes]`): dictionary with
- keys: name, fullname, email
- - **committer** (:class:`Dict[str, bytes]`): dictionary with
- keys: name, fullname, email
- - **metadata** (:class:`jsonb`): extra information as
- dictionary
- - **synthetic** (:class:`bool`): revision's nature (tarball,
- directory creates synthetic revision`)
- - **parents** (:class:`list[sha1_git]`): the parents of
- this revision
-
- date dictionaries have the form defined in :mod:`swh.model`.
-
- Returns:
- Summary dict of keys with associated count as values
-
- revision_added: New objects actually stored in db
-
- """
revisions = list(revisions)
if self.journal_writer:
self.journal_writer.write_additions(
'revision',
(rev for rev in revisions
if rev['id'] not in self._revisions))
revisions = [Revision.from_dict(rev) for rev in revisions]
count = 0
for revision in revisions:
if revision.id not in self._revisions:
revision = attr.evolve(
revision,
committer=self._person_add(revision.committer),
author=self._person_add(revision.author))
self._revisions[revision.id] = revision
self._objects[revision.id].append(
('revision', revision.id))
count += 1
return {'revision:add': count}
def revision_missing(self, revisions):
- """List revisions missing from storage
-
- Args:
- revisions (iterable): revision ids
-
- Yields:
- missing revision ids
-
- """
for id in revisions:
if id not in self._revisions:
yield id
def revision_get(self, revisions):
for id in revisions:
if id in self._revisions:
yield self._revisions.get(id).to_dict()
else:
yield None
def _get_parent_revs(self, rev_id, seen, limit):
if limit and len(seen) >= limit:
return
if rev_id in seen or rev_id not in self._revisions:
return
seen.add(rev_id)
yield self._revisions[rev_id].to_dict()
for parent in self._revisions[rev_id].parents:
yield from self._get_parent_revs(parent, seen, limit)
def revision_log(self, revisions, limit=None):
- """Fetch revision entry from the given root revisions.
-
- Args:
- revisions: array of root revision to lookup
- limit: limitation on the output result. Default to None.
-
- Yields:
- List of revision log from such revisions root.
-
- """
seen = set()
for rev_id in revisions:
yield from self._get_parent_revs(rev_id, seen, limit)
def revision_shortlog(self, revisions, limit=None):
- """Fetch the shortlog for the given revisions
-
- Args:
- revisions: list of root revisions to lookup
- limit: depth limitation for the output
-
- Yields:
- a list of (id, parents) tuples.
-
- """
yield from ((rev['id'], rev['parents'])
for rev in self.revision_log(revisions, limit))
def revision_get_random(self):
- """Finds a random revision id.
-
- Returns:
- a sha1_git
- """
return random.choice(list(self._revisions))
def release_add(self, releases):
- """Add releases to the storage
-
- Args:
- releases (Iterable[dict]): iterable of dictionaries representing
- the individual releases to add. Each dict has the following
- keys:
-
- - **id** (:class:`sha1_git`): id of the release to add
- - **revision** (:class:`sha1_git`): id of the revision the
- release points to
- - **date** (:class:`dict`): the date the release was made
- - **name** (:class:`bytes`): the name of the release
- - **comment** (:class:`bytes`): the comment associated with
- the release
- - **author** (:class:`Dict[str, bytes]`): dictionary with
- keys: name, fullname, email
-
- the date dictionary has the form defined in :mod:`swh.model`.
-
- Returns:
- Summary dict of keys with associated count as values
-
- release:add: New objects contents actually stored in db
-
- """
releases = list(releases)
if self.journal_writer:
self.journal_writer.write_additions(
'release',
(rel for rel in releases
if rel['id'] not in self._releases))
releases = [Release.from_dict(rel) for rel in releases]
count = 0
for rel in releases:
if rel.id not in self._releases:
if rel.author:
self._person_add(rel.author)
self._objects[rel.id].append(
('release', rel.id))
self._releases[rel.id] = rel
count += 1
return {'release:add': count}
def release_missing(self, releases):
- """List releases missing from storage
-
- Args:
- releases: an iterable of release ids
-
- Returns:
- a list of missing release ids
-
- """
yield from (rel for rel in releases if rel not in self._releases)
def release_get(self, releases):
- """Given a list of sha1, return the releases's information
-
- Args:
- releases: list of sha1s
-
- Yields:
- dicts with the same keys as those given to `release_add`
- (or ``None`` if a release does not exist)
-
- """
for rel_id in releases:
if rel_id in self._releases:
yield self._releases[rel_id].to_dict()
else:
yield None
def release_get_random(self):
- """Finds a random release id.
-
- Returns:
- a sha1_git
- """
return random.choice(list(self._releases))
def snapshot_add(self, snapshots):
- """Add a snapshot to the storage
-
- Args:
- snapshot ([dict]): the snapshots to add, containing the
- following keys:
-
- - **id** (:class:`bytes`): id of the snapshot
- - **branches** (:class:`dict`): branches the snapshot contains,
- mapping the branch name (:class:`bytes`) to the branch target,
- itself a :class:`dict` (or ``None`` if the branch points to an
- unknown object)
-
- - **target_type** (:class:`str`): one of ``content``,
- ``directory``, ``revision``, ``release``,
- ``snapshot``, ``alias``
- - **target** (:class:`bytes`): identifier of the target
- (currently a ``sha1_git`` for all object kinds, or the name
- of the target branch for aliases)
-
- Raises:
- ValueError: if the origin's or visit's identifier does not exist.
-
- Returns:
- Summary dict of keys with associated count as values
-
- snapshot_added: Count of object actually stored in db
-
- """
count = 0
snapshots = (Snapshot.from_dict(d) for d in snapshots)
snapshots = (snap for snap in snapshots
if snap.id not in self._snapshots)
for snapshot in snapshots:
if self.journal_writer:
self.journal_writer.write_addition('snapshot', snapshot)
sorted_branch_names = sorted(snapshot.branches)
self._snapshots[snapshot.id] = (snapshot, sorted_branch_names)
self._objects[snapshot.id].append(('snapshot', snapshot.id))
count += 1
return {'snapshot:add': count}
def snapshot_missing(self, snapshots):
- """List snapshot missing from storage
-
- Args:
- snapshots (iterable): an iterable of snapshot ids
-
- Yields:
- missing snapshot ids
- """
for id in snapshots:
if id not in self._snapshots:
yield id
def snapshot_get(self, snapshot_id):
- """Get the content, possibly partial, of a snapshot with the given id
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- .. warning:: At most 1000 branches contained in the snapshot will be
- returned for performance reasons. In order to browse the whole
- set of branches, the method :meth:`snapshot_get_branches`
- should be used instead.
-
- Args:
- snapshot_id (bytes): identifier of the snapshot
- Returns:
- dict: a dict with three keys:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has less than 1000
- branches.
- """
return self.snapshot_get_branches(snapshot_id)
def snapshot_get_by_origin_visit(self, origin, visit):
- """Get the content, possibly partial, of a snapshot for the given origin visit
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- .. warning:: At most 1000 branches contained in the snapshot will be
- returned for performance reasons. In order to browse the whole
- set of branches, the method :meth:`snapshot_get_branches`
- should be used instead.
-
- Args:
- origin (int): the origin's identifier
- visit (int): the visit's identifier
- Returns:
- dict: None if the snapshot does not exist;
- a dict with three keys otherwise:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has less than 1000
- branches.
-
- """
origin_url = self._get_origin_url(origin)
if not origin_url:
return
if origin_url not in self._origins or \
visit > len(self._origin_visits[origin_url]):
return None
snapshot_id = self._origin_visits[origin_url][visit-1].snapshot
if snapshot_id:
return self.snapshot_get(snapshot_id)
else:
return None
def snapshot_get_latest(self, origin, allowed_statuses=None):
- """Get the content, possibly partial, of the latest snapshot for the
- given origin, optionally only from visits that have one of the given
- allowed_statuses
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- .. warning:: At most 1000 branches contained in the snapshot will be
- returned for performance reasons. In order to browse the whole
- set of branches, the methods :meth:`origin_visit_get_latest`
- and :meth:`snapshot_get_branches` should be used instead.
-
- Args:
- origin (str): the origin's URL
- allowed_statuses (list of str): list of visit statuses considered
- to find the latest snapshot for the origin. For instance,
- ``allowed_statuses=['full']`` will only consider visits that
- have successfully run to completion.
- Returns:
- dict: a dict with three keys:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has less than 1000
- branches.
- """
origin_url = self._get_origin_url(origin)
if not origin_url:
return
visit = self.origin_visit_get_latest(
origin_url,
allowed_statuses=allowed_statuses,
require_snapshot=True)
if visit and visit['snapshot']:
snapshot = self.snapshot_get(visit['snapshot'])
if not snapshot:
raise ValueError(
'last origin visit references an unknown snapshot')
return snapshot
def snapshot_count_branches(self, snapshot_id):
- """Count the number of branches in the snapshot with the given id
-
- Args:
- snapshot_id (bytes): identifier of the snapshot
-
- Returns:
- dict: A dict whose keys are the target types of branches and
- values their corresponding amount
- """
(snapshot, _) = self._snapshots[snapshot_id]
return collections.Counter(branch.target_type.value if branch else None
for branch in snapshot.branches.values())
def snapshot_get_branches(self, snapshot_id, branches_from=b'',
branches_count=1000, target_types=None):
- """Get the content, possibly partial, of a snapshot with the given id
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- Args:
- snapshot_id (bytes): identifier of the snapshot
- branches_from (bytes): optional parameter used to skip branches
- whose name is lesser than it before returning them
- branches_count (int): optional parameter used to restrain
- the amount of returned branches
- target_types (list): optional parameter used to filter the
- target types of branch to return (possible values that can be
- contained in that list are `'content', 'directory',
- 'revision', 'release', 'snapshot', 'alias'`)
- Returns:
- dict: None if the snapshot does not exist;
- a dict with three keys otherwise:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has less than
- `branches_count` branches after `branches_from` included.
- """
res = self._snapshots.get(snapshot_id)
if res is None:
return None
(snapshot, sorted_branch_names) = res
from_index = bisect.bisect_left(
sorted_branch_names, branches_from)
if target_types:
next_branch = None
branches = {}
for branch_name in sorted_branch_names[from_index:]:
branch = snapshot.branches[branch_name]
if branch and branch.target_type.value in target_types:
if len(branches) < branches_count:
branches[branch_name] = branch
else:
next_branch = branch_name
break
else:
# As there is no 'target_types', we can do that much faster
to_index = from_index + branches_count
returned_branch_names = sorted_branch_names[from_index:to_index]
branches = {branch_name: snapshot.branches[branch_name]
for branch_name in returned_branch_names}
if to_index >= len(sorted_branch_names):
next_branch = None
else:
next_branch = sorted_branch_names[to_index]
branches = {name: branch.to_dict() if branch else None
for (name, branch) in branches.items()}
return {
'id': snapshot_id,
'branches': branches,
'next_branch': next_branch,
}
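Since at most `branches_count` branches come back per call, `next_branch` acts as a cursor: it is the first branch name not included, and the `bisect_left` lookup makes it inclusive on the next call. A sketch collecting every branch (the snapshot id is a placeholder):

```python
# Cursor sketch: gather all branches of a snapshot; snapshot_id is a placeholder.
branches = {}
cursor = b''
while cursor is not None:
    part = storage.snapshot_get_branches(
        snapshot_id, branches_from=cursor, branches_count=1000)
    branches.update(part['branches'])
    cursor = part['next_branch']
```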
def snapshot_get_random(self):
- """Finds a random snapshot id.
-
- Returns:
- a sha1_git
- """
return random.choice(list(self._snapshots))
def object_find_by_sha1_git(self, ids):
- """Return the objects found with the given ids.
-
- Args:
- ids: a generator of sha1_gits
-
- Returns:
- dict: a mapping from id to the list of objects found. Each object
- found is itself a dict with keys:
-
- - sha1_git: the input id
- - type: the type of object found
-
- """
ret = {}
for id_ in ids:
objs = self._objects.get(id_, [])
ret[id_] = [{
'sha1_git': id_,
'type': obj[0],
} for obj in objs]
return ret
def _convert_origin(self, t):
if t is None:
return None
return t.to_dict()
def origin_get(self, origins):
- """Return origins, either all identified by their ids or all
- identified by urls.
-
- Args:
- origin: a list of dictionaries representing the individual
- origins to find.
- These dicts have either the key url (and optionally type):
-
- - url (bytes): the url the origin points to
-
- or the id:
-
- - id (int): the origin's identifier
-
- Returns:
- dict: the origin dictionary with the keys:
-
- - id: origin's id
- - url: origin's url
-
- Raises:
- ValueError: if the keys does not match (url and type) nor id.
-
- """
if isinstance(origins, dict):
# Old API
return_single = True
origins = [origins]
else:
return_single = False
# Sanity check to be error-compatible with the pgsql backend
if any('id' in origin for origin in origins) \
and not all('id' in origin for origin in origins):
raise ValueError(
'Either all origins or none at all should have an "id".')
if any('url' in origin for origin in origins) \
and not all('url' in origin for origin in origins):
raise ValueError(
'Either all origins or none at all should have '
'an "url" key.')
results = []
for origin in origins:
result = None
if 'url' in origin:
if origin['url'] in self._origins:
result = self._origins[origin['url']]
else:
raise ValueError(
'Origin must have an url.')
results.append(self._convert_origin(result))
if return_single:
assert len(results) == 1
return results[0]
else:
return results
def origin_get_by_sha1(self, sha1s):
- """Return origins, identified by the sha1 of their URLs.
-
- Args:
- sha1s (list[bytes]): a list of sha1s
-
- Yields:
- dicts containing origin information as returned
- by :meth:`swh.storage.in_memory.Storage.origin_get`, or None if an
- origin matching the sha1 is not found.
- """
return [
self._convert_origin(self._origins_by_sha1.get(sha1))
for sha1 in sha1s
]
def origin_get_range(self, origin_from=1, origin_count=100):
- """Retrieve ``origin_count`` origins whose ids are greater
- or equal than ``origin_from``.
-
- Origins are sorted by id before retrieving them.
-
- Args:
- origin_from (int): the minimum id of origins to retrieve
- origin_count (int): the maximum number of origins to retrieve
-
- Yields:
- dicts containing origin information as returned
- by :meth:`swh.storage.in_memory.Storage.origin_get`, plus
- an 'id' key.
- """
origin_from = max(origin_from, 1)
if origin_from <= len(self._origins_by_id):
max_idx = origin_from + origin_count - 1
if max_idx > len(self._origins_by_id):
max_idx = len(self._origins_by_id)
for idx in range(origin_from-1, max_idx):
origin = self._convert_origin(
self._origins[self._origins_by_id[idx]])
yield {'id': idx+1, **origin}
def origin_list(self, page_token: Optional[str] = None, limit: int = 100
) -> dict:
- """Returns the list of origins
-
- Args:
- page_token: opaque token used for pagination.
- limit: the maximum number of results to return
-
- Returns:
- dict: dict with the following keys:
- - **next_page_token** (str, optional): opaque token to be used as
- `page_token` for retrieving the next page. if absent, there is
- no more pages to gather.
- - **origins** (List[dict]): list of origins, as returned by
- `origin_get`.
- """
origin_urls = sorted(self._origins)
if page_token:
from_ = bisect.bisect_left(origin_urls, page_token)
else:
from_ = 0
result = {
'origins': [{'url': origin_url}
for origin_url in origin_urls[from_:from_+limit]]
}
if from_+limit < len(origin_urls):
result['next_page_token'] = origin_urls[from_+limit]
return result
def origin_search(self, url_pattern, offset=0, limit=50,
regexp=False, with_visit=False):
- """Search for origins whose urls contain a provided string pattern
- or match a provided regular expression.
- The search is performed in a case insensitive way.
-
- Args:
- url_pattern (str): the string pattern to search for in origin urls
- offset (int): number of found origins to skip before returning
- results
- limit (int): the maximum number of found origins to return
- regexp (bool): if True, consider the provided pattern as a regular
- expression and return origins whose urls match it
- with_visit (bool): if True, filter out origins with no visit
-
- Returns:
- An iterable of dict containing origin information as returned
- by :meth:`swh.storage.storage.Storage.origin_get`.
- """
origins = map(self._convert_origin, self._origins.values())
if regexp:
pat = re.compile(url_pattern)
origins = [orig for orig in origins if pat.search(orig['url'])]
else:
origins = [orig for orig in origins if url_pattern in orig['url']]
if with_visit:
origins = [
orig for orig in origins
if len(self._origin_visits[orig['url']]) > 0 and
set(ov.snapshot
for ov in self._origin_visits[orig['url']]
if ov.snapshot) &
set(self._snapshots)]
return origins[offset:offset+limit]
def origin_count(self, url_pattern, regexp=False, with_visit=False):
- """Count origins whose urls contain a provided string pattern
- or match a provided regular expression.
- The pattern search in origin urls is performed in a case insensitive
- way.
-
- Args:
- url_pattern (str): the string pattern to search for in origin urls
- regexp (bool): if True, consider the provided pattern as a regular
- expression and return origins whose urls match it
- with_visit (bool): if True, filter out origins with no visit
-
- Returns:
- int: The number of origins matching the search criterion.
- """
return len(self.origin_search(url_pattern, regexp=regexp,
with_visit=with_visit,
limit=len(self._origins)))
def origin_add(self, origins):
- """Add origins to the storage
-
- Args:
- origins: list of dictionaries representing the individual origins,
- with the following keys:
-
- - url (bytes): the url the origin points to
-
- Returns:
- list: given origins as dict updated with their id
-
- """
origins = copy.deepcopy(list(origins))
for origin in origins:
self.origin_add_one(origin)
return origins
def origin_add_one(self, origin):
- """Add origin to the storage
-
- Args:
- origin: dictionary representing the individual origin to add. This
- dict has the following keys:
-
- - url (bytes): the url the origin points to
-
- Returns:
- the id of the added origin, or of the identical one that already
- exists.
-
- """
origin = Origin.from_dict(origin)
if origin.url not in self._origins:
if self.journal_writer:
self.journal_writer.write_addition('origin', origin)
# generate an origin_id because it is needed by origin_get_range.
# TODO: remove this when we remove origin_get_range
origin_id = len(self._origins) + 1
self._origins_by_id.append(origin.url)
assert len(self._origins_by_id) == origin_id
self._origins[origin.url] = origin
self._origins_by_sha1[origin_url_to_sha1(origin.url)] = origin
self._origin_visits[origin.url] = []
self._objects[origin.url].append(('origin', origin.url))
return origin.url
def origin_visit_add(self, origin, date, type):
- """Add an origin_visit for the origin at date with status 'ongoing'.
-
- Args:
- origin (str): visited origin's identifier or URL
- date (Union[str,datetime]): timestamp of such visit
- type (str): the type of loader used for the visit (hg, git, ...)
-
- Returns:
- dict: dictionary with keys origin and visit where:
-
- - origin: origin's identifier
- - visit: the visit's identifier for the new visit occurrence
-
- """
origin_url = origin
if origin_url is None:
raise ValueError('Unknown origin.')
if isinstance(date, str):
# FIXME: Converge on iso8601 at some point
date = dateutil.parser.parse(date)
elif not isinstance(date, datetime.datetime):
raise TypeError('date must be a datetime or a string.')
visit_ret = None
if origin_url in self._origins:
origin = self._origins[origin_url]
# visit ids are in the range [1, +inf[
visit_id = len(self._origin_visits[origin_url]) + 1
status = 'ongoing'
visit = OriginVisit(
origin=origin.url,
date=date,
type=type,
status=status,
snapshot=None,
metadata=None,
visit=visit_id,
)
self._origin_visits[origin_url].append(visit)
visit_ret = {
'origin': origin.url,
'visit': visit_id,
}
self._objects[(origin_url, visit_id)].append(
('origin_visit', None))
if self.journal_writer:
self.journal_writer.write_addition('origin_visit', visit)
return visit_ret
def origin_visit_update(self, origin, visit_id, status=None,
metadata=None, snapshot=None):
- """Update an origin_visit's status.
-
- Args:
- origin (str): visited origin's URL
- visit_id (int): visit's identifier
- status: visit's new status
- metadata: data associated to the visit
- snapshot (sha1_git): identifier of the snapshot to add to
- the visit
-
- Returns:
- None
-
- """
if not isinstance(origin, str):
raise TypeError('origin must be a string, not %r' % (origin,))
origin_url = self._get_origin_url(origin)
if origin_url is None:
raise ValueError('Unknown origin.')
try:
visit = self._origin_visits[origin_url][visit_id-1]
except IndexError:
raise ValueError('Unknown visit_id for this origin') \
from None
updates = {}
if status:
updates['status'] = status
if metadata:
updates['metadata'] = metadata
if snapshot:
updates['snapshot'] = snapshot
visit = attr.evolve(visit, **updates)
if self.journal_writer:
self.journal_writer.write_update('origin_visit', visit)
self._origin_visits[origin_url][visit_id-1] = visit
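Taken together, `origin_add_one`, `origin_visit_add`, and `origin_visit_update` cover the usual loader lifecycle: register the origin, open an 'ongoing' visit, then close it with a final status and snapshot. A sketch (the URL and snapshot id are placeholders):

```python
# Loader-lifecycle sketch; the URL and snapshot_id are placeholders.
import datetime

url = storage.origin_add_one({'url': 'https://example.org/repo.git'})
visit = storage.origin_visit_add(
    url, date=datetime.datetime.now(tz=datetime.timezone.utc), type='git')
# ... load the repository, then close the visit:
storage.origin_visit_update(url, visit['visit'],
                            status='full', snapshot=snapshot_id)
```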
def origin_visit_upsert(self, visits):
- """Add a origin_visits with a specific id and with all its data.
- If there is already an origin_visit with the same
- `(origin_url, visit_id)`, updates it instead of inserting a new one.
-
- Args:
- visits: iterable of dicts with keys:
-
- - **origin**: origin url
- - **visit**: origin visit id
- - **type**: type of loader used for the visit
- - **date**: timestamp of such visit
- - **status**: Visit's new status
- - **metadata**: Data associated to the visit
- - **snapshot**: identifier of the snapshot to add to
- the visit
- """
for visit in visits:
if not isinstance(visit['origin'], str):
raise TypeError("visit['origin'] must be a string, not %r"
% (visit['origin'],))
visits = [OriginVisit.from_dict(d) for d in visits]
if self.journal_writer:
for visit in visits:
self.journal_writer.write_addition('origin_visit', visit)
for visit in visits:
visit_id = visit.visit
origin_url = visit.origin
visit = attr.evolve(visit, origin=origin_url)
self._objects[(origin_url, visit_id)].append(
('origin_visit', None))
while len(self._origin_visits[origin_url]) <= visit_id:
self._origin_visits[origin_url].append(None)
self._origin_visits[origin_url][visit_id-1] = visit
def _convert_visit(self, visit):
if visit is None:
return
visit = visit.to_dict()
return visit
def origin_visit_get(self, origin, last_visit=None, limit=None):
- """Retrieve all the origin's visit's information.
-
- Args:
- origin (int): the origin's identifier
- last_visit (int): visit's id from which listing the next ones,
- default to None
- limit (int): maximum number of results to return,
- default to None
-
- Yields:
- List of visits.
-
- """
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits:
visits = self._origin_visits[origin_url]
if last_visit is not None:
visits = visits[last_visit:]
if limit is not None:
visits = visits[:limit]
for visit in visits:
if not visit:
continue
visit_id = visit.visit
yield self._convert_visit(
self._origin_visits[origin_url][visit_id-1])
def origin_visit_find_by_date(self, origin, visit_date):
- """Retrieves the origin visit whose date is closest to the provided
- timestamp.
- In case of a tie, the visit with largest id is selected.
-
- Args:
- origin (str): The occurrence's origin (URL).
- target (datetime): target timestamp
-
- Returns:
- A visit.
-
- """
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits:
visits = self._origin_visits[origin_url]
visit = min(
visits,
key=lambda v: (abs(v.date - visit_date), -v.visit))
return self._convert_visit(visit)
def origin_visit_get_by(self, origin, visit):
- """Retrieve origin visit's information.
-
- Args:
- origin (int): the origin's identifier
-
- Returns:
- The information on that particular (origin, visit) or None if
- it does not exist
-
- """
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits and \
visit <= len(self._origin_visits[origin_url]):
return self._convert_visit(
self._origin_visits[origin_url][visit-1])
def origin_visit_get_latest(
self, origin, allowed_statuses=None, require_snapshot=False):
- """Get the latest origin visit for the given origin, optionally
- looking only for those with one of the given allowed_statuses
- or for those with a known snapshot.
-
- Args:
- origin (str): the origin's URL
- allowed_statuses (list of str): list of visit statuses considered
- to find the latest visit. For instance,
- ``allowed_statuses=['full']`` will only consider visits that
- have successfully run to completion.
- require_snapshot (bool): If True, only a visit with a snapshot
- will be returned.
-
- Returns:
- dict: a dict with the following keys:
-
- - **origin**: the URL of the origin
- - **visit**: origin visit id
- - **type**: type of loader used for the visit
- - **date**: timestamp of such visit
- - **status**: Visit's new status
- - **metadata**: Data associated to the visit
- - **snapshot** (Optional[sha1_git]): identifier of the snapshot
- associated to the visit
- """
origin = self._origins.get(origin)
if not origin:
return
visits = self._origin_visits[origin.url]
if allowed_statuses is not None:
visits = [visit for visit in visits
if visit.status in allowed_statuses]
if require_snapshot:
visits = [visit for visit in visits
if visit.snapshot]
visit = max(
visits, key=lambda v: (v.date, v.visit), default=None)
return self._convert_visit(visit)
def _select_random_origin_visit_by_type(self, type: str) -> str:
- """Select randomly an origin visit """
while True:
url = random.choice(list(self._origin_visits.keys()))
random_origin_visits = self._origin_visits[url]
if random_origin_visits[0].type == type:
return url
def origin_visit_get_random(self, type: str) -> Optional[Dict[str, Any]]:
- """Randomly select one successful origin visit with
- made in the last 3 months.
-
- Returns:
- dict representing an origin visit, in the same format as
- `origin_visit_get`.
-
- """
url = self._select_random_origin_visit_by_type(type)
random_origin_visits = copy.deepcopy(self._origin_visits[url])
random_origin_visits.reverse()
back_in_the_day = now() - timedelta(weeks=12) # 3 months back
# This should be enough for tests
for visit in random_origin_visits:
if visit.date > back_in_the_day and visit.status == 'full':
return visit.to_dict()
else:
return None
def stat_counters(self):
- """compute statistics about the number of tuples in various tables
-
- Returns:
- dict: a dictionary mapping textual labels (e.g., content) to
- integer values (e.g., the number of tuples in table content)
-
- """
keys = (
'content',
'directory',
'origin',
'origin_visit',
'person',
'release',
'revision',
'skipped_content',
'snapshot'
)
stats = {key: 0 for key in keys}
stats.update(collections.Counter(
obj_type
for (obj_type, obj_id)
in itertools.chain(*self._objects.values())))
return stats
def refresh_stat_counters(self):
- """Recomputes the statistics for `stat_counters`."""
pass
def origin_metadata_add(self, origin_url, ts, provider, tool, metadata):
- """ Add an origin_metadata for the origin at ts with provenance and
- metadata.
-
- Args:
- origin_url (str): the origin url for which the metadata is added
- ts (datetime): timestamp of the found metadata
- provider: id of the provider of metadata (ex:'hal')
- tool: id of the tool used to extract metadata
- metadata (jsonb): the metadata retrieved at the time and location
- """
if not isinstance(origin_url, str):
raise TypeError('origin_url must be str, not %r' % (origin_url,))
if isinstance(ts, str):
ts = dateutil.parser.parse(ts)
origin_metadata = {
'origin_url': origin_url,
'discovery_date': ts,
'tool_id': tool,
'metadata': metadata,
'provider_id': provider,
}
self._origin_metadata[origin_url].append(origin_metadata)
return None
def origin_metadata_get_by(self, origin_url, provider_type=None):
- """Retrieve list of all origin_metadata entries for the origin_url
-
- Args:
- origin_url (str): the origin's url
- provider_type (str): (optional) type of provider
-
- Returns:
- list of dicts: the origin_metadata dictionary with the keys:
-
- - origin_url (int): origin's URL
- - discovery_date (datetime): timestamp of discovery
- - tool_id (int): metadata's extracting tool
- - metadata (jsonb)
- - provider_id (int): metadata's provider
- - provider_name (str)
- - provider_type (str)
- - provider_url (str)
-
- """
if not isinstance(origin_url, str):
raise TypeError('origin_url must be str, not %r' % (origin_url,))
metadata = []
for item in self._origin_metadata[origin_url]:
item = copy.deepcopy(item)
provider = self.metadata_provider_get(item['provider_id'])
for attr_name in ('name', 'type', 'url'):
item['provider_' + attr_name] = \
provider['provider_' + attr_name]
metadata.append(item)
return metadata
def tool_add(self, tools):
- """Add new tools to the storage.
-
- Args:
- tools (iterable of :class:`dict`): Tool information to add to
- storage. Each tool is a :class:`dict` with the following keys:
-
- - name (:class:`str`): name of the tool
- - version (:class:`str`): version of the tool
- - configuration (:class:`dict`): configuration of the tool,
- must be json-encodable
-
- Returns:
- :class:`dict`: All the tools inserted in storage
- (including the internal ``id``). The order of the list is not
- guaranteed to match the order of the initial list.
-
- """
inserted = []
for tool in tools:
key = self._tool_key(tool)
assert 'id' not in tool
record = copy.deepcopy(tool)
record['id'] = key # TODO: remove this
if key not in self._tools:
self._tools[key] = record
inserted.append(copy.deepcopy(self._tools[key]))
return inserted
def tool_get(self, tool):
- """Retrieve tool information.
-
- Args:
- tool (dict): Tool information we want to retrieve from storage.
- The dicts have the same keys as those used in :func:`tool_add`.
-
- Returns:
- dict: The full tool information if it exists (``id`` included),
- None otherwise.
-
- """
return self._tools.get(self._tool_key(tool))
def metadata_provider_add(self, provider_name, provider_type, provider_url,
metadata):
- """Add a metadata provider.
-
- Args:
- provider_name (str): Its name
- provider_type (str): Its type
- provider_url (str): Its URL
- metadata: JSON-encodable object
-
- Returns:
- an identifier of the provider
- """
provider = {
'provider_name': provider_name,
'provider_type': provider_type,
'provider_url': provider_url,
'metadata': metadata,
}
key = self._metadata_provider_key(provider)
provider['id'] = key
self._metadata_providers[key] = provider
return key
def metadata_provider_get(self, provider_id):
- """Get a metadata provider
-
- Args:
- provider_id: Its identifier, as given by `metadata_provider_add`.
-
- Returns:
- dict: same as `metadata_provider_add`;
- or None if it does not exist.
- """
return self._metadata_providers.get(provider_id)
def metadata_provider_get_by(self, provider):
- """Get a metadata provider
-
- Args:
- provider_name: Its name
- provider_url: Its URL
-
- Returns:
- dict: same as `metadata_provider_add`;
- or None if it does not exist.
- """
key = self._metadata_provider_key(provider)
return self._metadata_providers.get(key)
def _get_origin_url(self, origin):
if isinstance(origin, str):
return origin
else:
raise TypeError('origin must be a string.')
def _person_add(self, person):
- """Add a person in storage.
-
- Note: Private method, do not use outside of this class.
-
- Args:
- person: dictionary with keys fullname, name and email.
-
- """
key = ('person', person.fullname)
if key not in self._objects:
person_id = len(self._persons) + 1
self._persons.append(person)
self._objects[key].append(('person', person_id))
else:
person_id = self._objects[key][0][1]
person = self._persons[person_id-1]
return person
@staticmethod
def _content_key(content):
"""A stable key for a content"""
return tuple(getattr(content, key)
for key in sorted(DEFAULT_ALGORITHMS))
@staticmethod
def _content_key_algorithm(content):
""" A stable key and the algorithm for a content"""
if isinstance(content, Content):
content = content.to_dict()
return tuple((content.get(key), key)
for key in sorted(DEFAULT_ALGORITHMS))
@staticmethod
def _tool_key(tool):
return '%r %r %r' % (tool['name'], tool['version'],
tuple(sorted(tool['configuration'].items())))
@staticmethod
def _metadata_provider_key(provider):
return '%r %r' % (provider['provider_name'], provider['provider_url'])
+
+ def diff_directories(self, from_dir, to_dir, track_renaming=False):
+ raise NotImplementedError('InMemoryStorage.diff_directories')
+
+ def diff_revisions(self, from_rev, to_rev, track_renaming=False):
+ raise NotImplementedError('InMemoryStorage.diff_revisions')
+
+ def diff_revision(self, revision, track_renaming=False):
+ raise NotImplementedError('InMemoryStorage.diff_revision')
diff --git a/swh/storage/interface.py b/swh/storage/interface.py
new file mode 100644
index 00000000..7824d024
--- /dev/null
+++ b/swh/storage/interface.py
@@ -0,0 +1,1224 @@
+# Copyright (C) 2015-2020 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+from typing import Any, Dict, List, Optional
+
+from swh.core.api import remote_api_endpoint
+
+
+class StorageInterface:
+ @remote_api_endpoint('check_config')
+ def check_config(self, *, check_write):
+ """Check that the storage is configured and ready to go."""
+ ...
+
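As a point of orientation, each ``remote_api_endpoint`` path declared below is the URL suffix under which the RPC layer exposes the method, so the same calls work on a local backend and through the HTTP client. A hedged sketch, assuming a storage server listening on a placeholder URL and a ``get_storage`` factory accepting keyword arguments (the exact factory signature may differ across versions):

    from swh.storage import get_storage

    # 'remote' proxies every StorageInterface method over HTTP;
    # the URL is a placeholder for a running storage server.
    storage = get_storage('remote', url='http://localhost:5002/')
    assert storage.check_config(check_write=False)
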
+ @remote_api_endpoint('content/add')
+ def content_add(self, content):
+ """Add content blobs to the storage
+
+ Args:
+            content (iterable): iterable of dictionaries representing
+ individual pieces of content to add. Each dictionary has the
+ following keys:
+
+ - data (bytes): the actual content
+ - length (int): content length (default: -1)
+ - one key for each checksum algorithm in
+ :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
+ corresponding checksum
+ - status (str): one of visible, hidden, absent
+ - reason (str): if status = absent, the reason why
+ - origin (int): if status = absent, the origin we saw the
+ content in
+
+ Raises:
+
+ The following exceptions can occur:
+
+ - HashCollision in case of collision
+            - Any other exceptions raised by the db
+
+ In case of errors, some of the content may have been stored in
+ the DB and in the objstorage.
+            Since additions to both are idempotent, that should not be a problem.
+
+ Returns:
+ Summary dict with the following key and associated values:
+
+ content:add: New contents added
+ content:add:bytes: Sum of the contents' length data
+ skipped_content:add: New skipped contents (no data) added
+ """
+ ...
+
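For illustration, a minimal sketch of building one such content dict on the client side; the use of ``swh.model.hashutil.MultiHash`` is an assumption about the caller's tooling, not part of this patch:

    from swh.model.hashutil import MultiHash

    def make_content(data: bytes) -> dict:
        # One checksum per algorithm (sha1, sha1_git, sha256, blake2s256).
        content = MultiHash.from_data(data).digest()
        content['data'] = data
        content['length'] = len(data)
        content['status'] = 'visible'
        return content

    # storage.content_add([make_content(b'hello world')])
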
+ @remote_api_endpoint('content/update')
+ def content_update(self, content, keys=[]):
+ """Update content blobs to the storage. Does nothing for unknown
+ contents or skipped ones.
+
+ Args:
+ content (iterable): iterable of dictionaries representing
+ individual pieces of content to update. Each dictionary has the
+ following keys:
+
+ - data (bytes): the actual content
+ - length (int): content length (default: -1)
+ - one key for each checksum algorithm in
+ :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
+ corresponding checksum
+ - status (str): one of visible, hidden, absent
+
+ keys (list): List of keys (str) whose values needs an update, e.g.,
+ new hash column
+
+ """
+ ...
+
+ @remote_api_endpoint('content/add_metadata')
+ def content_add_metadata(self, content):
+ """Add content metadata to the storage (like `content_add`, but
+ without inserting to the objstorage).
+
+ Args:
+ content (iterable): iterable of dictionaries representing
+ individual pieces of content to add. Each dictionary has the
+ following keys:
+
+ - length (int): content length (default: -1)
+ - one key for each checksum algorithm in
+ :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
+ corresponding checksum
+ - status (str): one of visible, hidden, absent
+ - reason (str): if status = absent, the reason why
+ - origin (int): if status = absent, the origin we saw the
+ content in
+ - ctime (datetime): time of insertion in the archive
+
+ Returns:
+ Summary dict with the following key and associated values:
+
+ content:add: New contents added
+ skipped_content:add: New skipped contents (no data) added
+ """
+ ...
+
+ @remote_api_endpoint('content/data')
+ def content_get(self, content):
+ """Retrieve in bulk contents and their data.
+
+        This generator yields exactly as many items as provided sha1
+ identifiers, but callers should not assume this will always be true.
+
+ It may also yield `None` values in case an object was not found.
+
+ Args:
+ content: iterables of sha1
+
+ Yields:
+ Dict[str, bytes]: Generates streams of contents as dict with their
+ raw data:
+
+ - sha1 (bytes): content id
+ - data (bytes): content's raw data
+
+ Raises:
+            ValueError: in case too many contents are requested;
+            cf. BULK_BLOCK_CONTENT_LEN_MAX
+
+ """
+ ...
+
+ @remote_api_endpoint('content/range')
+ def content_get_range(self, start, end, limit=1000):
+ """Retrieve contents within range [start, end] bound by limit.
+
+ Note that this function may return more than one blob per hash. The
+ limit is enforced with multiplicity (ie. two blobs with the same hash
+ will count twice toward the limit).
+
+ Args:
+ **start** (bytes): Starting identifier range (expected smaller
+ than end)
+ **end** (bytes): Ending identifier range (expected larger
+ than start)
+ **limit** (int): Limit result (default to 1000)
+
+ Returns:
+ a dict with keys:
+ - contents [dict]: iterable of contents in between the range.
+                - next (bytes): if content remains in the range, the next
+                  sha1 to restart the listing from
+
+ """
+ ...
+
+ @remote_api_endpoint('content/partition')
+ def content_get_partition(
+ self, partition_id: int, nb_partitions: int, limit: int = 1000,
+ page_token: str = None):
+ """Splits contents into nb_partitions, and returns one of these based on
+ partition_id (which must be in [0, nb_partitions-1])
+
+ There is no guarantee on how the partitioning is done, or the
+ result order.
+
+ Args:
+ partition_id (int): index of the partition to fetch
+ nb_partitions (int): total number of partitions to split into
+ limit (int): Limit result (default to 1000)
+ page_token (Optional[str]): opaque token used for pagination.
+
+ Returns:
+ a dict with keys:
+ - contents (List[dict]): iterable of contents in the partition.
+ - **next_page_token** (Optional[str]): opaque token to be used as
+                `page_token` for retrieving the next page. If absent, there
+                are no more pages to gather.
+ """
+ ...
+
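A sketch of draining one partition with the page-token protocol described above; ``storage`` stands for any StorageInterface implementation:

    def iter_partition(storage, partition_id: int, nb_partitions: int):
        # Follow next_page_token until the partition is exhausted.
        page_token = None
        while True:
            result = storage.content_get_partition(
                partition_id, nb_partitions, page_token=page_token)
            yield from result['contents']
            page_token = result['next_page_token']
            if page_token is None:
                break
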
+ @remote_api_endpoint('content/metadata')
+ def content_get_metadata(
+ self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
+ """Retrieve content metadata in bulk
+
+ Args:
+            contents: iterable of content identifiers (sha1)
+
+ Returns:
+            a dict mapping each queried sha1 to the list of its existing
+            metadata dicts (an empty list if the content does not exist).
+
+ """
+ ...
+
+ @remote_api_endpoint('content/missing')
+ def content_missing(self, content, key_hash='sha1'):
+ """List content missing from storage
+
+ Args:
+ content ([dict]): iterable of dictionaries whose keys are
+ either 'length' or an item of
+ :data:`swh.model.hashutil.ALGORITHMS`;
+ mapped to the corresponding checksum
+ (or length).
+
+ key_hash (str): name of the column to use as hash id
+ result (default: 'sha1')
+
+ Returns:
+ iterable ([bytes]): missing content ids (as per the
+ key_hash column)
+
+ Raises:
+ TODO: an exception when we get a hash collision.
+
+ """
+ ...
+
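Loaders typically use this endpoint to avoid re-sending known blobs; a minimal sketch under that assumption:

    def filter_new_contents(storage, contents):
        # Keep only the contents whose sha1 is not archived yet.
        missing = set(storage.content_missing(contents, key_hash='sha1'))
        return [c for c in contents if c['sha1'] in missing]
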
+ @remote_api_endpoint('content/missing/sha1')
+ def content_missing_per_sha1(self, contents):
+ """List content missing from storage based only on sha1.
+
+ Args:
+ contents: Iterable of sha1 to check for absence.
+
+ Returns:
+ iterable: missing ids
+
+ Raises:
+ TODO: an exception when we get a hash collision.
+
+ """
+ ...
+
+ @remote_api_endpoint('content/missing/sha1_git')
+ def content_missing_per_sha1_git(self, contents):
+ """List content missing from storage based only on sha1_git.
+
+ Args:
+ contents (Iterable): An iterable of content id (sha1_git)
+
+ Yields:
+ missing contents sha1_git
+ """
+ ...
+
+ @remote_api_endpoint('content/skipped/missing')
+ def skipped_content_missing(self, contents):
+ """List skipped_content missing from storage
+
+ Args:
+            contents: iterable of dictionaries containing the data for each
+ checksum algorithm.
+
+ Returns:
+ iterable: missing signatures
+
+ """
+ ...
+
+ @remote_api_endpoint('content/present')
+ def content_find(self, content):
+ """Find a content hash in db.
+
+ Args:
+ content: a dictionary representing one content hash, mapping
+ checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to
+ checksum values
+
+ Returns:
+            a triplet (sha1, sha1_git, sha256) if the content exists
+ or None otherwise.
+
+ Raises:
+            ValueError: in case the dictionary contains none of sha1,
+                sha1_git, sha256 or blake2s256.
+
+ """
+ ...
+
+ @remote_api_endpoint('content/get_random')
+ def content_get_random(self):
+ """Finds a random content id.
+
+ Returns:
+ a sha1_git
+ """
+ ...
+
+ @remote_api_endpoint('directory/add')
+ def directory_add(self, directories):
+ """Add directories to the storage
+
+ Args:
+ directories (iterable): iterable of dictionaries representing the
+ individual directories to add. Each dict has the following
+ keys:
+
+ - id (sha1_git): the id of the directory to add
+ - entries (list): list of dicts for each entry in the
+ directory. Each dict has the following keys:
+
+ - name (bytes)
+ - type (one of 'file', 'dir', 'rev'): type of the
+ directory entry (file, directory, revision)
+ - target (sha1_git): id of the object pointed at by the
+ directory entry
+ - perms (int): entry permissions
+
+ Returns:
+ Summary dict of keys with associated count as values:
+
+ directory:add: Number of directories actually added
+
+ """
+ ...
+
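For illustration, one such directory dict; the ids are placeholder sha1_git values and 0o100644 is the usual permission for a regular file:

    directory = {
        'id': dir_id,  # sha1_git of the directory (placeholder)
        'entries': [
            {
                'name': b'README',
                'type': 'file',
                'target': readme_sha1_git,  # placeholder content id
                'perms': 0o100644,
            },
        ],
    }
    # storage.directory_add([directory])
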
+ @remote_api_endpoint('directory/missing')
+ def directory_missing(self, directories):
+ """List directories missing from storage
+
+ Args:
+ directories (iterable): an iterable of directory ids
+
+ Yields:
+ missing directory ids
+
+ """
+ ...
+
+ @remote_api_endpoint('directory/ls')
+ def directory_ls(self, directory, recursive=False):
+ """Get entries for one directory.
+
+ Args:
+ - directory: the directory to list entries from.
+            - recursive: if set, list entries recursively from this directory.
+
+ Returns:
+ List of entries for such directory.
+
+ If `recursive=True`, names in the path of a dir/file not at the
+ root are concatenated with a slash (`/`).
+
+ """
+ ...
+
+ @remote_api_endpoint('directory/path')
+ def directory_entry_get_by_path(self, directory, paths):
+ """Get the directory entry (either file or dir) from directory with path.
+
+ Args:
+ - directory: sha1 of the top level directory
+            - paths: path to look up from the top level directory. From left
+ (top) to right (bottom).
+
+ Returns:
+ The corresponding directory entry if found, None otherwise.
+
+ """
+ ...
+
+ @remote_api_endpoint('directory/get_random')
+ def directory_get_random(self):
+ """Finds a random directory id.
+
+ Returns:
+ a sha1_git
+ """
+ ...
+
+ @remote_api_endpoint('revision/add')
+ def revision_add(self, revisions):
+ """Add revisions to the storage
+
+ Args:
+ revisions (Iterable[dict]): iterable of dictionaries representing
+ the individual revisions to add. Each dict has the following
+ keys:
+
+ - **id** (:class:`sha1_git`): id of the revision to add
+ - **date** (:class:`dict`): date the revision was written
+ - **committer_date** (:class:`dict`): date the revision got
+ added to the origin
+ - **type** (one of 'git', 'tar'): type of the
+ revision added
+ - **directory** (:class:`sha1_git`): the directory the
+ revision points at
+ - **message** (:class:`bytes`): the message associated with
+ the revision
+ - **author** (:class:`Dict[str, bytes]`): dictionary with
+ keys: name, fullname, email
+ - **committer** (:class:`Dict[str, bytes]`): dictionary with
+ keys: name, fullname, email
+ - **metadata** (:class:`jsonb`): extra information as
+ dictionary
+            - **synthetic** (:class:`bool`): whether the revision is
+              synthetic (e.g. created from a tarball or a bare directory)
+ - **parents** (:class:`list[sha1_git]`): the parents of
+ this revision
+
+ date dictionaries have the form defined in :mod:`swh.model`.
+
+ Returns:
+ Summary dict of keys with associated count as values
+
+ revision:add: New objects actually stored in db
+
+ """
+ ...
+
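A hedged sketch of one revision dict matching the keys above; the hash values are placeholders and the date dicts must follow the :mod:`swh.model` format:

    author = {'name': b'Jane', 'email': b'jane@example.com',
              'fullname': b'Jane <jane@example.com>'}
    revision = {
        'id': rev_id,          # sha1_git (placeholder)
        'directory': dir_id,   # sha1_git (placeholder)
        'type': 'git',
        'message': b'Initial import',
        'author': author,
        'committer': author,
        'date': date_dict,            # swh.model-style date dict
        'committer_date': date_dict,
        'metadata': None,
        'synthetic': False,
        'parents': [],
    }
    # storage.revision_add([revision])
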
+ @remote_api_endpoint('revision/missing')
+ def revision_missing(self, revisions):
+ """List revisions missing from storage
+
+ Args:
+ revisions (iterable): revision ids
+
+ Yields:
+ missing revision ids
+
+ """
+ ...
+
+ @remote_api_endpoint('revision')
+ def revision_get(self, revisions):
+ """Get all revisions from storage
+
+ Args:
+ revisions: an iterable of revision ids
+
+ Returns:
+ iterable: an iterable of revisions as dictionaries (or None if the
+ revision doesn't exist)
+
+ """
+ ...
+
+ @remote_api_endpoint('revision/log')
+ def revision_log(self, revisions, limit=None):
+        """Fetch revision entries from the given root revisions.
+
+ Args:
+            revisions: array of root revisions to look up
+ limit: limitation on the output result. Default to None.
+
+ Yields:
+            Revision log entries from such root revisions.
+
+ """
+ ...
+
+ @remote_api_endpoint('revision/shortlog')
+ def revision_shortlog(self, revisions, limit=None):
+ """Fetch the shortlog for the given revisions
+
+ Args:
+            revisions: list of root revisions to look up
+ limit: depth limitation for the output
+
+ Yields:
+ a list of (id, parents) tuples.
+
+ """
+ ...
+
+ @remote_api_endpoint('revision/get_random')
+ def revision_get_random(self):
+ """Finds a random revision id.
+
+ Returns:
+ a sha1_git
+ """
+ ...
+
+ @remote_api_endpoint('release/add')
+ def release_add(self, releases):
+ """Add releases to the storage
+
+ Args:
+ releases (Iterable[dict]): iterable of dictionaries representing
+ the individual releases to add. Each dict has the following
+ keys:
+
+ - **id** (:class:`sha1_git`): id of the release to add
+ - **revision** (:class:`sha1_git`): id of the revision the
+ release points to
+ - **date** (:class:`dict`): the date the release was made
+ - **name** (:class:`bytes`): the name of the release
+ - **comment** (:class:`bytes`): the comment associated with
+ the release
+ - **author** (:class:`Dict[str, bytes]`): dictionary with
+ keys: name, fullname, email
+
+ the date dictionary has the form defined in :mod:`swh.model`.
+
+ Returns:
+ Summary dict of keys with associated count as values
+
+                release:add: New release objects actually stored in db
+
+ """
+ ...
+
+ @remote_api_endpoint('release/missing')
+ def release_missing(self, releases):
+ """List releases missing from storage
+
+ Args:
+ releases: an iterable of release ids
+
+ Returns:
+ a list of missing release ids
+
+ """
+ ...
+
+ @remote_api_endpoint('release')
+ def release_get(self, releases):
+        """Given a list of sha1, return the releases' information
+
+ Args:
+ releases: list of sha1s
+
+ Yields:
+ dicts with the same keys as those given to `release_add`
+ (or ``None`` if a release does not exist)
+
+ """
+ ...
+
+ @remote_api_endpoint('release/get_random')
+ def release_get_random(self):
+ """Finds a random release id.
+
+ Returns:
+ a sha1_git
+ """
+ ...
+
+ @remote_api_endpoint('snapshot/add')
+ def snapshot_add(self, snapshots):
+ """Add snapshots to the storage.
+
+ Args:
+            snapshots ([dict]): the snapshots to add, containing the
+ following keys:
+
+ - **id** (:class:`bytes`): id of the snapshot
+ - **branches** (:class:`dict`): branches the snapshot contains,
+ mapping the branch name (:class:`bytes`) to the branch target,
+ itself a :class:`dict` (or ``None`` if the branch points to an
+ unknown object)
+
+ - **target_type** (:class:`str`): one of ``content``,
+ ``directory``, ``revision``, ``release``,
+ ``snapshot``, ``alias``
+ - **target** (:class:`bytes`): identifier of the target
+ (currently a ``sha1_git`` for all object kinds, or the name
+ of the target branch for aliases)
+
+ Raises:
+ ValueError: if the origin or visit id does not exist.
+
+ Returns:
+
+ Summary dict of keys with associated count as values
+
+                snapshot:add: Count of objects actually stored in db
+
+ """
+ ...
+
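For illustration, a minimal snapshot dict with one branch and one alias; all ids are placeholders:

    snapshot = {
        'id': snp_id,  # sha1_git of the snapshot (placeholder)
        'branches': {
            b'refs/heads/master': {'target': rev_id,
                                   'target_type': 'revision'},
            b'HEAD': {'target': b'refs/heads/master',
                      'target_type': 'alias'},
        },
    }
    # storage.snapshot_add([snapshot])
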
+ @remote_api_endpoint('snapshot/missing')
+ def snapshot_missing(self, snapshots):
+ """List snapshots missing from storage
+
+ Args:
+ snapshots (iterable): an iterable of snapshot ids
+
+ Yields:
+ missing snapshot ids
+
+ """
+ ...
+
+ @remote_api_endpoint('snapshot')
+ def snapshot_get(self, snapshot_id):
+ """Get the content, possibly partial, of a snapshot with the given id
+
+ The branches of the snapshot are iterated in the lexicographical
+ order of their names.
+
+ .. warning:: At most 1000 branches contained in the snapshot will be
+ returned for performance reasons. In order to browse the whole
+ set of branches, the method :meth:`snapshot_get_branches`
+ should be used instead.
+
+ Args:
+ snapshot_id (bytes): identifier of the snapshot
+ Returns:
+ dict: a dict with three keys:
+ * **id**: identifier of the snapshot
+ * **branches**: a dict of branches contained in the snapshot
+ whose keys are the branches' names.
+ * **next_branch**: the name of the first branch not returned
+                  or :const:`None` if the snapshot has fewer than 1000
+ branches.
+ """
+ ...
+
+ @remote_api_endpoint('snapshot/by_origin_visit')
+ def snapshot_get_by_origin_visit(self, origin, visit):
+ """Get the content, possibly partial, of a snapshot for the given origin visit
+
+ The branches of the snapshot are iterated in the lexicographical
+ order of their names.
+
+ .. warning:: At most 1000 branches contained in the snapshot will be
+ returned for performance reasons. In order to browse the whole
+ set of branches, the method :meth:`snapshot_get_branches`
+ should be used instead.
+
+ Args:
+ origin (int): the origin identifier
+ visit (int): the visit identifier
+ Returns:
+ dict: None if the snapshot does not exist;
+ a dict with three keys otherwise:
+ * **id**: identifier of the snapshot
+ * **branches**: a dict of branches contained in the snapshot
+ whose keys are the branches' names.
+ * **next_branch**: the name of the first branch not returned
+                  or :const:`None` if the snapshot has fewer than 1000
+ branches.
+
+ """
+ ...
+
+ @remote_api_endpoint('snapshot/latest')
+ def snapshot_get_latest(self, origin, allowed_statuses=None):
+ """Get the content, possibly partial, of the latest snapshot for the
+ given origin, optionally only from visits that have one of the given
+ allowed_statuses
+
+ The branches of the snapshot are iterated in the lexicographical
+ order of their names.
+
+ .. warning:: At most 1000 branches contained in the snapshot will be
+ returned for performance reasons. In order to browse the whole
+ set of branches, the method :meth:`snapshot_get_branches`
+ should be used instead.
+
+ Args:
+ origin (str): the origin's URL
+ allowed_statuses (list of str): list of visit statuses considered
+ to find the latest snapshot for the visit. For instance,
+ ``allowed_statuses=['full']`` will only consider visits that
+ have successfully run to completion.
+ Returns:
+ dict: a dict with three keys:
+ * **id**: identifier of the snapshot
+ * **branches**: a dict of branches contained in the snapshot
+ whose keys are the branches' names.
+ * **next_branch**: the name of the first branch not returned
+                  or :const:`None` if the snapshot has fewer than 1000
+ branches.
+ """
+ ...
+
+ @remote_api_endpoint('snapshot/count_branches')
+ def snapshot_count_branches(self, snapshot_id):
+ """Count the number of branches in the snapshot with the given id
+
+ Args:
+ snapshot_id (bytes): identifier of the snapshot
+
+ Returns:
+ dict: A dict whose keys are the target types of branches and
+ values their corresponding amount
+ """
+ ...
+
+ @remote_api_endpoint('snapshot/get_branches')
+ def snapshot_get_branches(self, snapshot_id, branches_from=b'',
+ branches_count=1000, target_types=None):
+ """Get the content, possibly partial, of a snapshot with the given id
+
+ The branches of the snapshot are iterated in the lexicographical
+ order of their names.
+
+ Args:
+ snapshot_id (bytes): identifier of the snapshot
+            branches_from (bytes): optional parameter used to skip branches
+                whose name is lexicographically smaller than it before
+                returning them
+            branches_count (int): optional parameter used to limit the
+                number of returned branches
+ target_types (list): optional parameter used to filter the
+ target types of branch to return (possible values that can be
+ contained in that list are `'content', 'directory',
+ 'revision', 'release', 'snapshot', 'alias'`)
+ Returns:
+ dict: None if the snapshot does not exist;
+ a dict with three keys otherwise:
+ * **id**: identifier of the snapshot
+ * **branches**: a dict of branches contained in the snapshot
+ whose keys are the branches' names.
+ * **next_branch**: the name of the first branch not returned
+                  or :const:`None` if the snapshot has fewer than
+ `branches_count` branches after `branches_from` included.
+ """
+ ...
+
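Since ``next_branch`` is the first branch not returned, feeding it back as ``branches_from`` resumes the iteration without duplicates; a hedged sketch:

    def iter_branches(storage, snapshot_id):
        branches_from = b''
        while True:
            snapshot = storage.snapshot_get_branches(
                snapshot_id, branches_from=branches_from)
            yield from snapshot['branches'].items()
            if snapshot['next_branch'] is None:
                break
            branches_from = snapshot['next_branch']
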
+ @remote_api_endpoint('snapshot/get_random')
+ def snapshot_get_random(self):
+ """Finds a random snapshot id.
+
+ Returns:
+ a sha1_git
+ """
+ ...
+
+ @remote_api_endpoint('origin/visit/add')
+ def origin_visit_add(self, origin, date, type):
+        """Add an origin_visit for the origin at the given date with
+        status 'ongoing'.
+
+ Args:
+ origin (str): visited origin's identifier or URL
+ date (Union[str,datetime]): timestamp of such visit
+ type (str): the type of loader used for the visit (hg, git, ...)
+
+ Returns:
+ dict: dictionary with keys origin and visit where:
+
+ - origin: origin identifier
+ - visit: the visit identifier for the new visit occurrence
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/visit/update')
+ def origin_visit_update(self, origin, visit_id, status=None,
+ metadata=None, snapshot=None):
+ """Update an origin_visit's status.
+
+ Args:
+ origin (str): visited origin's URL
+ visit_id: Visit's id
+ status: Visit's new status
+ metadata: Data associated to the visit
+ snapshot (sha1_git): identifier of the snapshot to add to
+ the visit
+
+ Returns:
+ None
+
+ """
+ ...
+
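Together with ``origin_visit_add``, this is the usual end-of-load sequence; a hedged sketch where the url, date and snapshot id are placeholders:

    origin_url = 'https://example.com/repo.git'
    storage.origin_add_one({'url': origin_url})
    visit = storage.origin_visit_add(origin_url, date=visit_date, type='git')
    # ... insert contents, directories, revisions and the snapshot ...
    storage.origin_visit_update(origin_url, visit['visit'],
                                status='full', snapshot=snp_id)
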
+ @remote_api_endpoint('origin/visit/upsert')
+ def origin_visit_upsert(self, visits):
+        """Add origin_visits with a specific id and with all their data.
+        If there is already an origin_visit with the same
+        `(origin_id, visit_id)`, it is overwritten.
+
+ Args:
+ visits: iterable of dicts with keys:
+
+ - **origin**: dict with keys either `id` or `url`
+ - **visit**: origin visit id
+ - **date**: timestamp of such visit
+ - **status**: Visit's new status
+ - **metadata**: Data associated to the visit
+ - **snapshot**: identifier of the snapshot to add to
+ the visit
+ """
+ ...
+
+ @remote_api_endpoint('origin/visit/get')
+ def origin_visit_get(self, origin, last_visit=None, limit=None):
+ """Retrieve all the origin's visit's information.
+
+ Args:
+ origin (str): The visited origin
+ last_visit: Starting point from which listing the next visits
+ Default to None
+ limit (int): Number of results to return from the last visit.
+ Default to None
+
+ Yields:
+ List of visits.
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/visit/find_by_date')
+ def origin_visit_find_by_date(self, origin, visit_date):
+ """Retrieves the origin visit whose date is closest to the provided
+ timestamp.
+        In case of a tie, the visit with the largest id is selected.
+
+ Args:
+ origin (str): The occurrence's origin (URL).
+            visit_date (datetime): target timestamp
+
+ Returns:
+ A visit.
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/visit/getby')
+ def origin_visit_get_by(self, origin, visit):
+ """Retrieve origin visit's information.
+
+ Args:
+            origin: The occurrence's origin (URL).
+            visit: The visit id.
+
+ Returns:
+ The information on that particular (origin, visit) or None if
+ it does not exist
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/visit/get_latest')
+ def origin_visit_get_latest(
+ self, origin, allowed_statuses=None, require_snapshot=False):
+ """Get the latest origin visit for the given origin, optionally
+ looking only for those with one of the given allowed_statuses
+ or for those with a known snapshot.
+
+ Args:
+ origin (str): the origin's URL
+ allowed_statuses (list of str): list of visit statuses considered
+ to find the latest visit. For instance,
+ ``allowed_statuses=['full']`` will only consider visits that
+ have successfully run to completion.
+ require_snapshot (bool): If True, only a visit with a snapshot
+ will be returned.
+
+ Returns:
+ dict: a dict with the following keys:
+
+ - **origin**: the URL of the origin
+ - **visit**: origin visit id
+ - **type**: type of loader used for the visit
+ - **date**: timestamp of such visit
+ - **status**: Visit's new status
+ - **metadata**: Data associated to the visit
+ - **snapshot** (Optional[sha1_git]): identifier of the snapshot
+ associated to the visit
+ """
+ ...
+
+ @remote_api_endpoint('origin/visit/get_random')
+ def origin_visit_get_random(
+ self, type: str) -> Optional[Dict[str, Any]]:
+        """Randomly select one successful origin visit made in the
+        last 3 months.
+
+        Args:
+            type (str): the type of loader used for the visit (hg, git, ...)
+
+ Returns:
+ dict representing an origin visit, in the same format as
+ :py:meth:`origin_visit_get`.
+
+ """
+ ...
+
+ @remote_api_endpoint('object/find_by_sha1_git')
+ def object_find_by_sha1_git(self, ids):
+ """Return the objects found with the given ids.
+
+ Args:
+ ids: a generator of sha1_gits
+
+ Returns:
+ dict: a mapping from id to the list of objects found. Each object
+ found is itself a dict with keys:
+
+ - sha1_git: the input id
+ - type: the type of object found
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/get')
+ def origin_get(self, origins):
+        """Return origins identified by their urls.
+
+ Args:
+            origins: a list of dictionaries representing the individual
+ origins to find.
+ These dicts have the key url:
+
+ - url (bytes): the url the origin points to
+
+ Returns:
+ dict: the origin dictionary with the keys:
+
+ - id: origin's id
+ - url: origin's url
+
+ Raises:
+            ValueError: if the url or the id does not exist.
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/get_sha1')
+ def origin_get_by_sha1(self, sha1s):
+ """Return origins, identified by the sha1 of their URLs.
+
+ Args:
+ sha1s (list[bytes]): a list of sha1s
+
+ Yields:
+ dicts containing origin information as returned
+ by :meth:`swh.storage.storage.Storage.origin_get`, or None if an
+ origin matching the sha1 is not found.
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/get_range')
+ def origin_get_range(self, origin_from=1, origin_count=100):
+ """Retrieve ``origin_count`` origins whose ids are greater
+        than or equal to ``origin_from``.
+
+ Origins are sorted by id before retrieving them.
+
+ Args:
+ origin_from (int): the minimum id of origins to retrieve
+ origin_count (int): the maximum number of origins to retrieve
+
+ Yields:
+ dicts containing origin information as returned
+ by :meth:`swh.storage.storage.Storage.origin_get`.
+ """
+ ...
+
+ @remote_api_endpoint('origin/list')
+ def origin_list(
+ self, page_token: Optional[str] = None, limit: int = 100) -> dict:
+ """Returns the list of origins
+
+ Args:
+ page_token: opaque token used for pagination.
+ limit: the maximum number of results to return
+
+ Returns:
+ dict: dict with the following keys:
+ - **next_page_token** (str, optional): opaque token to be used as
+              `page_token` for retrieving the next page. If absent, there
+              are no more pages to gather.
+ - **origins** (List[dict]): list of origins, as returned by
+ `origin_get`.
+ """
+ ...
+
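The same opaque-token pagination as elsewhere in this interface; a minimal sketch:

    def iter_origins(storage):
        page_token = None
        while True:
            page = storage.origin_list(page_token=page_token, limit=100)
            yield from page['origins']
            page_token = page.get('next_page_token')
            if page_token is None:
                break
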
+ @remote_api_endpoint('origin/search')
+ def origin_search(self, url_pattern, offset=0, limit=50,
+ regexp=False, with_visit=False):
+ """Search for origins whose urls contain a provided string pattern
+ or match a provided regular expression.
+        The search is performed in a case-insensitive way.
+
+ Args:
+ url_pattern (str): the string pattern to search for in origin urls
+ offset (int): number of found origins to skip before returning
+ results
+ limit (int): the maximum number of found origins to return
+ regexp (bool): if True, consider the provided pattern as a regular
+ expression and return origins whose urls match it
+ with_visit (bool): if True, filter out origins with no visit
+
+ Yields:
+ dicts containing origin information as returned
+ by :meth:`swh.storage.storage.Storage.origin_get`.
+ """
+ ...
+
+ @remote_api_endpoint('origin/count')
+ def origin_count(self, url_pattern, regexp=False,
+ with_visit=False):
+ """Count origins whose urls contain a provided string pattern
+ or match a provided regular expression.
+        The pattern search in origin urls is performed in a case-insensitive
+ way.
+
+ Args:
+ url_pattern (str): the string pattern to search for in origin urls
+ regexp (bool): if True, consider the provided pattern as a regular
+ expression and return origins whose urls match it
+ with_visit (bool): if True, filter out origins with no visit
+
+ Returns:
+ int: The number of origins matching the search criterion.
+ """
+ ...
+
+ @remote_api_endpoint('origin/add_multi')
+ def origin_add(self, origins):
+ """Add origins to the storage
+
+ Args:
+ origins: list of dictionaries representing the individual origins,
+ with the following keys:
+
+ - type: the origin type ('git', 'svn', 'deb', ...)
+ - url (bytes): the url the origin points to
+
+ Returns:
+            list: the given origins, as dicts updated with their id
+
+ """
+ ...
+
+ @remote_api_endpoint('origin/add')
+ def origin_add_one(self, origin):
+ """Add origin to the storage
+
+ Args:
+ origin: dictionary representing the individual origin to add. This
+ dict has the following keys:
+
+ - type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
+ - url (bytes): the url the origin points to
+
+ Returns:
+ the id of the added origin, or of the identical one that already
+ exists.
+
+ """
+ ...
+
+ def stat_counters(self):
+        """Compute statistics about the number of tuples in various tables
+
+ Returns:
+ dict: a dictionary mapping textual labels (e.g., content) to
+ integer values (e.g., the number of tuples in table content)
+
+ """
+ ...
+
+ def refresh_stat_counters(self):
+ """Recomputes the statistics for `stat_counters`."""
+ ...
+
+ @remote_api_endpoint('origin/metadata/add')
+ def origin_metadata_add(self, origin_url, ts, provider, tool, metadata):
+        """Add an origin_metadata for the origin at ts with provenance and
+ metadata.
+
+ Args:
+ origin_url (str): the origin url for which the metadata is added
+ ts (datetime): timestamp of the found metadata
+            provider (int): the provider of metadata (e.g. 'hal')
+ tool (int): tool used to extract metadata
+ metadata (jsonb): the metadata retrieved at the time and location
+ """
+ ...
+
+ @remote_api_endpoint('origin/metadata/get')
+ def origin_metadata_get_by(self, origin_url, provider_type=None):
+        """Retrieve list of all origin_metadata entries for the given origin
+
+ Args:
+ origin_url (str): the origin's URL
+ provider_type (str): (optional) type of provider
+
+ Returns:
+            list of dicts: the origin_metadata dictionaries with the keys:
+
+ - origin_id (int): origin's id
+ - discovery_date (datetime): timestamp of discovery
+ - tool_id (int): metadata's extracting tool
+ - metadata (jsonb)
+ - provider_id (int): metadata's provider
+ - provider_name (str)
+ - provider_type (str)
+ - provider_url (str)
+
+ """
+ ...
+
+ @remote_api_endpoint('tool/add')
+ def tool_add(self, tools):
+ """Add new tools to the storage.
+
+ Args:
+ tools (iterable of :class:`dict`): Tool information to add to
+ storage. Each tool is a :class:`dict` with the following keys:
+
+ - name (:class:`str`): name of the tool
+ - version (:class:`str`): version of the tool
+ - configuration (:class:`dict`): configuration of the tool,
+ must be json-encodable
+
+ Returns:
+            :class:`list` of :class:`dict`: All the tools inserted in storage
+ (including the internal ``id``). The order of the list is not
+ guaranteed to match the order of the initial list.
+
+ """
+ ...
+
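For illustration, registering one tool and recovering its internal id; the name and configuration are placeholder values:

    tool = {
        'name': 'nomos',
        'version': '3.1.0',
        'configuration': {'command_line': 'nomossa <filepath>'},
    }
    inserted = storage.tool_add([tool])
    tool_id = inserted[0]['id']
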
+ @remote_api_endpoint('tool/data')
+ def tool_get(self, tool):
+ """Retrieve tool information.
+
+ Args:
+ tool (dict): Tool information we want to retrieve from storage.
+ The dicts have the same keys as those used in :func:`tool_add`.
+
+ Returns:
+ dict: The full tool information if it exists (``id`` included),
+ None otherwise.
+
+ """
+ ...
+
+ @remote_api_endpoint('provider/add')
+ def metadata_provider_add(self, provider_name, provider_type, provider_url,
+ metadata):
+ """Add a metadata provider.
+
+ Args:
+ provider_name (str): Its name
+            provider_type (str): Its type (e.g. `'deposit-client'`)
+ provider_url (str): Its URL
+ metadata: JSON-encodable object
+
+ Returns:
+ int: an identifier of the provider
+ """
+ ...
+
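A hedged round-trip sketch, with placeholder provider values:

    provider_id = storage.metadata_provider_add(
        provider_name='hal',
        provider_type='deposit-client',
        provider_url='https://hal.example.org/',
        metadata={})
    provider = storage.metadata_provider_get(provider_id)
    assert provider['provider_name'] == 'hal'
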
+ @remote_api_endpoint('provider/get')
+ def metadata_provider_get(self, provider_id):
+ """Get a metadata provider
+
+ Args:
+ provider_id: Its identifier, as given by `metadata_provider_add`.
+
+ Returns:
+ dict: same as `metadata_provider_add`;
+ or None if it does not exist.
+ """
+ ...
+
+ @remote_api_endpoint('provider/getby')
+ def metadata_provider_get_by(self, provider):
+ """Get a metadata provider
+
+ Args:
+ provider (dict): A dictionary with keys:
+ * provider_name: Its name
+ * provider_url: Its URL
+
+ Returns:
+ dict: same as `metadata_provider_add`;
+ or None if it does not exist.
+ """
+ ...
+
+ @remote_api_endpoint('algos/diff_directories')
+ def diff_directories(self, from_dir, to_dir, track_renaming=False):
+ """Compute the list of file changes introduced between two arbitrary
+ directories (insertion / deletion / modification / renaming of files).
+
+ Args:
+ from_dir (bytes): identifier of the directory to compare from
+ to_dir (bytes): identifier of the directory to compare to
+            track_renaming (bool): whether or not to track file renaming
+
+ Returns:
+ A list of dict describing the introduced file changes
+ (see :func:`swh.storage.algos.diff.diff_directories`
+ for more details).
+ """
+ ...
+
+ @remote_api_endpoint('algos/diff_revisions')
+ def diff_revisions(self, from_rev, to_rev, track_renaming=False):
+ """Compute the list of file changes introduced between two arbitrary
+ revisions (insertion / deletion / modification / renaming of files).
+
+ Args:
+ from_rev (bytes): identifier of the revision to compare from
+ to_rev (bytes): identifier of the revision to compare to
+            track_renaming (bool): whether or not to track file renaming
+
+ Returns:
+ A list of dict describing the introduced file changes
+ (see :func:`swh.storage.algos.diff.diff_directories`
+ for more details).
+ """
+ ...
+
+ @remote_api_endpoint('algos/diff_revision')
+ def diff_revision(self, revision, track_renaming=False):
+ """Compute the list of file changes introduced by a specific revision
+ (insertion / deletion / modification / renaming of files) by comparing
+ it against its first parent.
+
+ Args:
+ revision (bytes): identifier of the revision from which to
+ compute the list of files changes
+            track_renaming (bool): whether or not to track file renaming
+
+ Returns:
+ A list of dict describing the introduced file changes
+ (see :func:`swh.storage.algos.diff.diff_directories`
+ for more details).
+ """
+ ...
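For illustration, a hedged usage sketch; the 'type' key and its values follow the change kinds listed above and are an assumption about the dicts returned by swh.storage.algos.diff:

    changes = storage.diff_revision(rev_id, track_renaming=True)
    for change in changes:
        # e.g. 'insert', 'delete', 'modify' or 'rename' (assumed values)
        print(change['type'])
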
diff --git a/swh/storage/storage.py b/swh/storage/storage.py
index b74d31b8..5a919934 100644
--- a/swh/storage/storage.py
+++ b/swh/storage/storage.py
@@ -1,2171 +1,1162 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
import datetime
import itertools
import json
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from typing import Any, Dict, List, Optional
import dateutil.parser
import psycopg2
import psycopg2.pool
-from swh.core.api import remote_api_endpoint
from swh.model.model import SHA1_SIZE
from swh.model.hashutil import ALGORITHMS, hash_to_bytes, hash_to_hex
from swh.objstorage import get_objstorage
from swh.objstorage.exc import ObjNotFoundError
try:
from swh.journal.writer import get_journal_writer
except ImportError:
get_journal_writer = None # type: ignore
# mypy limitation, see https://github.com/python/mypy/issues/1153
from . import converters
from .common import db_transaction_generator, db_transaction
from .db import Db
from .exc import StorageDBError
from .algos import diff
from .metrics import timed, send_metric, process_metrics
from .utils import get_partition_bounds_bytes
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
EMPTY_SNAPSHOT_ID = hash_to_bytes('1a8893e6a86f444e8be8e7bda6cb34fb1735a00e')
"""Identifier for the empty snapshot"""
class Storage():
"""SWH storage proxy, encompassing DB and object storage
"""
def __init__(self, db, objstorage, min_pool_conns=1, max_pool_conns=10,
journal_writer=None):
"""
Args:
db_conn: either a libpq connection string, or a psycopg2 connection
obj_root: path to the root of the object storage
"""
try:
if isinstance(db, psycopg2.extensions.connection):
self._pool = None
self._db = Db(db)
else:
self._pool = psycopg2.pool.ThreadedConnectionPool(
min_pool_conns, max_pool_conns, db
)
self._db = None
except psycopg2.OperationalError as e:
raise StorageDBError(e)
self.objstorage = get_objstorage(**objstorage)
if journal_writer:
if get_journal_writer is None:
raise EnvironmentError(
'You need the swh.journal package to use the '
'journal_writer feature')
self.journal_writer = get_journal_writer(**journal_writer)
else:
self.journal_writer = None
def get_db(self):
if self._db:
return self._db
else:
return Db.from_pool(self._pool)
def put_db(self, db):
if db is not self._db:
db.put_conn()
@contextmanager
def db(self):
db = None
try:
db = self.get_db()
yield db
finally:
if db:
self.put_db(db)
- @remote_api_endpoint('check_config')
@timed
@db_transaction()
def check_config(self, *, check_write, db=None, cur=None):
- """Check that the storage is configured and ready to go."""
if not self.objstorage.check_config(check_write=check_write):
return False
# Check permissions on one of the tables
if check_write:
check = 'INSERT'
else:
check = 'SELECT'
cur.execute(
"select has_table_privilege(current_user, 'content', %s)",
(check,)
)
return cur.fetchone()[0]
def _content_unique_key(self, hash, db):
"""Given a hash (tuple or dict), return a unique key from the
aggregation of keys.
"""
keys = db.content_hash_keys
if isinstance(hash, tuple):
return hash
return tuple([hash[k] for k in keys])
@staticmethod
def _normalize_content(d):
d = d.copy()
if 'status' not in d:
d['status'] = 'visible'
if 'length' not in d:
d['length'] = -1
return d
@staticmethod
def _validate_content(d):
"""Sanity checks on status / reason / length, that postgresql
doesn't enforce."""
if d['status'] not in ('visible', 'absent', 'hidden'):
raise ValueError('Invalid content status: {}'.format(d['status']))
if d['status'] != 'absent' and d.get('reason') is not None:
raise ValueError(
'Must not provide a reason if content is not absent.')
if d['length'] < -1:
raise ValueError('Content length must be positive or -1.')
def _filter_new_content(self, content, db=None, cur=None):
"""Sort contents into buckets 'with data' and 'without data',
and filter out those already in the database."""
content_by_status = defaultdict(list)
for d in content:
content_by_status[d['status']].append(d)
content_with_data = content_by_status['visible'] \
+ content_by_status['hidden']
content_without_data = content_by_status['absent']
missing_content = set(self.content_missing(content_with_data,
db=db, cur=cur))
missing_skipped = set(self._content_unique_key(hashes, db)
for hashes in self.skipped_content_missing(
content_without_data, db=db, cur=cur))
content_with_data = [
cont for cont in content_with_data
if cont['sha1'] in missing_content]
content_without_data = [
cont for cont in content_without_data
if self._content_unique_key(cont, db) in missing_skipped]
summary = {
'content:add': len(missing_content),
'skipped_content:add': len(missing_skipped),
}
return (content_with_data, content_without_data, summary)
def _content_add_metadata(self, db, cur,
content_with_data, content_without_data):
"""Add content to the postgresql database but not the object storage.
"""
if content_with_data:
# create temporary table for metadata injection
db.mktemp('content', cur)
db.copy_to(content_with_data, 'tmp_content',
db.content_add_keys, cur)
# move metadata in place
try:
db.content_add_from_temp(cur)
except psycopg2.IntegrityError as e:
from . import HashCollision
if e.diag.sqlstate == '23505' and \
e.diag.table_name == 'content':
constraint_to_hash_name = {
'content_pkey': 'sha1',
'content_sha1_git_idx': 'sha1_git',
'content_sha256_idx': 'sha256',
}
colliding_hash_name = constraint_to_hash_name \
.get(e.diag.constraint_name)
raise HashCollision(colliding_hash_name) from None
else:
raise
if content_without_data:
content_without_data = \
[cont.copy() for cont in content_without_data]
origin_ids = db.origin_id_get_by_url(
[cont.get('origin') for cont in content_without_data],
cur=cur)
for (cont, origin_id) in zip(content_without_data, origin_ids):
if 'origin' in cont:
cont['origin'] = origin_id
db.mktemp('skipped_content', cur)
db.copy_to(content_without_data, 'tmp_skipped_content',
db.skipped_content_keys, cur)
# move metadata in place
db.skipped_content_add_from_temp(cur)
- @remote_api_endpoint('content/add')
@timed
@process_metrics
@db_transaction()
def content_add(self, content, db=None, cur=None):
- """Add content blobs to the storage
-
- Note: in case of DB errors, objects might have already been added to
- the object storage and will not be removed. Since addition to the
- object storage is idempotent, that should not be a problem.
-
- Args:
- contents (iterable): iterable of dictionaries representing
- individual pieces of content to add. Each dictionary has the
- following keys:
-
- - data (bytes): the actual content
- - length (int): content length (default: -1)
- - one key for each checksum algorithm in
- :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
- corresponding checksum
- - status (str): one of visible, hidden, absent
- - reason (str): if status = absent, the reason why
- - origin (int): if status = absent, the origin we saw the
- content in
-
- Raises:
-
- In case of errors, nothing is stored in the db (in the
- objstorage, it could though). The following exceptions can
- occur:
-
- - HashCollision in case of collision
- - Any other exceptions raise by the db
-
- Returns:
- Summary dict with the following key and associated values:
-
- content:add: New contents added
- content:add:bytes: Sum of the contents' length data
- skipped_content:add: New skipped contents (no data) added
- """
content = [dict(c.items()) for c in content] # semi-shallow copy
now = datetime.datetime.now(tz=datetime.timezone.utc)
for item in content:
item['ctime'] = now
content = [self._normalize_content(c) for c in content]
for c in content:
self._validate_content(c)
(content_with_data, content_without_data, summary) = \
self._filter_new_content(content, db, cur)
if self.journal_writer:
for item in content_with_data:
if 'data' in item:
item = item.copy()
del item['data']
self.journal_writer.write_addition('content', item)
for item in content_without_data:
self.journal_writer.write_addition('content', item)
def add_to_objstorage():
"""Add to objstorage the new missing_content
Returns:
Sum of all the content's data length pushed to the
objstorage. Content present twice is only sent once.
"""
content_bytes_added = 0
data = {}
for cont in content_with_data:
if cont['sha1'] not in data:
data[cont['sha1']] = cont['data']
content_bytes_added += max(0, cont['length'])
# FIXME: Since we do the filtering anyway now, we might as
# well make the objstorage's add_batch call return what we
# want here (real bytes added)... that'd simplify this...
self.objstorage.add_batch(data)
return content_bytes_added
with ThreadPoolExecutor(max_workers=1) as executor:
added_to_objstorage = executor.submit(add_to_objstorage)
self._content_add_metadata(
db, cur, content_with_data, content_without_data)
# Wait for objstorage addition before returning from the
# transaction, bubbling up any exception
content_bytes_added = added_to_objstorage.result()
summary['content:add:bytes'] = content_bytes_added
return summary
- @remote_api_endpoint('content/update')
@timed
@db_transaction()
def content_update(self, content, keys=[], db=None, cur=None):
- """Update content blobs to the storage. Does nothing for unknown
- contents or skipped ones.
-
- Args:
- content (iterable): iterable of dictionaries representing
- individual pieces of content to update. Each dictionary has the
- following keys:
-
- - data (bytes): the actual content
- - length (int): content length (default: -1)
- - one key for each checksum algorithm in
- :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
- corresponding checksum
- - status (str): one of visible, hidden, absent
-
- keys (list): List of keys (str) whose values needs an update, e.g.,
- new hash column
-
- """
# TODO: Add a check on input keys. How to properly implement
# this? We don't know yet the new columns.
if self.journal_writer:
raise NotImplementedError(
- 'content_update is not yet support with a journal_writer.')
+ 'content_update is not yet supported with a journal_writer.')
db.mktemp('content', cur)
select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
db.copy_to(content, 'tmp_content', select_keys, cur)
db.content_update_from_temp(keys_to_update=keys,
cur=cur)
- @remote_api_endpoint('content/add_metadata')
@timed
@process_metrics
@db_transaction()
def content_add_metadata(self, content, db=None, cur=None):
- """Add content metadata to the storage (like `content_add`, but
- without inserting to the objstorage).
-
- Args:
- content (iterable): iterable of dictionaries representing
- individual pieces of content to add. Each dictionary has the
- following keys:
-
- - length (int): content length (default: -1)
- - one key for each checksum algorithm in
- :data:`swh.model.hashutil.ALGORITHMS`, mapped to the
- corresponding checksum
- - status (str): one of visible, hidden, absent
- - reason (str): if status = absent, the reason why
- - origin (int): if status = absent, the origin we saw the
- content in
- - ctime (datetime): time of insertion in the archive
-
- Returns:
- Summary dict with the following key and associated values:
-
- content:add: New contents added
- skipped_content:add: New skipped contents (no data) added
- """
-
content = [self._normalize_content(c) for c in content]
for c in content:
self._validate_content(c)
(content_with_data, content_without_data, summary) = \
self._filter_new_content(content, db, cur)
if self.journal_writer:
for item in itertools.chain(content_with_data,
content_without_data):
assert 'data' not in item
self.journal_writer.write_addition('content', item)
self._content_add_metadata(
db, cur, content_with_data, content_without_data)
return summary
- @remote_api_endpoint('content/data')
@timed
def content_get(self, content):
- """Retrieve in bulk contents and their data.
-
- This generator yields exactly as many items than provided sha1
- identifiers, but callers should not assume this will always be true.
-
- It may also yield `None` values in case an object was not found.
-
- Args:
- content: iterables of sha1
-
- Yields:
- Dict[str, bytes]: Generates streams of contents as dict with their
- raw data:
-
- - sha1 (bytes): content id
- - data (bytes): content's raw data
-
- Raises:
- ValueError in case of too much contents are required.
- cf. BULK_BLOCK_CONTENT_LEN_MAX
-
- """
# FIXME: Make this method support slicing the `data`.
if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
raise ValueError(
"Send at maximum %s contents." % BULK_BLOCK_CONTENT_LEN_MAX)
for obj_id in content:
try:
data = self.objstorage.get(obj_id)
except ObjNotFoundError:
yield None
continue
yield {'sha1': obj_id, 'data': data}
- @remote_api_endpoint('content/range')
@timed
@db_transaction()
def content_get_range(self, start, end, limit=1000, db=None, cur=None):
- """Retrieve contents within range [start, end] bound by limit.
-
- Note that this function may return more than one blob per hash. The
- limit is enforced with multiplicity (ie. two blobs with the same hash
- will count twice toward the limit).
-
- Args:
- **start** (bytes): Starting identifier range (expected smaller
- than end)
- **end** (bytes): Ending identifier range (expected larger
- than start)
- **limit** (int): Limit result (default to 1000)
-
- Returns:
- a dict with keys:
- - contents [dict]: iterable of contents in between the range.
- - next (bytes): There remains content in the range
- starting from this next sha1
-
- """
if limit is None:
raise ValueError('Development error: limit should not be None')
contents = []
next_content = None
for counter, content_row in enumerate(
db.content_get_range(start, end, limit+1, cur)):
content = dict(zip(db.content_get_metadata_keys, content_row))
if counter >= limit:
# take the last content for the next page starting from this
next_content = content['sha1']
break
contents.append(content)
return {
'contents': contents,
'next': next_content,
}
- @remote_api_endpoint('content/partition')
@timed
@db_transaction()
def content_get_partition(
self, partition_id: int, nb_partitions: int, limit: int = 1000,
page_token: str = None, db=None, cur=None):
- """Splits contents into nb_partitions, and returns one of these based on
- partition_id (which must be in [0, nb_partitions-1])
-
- There is no guarantee on how the partitioning is done, or the
- result order.
-
- Args:
- partition_id (int): index of the partition to fetch
- nb_partitions (int): total number of partitions to split into
- limit (int): Limit result (default to 1000)
- page_token (Optional[str]): opaque token used for pagination.
-
- Returns:
- a dict with keys:
- - contents (List[dict]): iterable of contents in the partition.
- - **next_page_token** (Optional[str]): opaque token to be used as
- `page_token` for retrieving the next page. if absent, there is
- no more pages to gather.
- """
if limit is None:
raise ValueError('Development error: limit should not be None')
(start, end) = get_partition_bounds_bytes(
partition_id, nb_partitions, SHA1_SIZE)
if page_token:
start = hash_to_bytes(page_token)
if end is None:
end = b'\xff'*SHA1_SIZE
result = self.content_get_range(start, end, limit)
result2 = {
'contents': result['contents'],
'next_page_token': None,
}
if result['next']:
result2['next_page_token'] = hash_to_hex(result['next'])
return result2
- @remote_api_endpoint('content/metadata')
@timed
@db_transaction(statement_timeout=500)
def content_get_metadata(
self, contents: List[bytes],
db=None, cur=None) -> Dict[bytes, List[Dict]]:
- """Retrieve content metadata in bulk
-
- Args:
- content: iterable of content identifiers (sha1)
-
- Returns:
- a dict with keys the content's sha1 and the associated value
- either the existing content's metadata or None if the content does
- not exist.
-
- """
result: Dict[bytes, List[Dict]] = {sha1: [] for sha1 in contents}
for row in db.content_get_metadata_from_sha1s(contents, cur):
content_meta = dict(zip(db.content_get_metadata_keys, row))
result[content_meta['sha1']].append(content_meta)
return result
- @remote_api_endpoint('content/missing')
@timed
@db_transaction_generator()
def content_missing(self, content, key_hash='sha1', db=None, cur=None):
- """List content missing from storage
-
- Args:
- content ([dict]): iterable of dictionaries whose keys are
- either 'length' or an item of
- :data:`swh.model.hashutil.ALGORITHMS`;
- mapped to the corresponding checksum
- (or length).
-
- key_hash (str): name of the column to use as hash id
- result (default: 'sha1')
-
- Returns:
- iterable ([bytes]): missing content ids (as per the
- key_hash column)
-
- Raises:
- TODO: an exception when we get a hash collision.
-
- """
keys = db.content_hash_keys
if key_hash not in keys:
raise ValueError("key_hash should be one of %s" % keys)
key_hash_idx = keys.index(key_hash)
if not content:
return
for obj in db.content_missing_from_list(content, cur):
yield obj[key_hash_idx]
- @remote_api_endpoint('content/missing/sha1')
@timed
@db_transaction_generator()
def content_missing_per_sha1(self, contents, db=None, cur=None):
- """List content missing from storage based only on sha1.
-
- Args:
- contents: Iterable of sha1 to check for absence.
-
- Returns:
- iterable: missing ids
-
- Raises:
- TODO: an exception when we get a hash collision.
-
- """
for obj in db.content_missing_per_sha1(contents, cur):
yield obj[0]
- @remote_api_endpoint('content/missing/sha1_git')
@timed
@db_transaction_generator()
def content_missing_per_sha1_git(self, contents, db=None, cur=None):
- """List content missing from storage based only on sha1_git.
-
- Args:
- contents (Iterable): An iterable of content id (sha1_git)
-
- Yields:
- missing contents sha1_git
- """
for obj in db.content_missing_per_sha1_git(contents, cur):
yield obj[0]
- @remote_api_endpoint('content/skipped/missing')
@timed
@db_transaction_generator()
def skipped_content_missing(self, contents, db=None, cur=None):
- """List skipped_content missing from storage
-
- Args:
- content: iterable of dictionaries containing the data for each
- checksum algorithm.
-
- Returns:
- iterable: missing signatures
-
- """
for content in db.skipped_content_missing(contents, cur):
yield dict(zip(db.content_hash_keys, content))
- @remote_api_endpoint('content/present')
@timed
@db_transaction()
def content_find(self, content, db=None, cur=None):
- """Find a content hash in db.
-
- Args:
- content: a dictionary representing one content hash, mapping
- checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to
- checksum values
-
- Returns:
- a triplet (sha1, sha1_git, sha256) if the content exist
- or None otherwise.
-
- Raises:
- ValueError: in case the key of the dictionary is not sha1, sha1_git
- nor sha256.
-
- """
if not set(content).intersection(ALGORITHMS):
raise ValueError('content keys must contain at least one of: '
'sha1, sha1_git, sha256, blake2s256')
contents = db.content_find(sha1=content.get('sha1'),
sha1_git=content.get('sha1_git'),
sha256=content.get('sha256'),
blake2s256=content.get('blake2s256'),
cur=cur)
return [dict(zip(db.content_find_cols, content))
for content in contents]
- @remote_api_endpoint('content/get_random')
@timed
@db_transaction()
def content_get_random(self, db=None, cur=None):
- """Finds a random content id.
-
- Returns:
- a sha1_git
- """
return db.content_get_random(cur)
- @remote_api_endpoint('directory/add')
@timed
@process_metrics
@db_transaction()
def directory_add(self, directories, db=None, cur=None):
- """Add directories to the storage
-
- Args:
- directories (iterable): iterable of dictionaries representing the
- individual directories to add. Each dict has the following
- keys:
-
- - id (sha1_git): the id of the directory to add
- - entries (list): list of dicts for each entry in the
- directory. Each dict has the following keys:
-
- - name (bytes)
- - type (one of 'file', 'dir', 'rev'): type of the
- directory entry (file, directory, revision)
- - target (sha1_git): id of the object pointed at by the
- directory entry
- - perms (int): entry permissions
-
- Returns:
- Summary dict of keys with associated count as values:
-
- directory:add: Number of directories actually added
-
- """
directories = list(directories)
summary = {'directory:add': 0}
dirs = set()
dir_entries = {
'file': defaultdict(list),
'dir': defaultdict(list),
'rev': defaultdict(list),
}
for cur_dir in directories:
dir_id = cur_dir['id']
dirs.add(dir_id)
for src_entry in cur_dir['entries']:
entry = src_entry.copy()
entry['dir_id'] = dir_id
if entry['type'] not in ('file', 'dir', 'rev'):
raise ValueError(
'Entry type must be file, dir, or rev; not %s'
% entry['type'])
dir_entries[entry['type']][dir_id].append(entry)
dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur))
if not dirs_missing:
return summary
if self.journal_writer:
self.journal_writer.write_additions(
'directory',
(dir_ for dir_ in directories
if dir_['id'] in dirs_missing))
# Copy directory ids
dirs_missing_dict = ({'id': dir} for dir in dirs_missing)
db.mktemp('directory', cur)
db.copy_to(dirs_missing_dict, 'tmp_directory', ['id'], cur)
# Copy entries
for entry_type, entry_list in dir_entries.items():
entries = itertools.chain.from_iterable(
entries_for_dir
for dir_id, entries_for_dir
in entry_list.items()
if dir_id in dirs_missing)
db.mktemp_dir_entry(entry_type)
db.copy_to(
entries,
'tmp_directory_entry_%s' % entry_type,
['target', 'name', 'perms', 'dir_id'],
cur,
)
# Do the final copy
db.directory_add_from_temp(cur)
summary['directory:add'] = len(dirs_missing)
return summary
- @remote_api_endpoint('directory/missing')
@timed
@db_transaction_generator()
def directory_missing(self, directories, db=None, cur=None):
- """List directories missing from storage
-
- Args:
- directories (iterable): an iterable of directory ids
-
- Yields:
- missing directory ids
-
- """
for obj in db.directory_missing_from_list(directories, cur):
yield obj[0]
- @remote_api_endpoint('directory/ls')
@timed
@db_transaction_generator(statement_timeout=20000)
def directory_ls(self, directory, recursive=False, db=None, cur=None):
- """Get entries for one directory.
-
- Args:
- - directory: the directory to list entries from.
- - recursive: if set, list entries recursively from this directory.
-
- Returns:
- List of entries in the directory.
-
- If `recursive=True`, names in the path of a dir/file not at the
- root are concatenated with a slash (`/`).
-
- """
if recursive:
res_gen = db.directory_walk(directory, cur=cur)
else:
res_gen = db.directory_walk_one(directory, cur=cur)
for line in res_gen:
yield dict(zip(db.directory_ls_cols, line))
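A short usage sketch (assuming the `directory` dict from the previous sketch was added): with `recursive=True`, entries below the root come back with slash-joined names, e.g. b'subdir/README':

for entry in storage.directory_ls(directory['id'], recursive=True):
    print(entry['name'], entry['type'], entry['perms'])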
- @remote_api_endpoint('directory/path')
@timed
@db_transaction(statement_timeout=2000)
def directory_entry_get_by_path(self, directory, paths, db=None, cur=None):
- """Get the directory entry (either file or dir) from directory with path.
-
- Args:
- - directory: sha1 of the top level directory
- - paths: path to lookup from the top level directory. From left
- (top) to right (bottom).
-
- Returns:
- The corresponding directory entry if found, None otherwise.
-
- """
res = db.directory_entry_get_by_path(directory, paths, cur)
if res:
return dict(zip(db.directory_ls_cols, res))
- @remote_api_endpoint('directory/get_random')
@timed
@db_transaction()
def directory_get_random(self, db=None, cur=None):
- """Finds a random directory id.
-
- Returns:
- a sha1_git
- """
return db.directory_get_random(cur)
- @remote_api_endpoint('revision/add')
@timed
@process_metrics
@db_transaction()
def revision_add(self, revisions, db=None, cur=None):
- """Add revisions to the storage
-
- Args:
- revisions (Iterable[dict]): iterable of dictionaries representing
- the individual revisions to add. Each dict has the following
- keys:
-
- - **id** (:class:`sha1_git`): id of the revision to add
- - **date** (:class:`dict`): date the revision was written
- - **committer_date** (:class:`dict`): date the revision got
- added to the origin
- - **type** (one of 'git', 'tar'): type of the
- revision added
- - **directory** (:class:`sha1_git`): the directory the
- revision points at
- - **message** (:class:`bytes`): the message associated with
- the revision
- - **author** (:class:`Dict[str, bytes]`): dictionary with
- keys: name, fullname, email
- - **committer** (:class:`Dict[str, bytes]`): dictionary with
- keys: name, fullname, email
- - **metadata** (:class:`jsonb`): extra information as
- dictionary
- - **synthetic** (:class:`bool`): whether the revision is
- synthetic (e.g. created from a tarball or a bare directory)
- - **parents** (:class:`list[sha1_git]`): the parents of
- this revision
-
- date dictionaries have the form defined in :mod:`swh.model`.
-
- Returns:
- Summary dict of keys with associated count as values
-
- revision:add: New objects actually stored in db
-
- """
revisions = list(revisions)
summary = {'revision:add': 0}
revisions_missing = set(self.revision_missing(
set(revision['id'] for revision in revisions),
db=db, cur=cur))
if not revisions_missing:
return summary
db.mktemp_revision(cur)
revisions_filtered = [
revision for revision in revisions
if revision['id'] in revisions_missing]
if self.journal_writer:
self.journal_writer.write_additions('revision', revisions_filtered)
revisions_filtered = map(converters.revision_to_db, revisions_filtered)
parents_filtered = []
db.copy_to(
revisions_filtered, 'tmp_revision', db.revision_add_cols,
cur,
lambda rev: parents_filtered.extend(rev['parents']))
db.revision_add_from_temp(cur)
db.copy_to(parents_filtered, 'revision_history',
['id', 'parent_id', 'parent_rank'], cur)
return {'revision:add': len(revisions_missing)}
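A sketch of a minimal revision dict (hypothetical ids); the date fields are in the normalized form from swh.model, e.g. as produced by identifiers.normalize_timestamp:

from swh.model import identifiers
from swh.model.hashutil import hash_to_bytes

date = identifiers.normalize_timestamp(1565096932)
revision = {
    'id': hash_to_bytes('33' * 20),
    'date': date,
    'committer_date': date,
    'type': 'git',
    'directory': hash_to_bytes('11' * 20),  # root directory of the revision
    'message': b'initial import',
    'author': {'name': b'Jane', 'email': b'jane@example.com',
               'fullname': b'Jane <jane@example.com>'},
    'committer': {'name': b'Jane', 'email': b'jane@example.com',
                  'fullname': b'Jane <jane@example.com>'},
    'metadata': None,
    'synthetic': False,
    'parents': [],
}
assert storage.revision_add([revision]) == {'revision:add': 1}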
- @remote_api_endpoint('revision/missing')
@timed
@db_transaction_generator()
def revision_missing(self, revisions, db=None, cur=None):
- """List revisions missing from storage
-
- Args:
- revisions (iterable): revision ids
-
- Yields:
- missing revision ids
-
- """
if not revisions:
return
for obj in db.revision_missing_from_list(revisions, cur):
yield obj[0]
- @remote_api_endpoint('revision')
@timed
@db_transaction_generator(statement_timeout=1000)
def revision_get(self, revisions, db=None, cur=None):
- """Get all revisions from storage
-
- Args:
- revisions: an iterable of revision ids
-
- Yields:
- revisions as dictionaries (or None if the revision doesn't
- exist)
-
- """
for line in db.revision_get_from_list(revisions, cur):
data = converters.db_to_revision(
dict(zip(db.revision_get_cols, line))
)
if not data['type']:
yield None
continue
yield data
- @remote_api_endpoint('revision/log')
@timed
@db_transaction_generator(statement_timeout=2000)
def revision_log(self, revisions, limit=None, db=None, cur=None):
- """Fetch revision entry from the given root revisions.
-
- Args:
- revisions: array of root revision to lookup
- limit: limitation on the output result. Default to None.
-
- Yields:
- List of revision log from such revisions root.
-
- """
for line in db.revision_log(revisions, limit, cur):
data = converters.db_to_revision(
dict(zip(db.revision_get_cols, line))
)
if not data['type']:
yield None
continue
yield data
- @remote_api_endpoint('revision/shortlog')
@timed
@db_transaction_generator(statement_timeout=2000)
def revision_shortlog(self, revisions, limit=None, db=None, cur=None):
- """Fetch the shortlog for the given revisions
-
- Args:
- revisions: list of root revisions to look up
- limit: depth limitation for the output
-
- Yields:
- a list of (id, parents) tuples.
-
- """
yield from db.revision_shortlog(revisions, limit, cur)
- @remote_api_endpoint('revision/get_random')
@timed
@db_transaction()
def revision_get_random(self, db=None, cur=None):
- """Finds a random revision id.
-
- Returns:
- a sha1_git
- """
return db.revision_get_random(cur)
- @remote_api_endpoint('release/add')
@timed
@process_metrics
@db_transaction()
def release_add(self, releases, db=None, cur=None):
- """Add releases to the storage
-
- Args:
- releases (Iterable[dict]): iterable of dictionaries representing
- the individual releases to add. Each dict has the following
- keys:
-
- - **id** (:class:`sha1_git`): id of the release to add
- - **revision** (:class:`sha1_git`): id of the revision the
- release points to
- - **date** (:class:`dict`): the date the release was made
- - **name** (:class:`bytes`): the name of the release
- - **comment** (:class:`bytes`): the comment associated with
- the release
- - **author** (:class:`Dict[str, bytes]`): dictionary with
- keys: name, fullname, email
-
- the date dictionary has the form defined in :mod:`swh.model`.
-
- Returns:
- Summary dict of keys with associated count as values
-
- release:add: New objects actually stored in db
-
- """
releases = list(releases)
summary = {'release:add': 0}
release_ids = set(release['id'] for release in releases)
releases_missing = set(self.release_missing(release_ids,
db=db, cur=cur))
if not releases_missing:
return summary
db.mktemp_release(cur)
releases_missing = list(releases_missing)
releases_filtered = [
release for release in releases
if release['id'] in releases_missing
]
if self.journal_writer:
self.journal_writer.write_additions('release', releases_filtered)
releases_filtered = map(converters.release_to_db, releases_filtered)
db.copy_to(releases_filtered, 'tmp_release', db.release_add_cols,
cur)
db.release_add_from_temp(cur)
return {'release:add': len(releases_missing)}
- @remote_api_endpoint('release/missing')
@timed
@db_transaction_generator()
def release_missing(self, releases, db=None, cur=None):
- """List releases missing from storage
-
- Args:
- releases: an iterable of release ids
-
- Returns:
- a list of missing release ids
-
- """
if not releases:
return
for obj in db.release_missing_from_list(releases, cur):
yield obj[0]
- @remote_api_endpoint('release')
@timed
@db_transaction_generator(statement_timeout=500)
def release_get(self, releases, db=None, cur=None):
- """Given a list of sha1, return the releases's information
-
- Args:
- releases: list of sha1s
-
- Yields:
- dicts with the same keys as those given to `release_add`
- (or ``None`` if a release does not exist)
-
- """
for release in db.release_get_from_list(releases, cur):
data = converters.db_to_release(
dict(zip(db.release_get_cols, release))
)
yield data if data['target_type'] else None
- @remote_api_endpoint('release/get_random')
@timed
@db_transaction()
def release_get_random(self, db=None, cur=None):
- """Finds a random release id.
-
- Returns:
- a sha1_git
- """
return db.release_get_random(cur)
- @remote_api_endpoint('snapshot/add')
@timed
@process_metrics
@db_transaction()
def snapshot_add(self, snapshots, db=None, cur=None):
- """Add snapshots to the storage.
-
- Args:
- snapshots ([dict]): the snapshots to add, containing the
- following keys:
-
- - **id** (:class:`bytes`): id of the snapshot
- - **branches** (:class:`dict`): branches the snapshot contains,
- mapping the branch name (:class:`bytes`) to the branch target,
- itself a :class:`dict` (or ``None`` if the branch points to an
- unknown object)
-
- - **target_type** (:class:`str`): one of ``content``,
- ``directory``, ``revision``, ``release``,
- ``snapshot``, ``alias``
- - **target** (:class:`bytes`): identifier of the target
- (currently a ``sha1_git`` for all object kinds, or the name
- of the target branch for aliases)
-
- Returns:
- Summary dict of keys with associated count as values
-
- snapshot:add: Count of objects actually stored in db
-
- """
created_temp_table = False
count = 0
for snapshot in snapshots:
if not db.snapshot_exists(snapshot['id'], cur):
if not created_temp_table:
db.mktemp_snapshot_branch(cur)
created_temp_table = True
db.copy_to(
(
{
'name': name,
'target': info['target'] if info else None,
'target_type': (info['target_type']
if info else None),
}
for name, info in snapshot['branches'].items()
),
'tmp_snapshot_branch',
['name', 'target', 'target_type'],
cur,
)
if self.journal_writer:
self.journal_writer.write_addition('snapshot', snapshot)
db.snapshot_add(snapshot['id'], cur)
count += 1
return {'snapshot:add': count}
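A sketch of the snapshot input shape (hypothetical id), with one regular branch and one alias branch; mapping a branch name to None would denote a dangling pointer:

from swh.model.hashutil import hash_to_bytes

snapshot = {
    'id': hash_to_bytes('44' * 20),
    'branches': {
        b'refs/heads/master': {
            'target': hash_to_bytes('33' * 20),  # id of a revision
            'target_type': 'revision',
        },
        b'HEAD': {
            'target': b'refs/heads/master',      # aliases target a branch name
            'target_type': 'alias',
        },
    },
}
storage.snapshot_add([snapshot])  # -> {'snapshot:add': 1}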
- @remote_api_endpoint('snapshot/missing')
@timed
@db_transaction_generator()
def snapshot_missing(self, snapshots, db=None, cur=None):
- """List snapshots missing from storage
-
- Args:
- snapshots (iterable): an iterable of snapshot ids
-
- Yields:
- missing snapshot ids
-
- """
for obj in db.snapshot_missing_from_list(snapshots, cur):
yield obj[0]
- @remote_api_endpoint('snapshot')
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get(self, snapshot_id, db=None, cur=None):
- """Get the content, possibly partial, of a snapshot with the given id
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- .. warning:: At most 1000 branches contained in the snapshot will be
- returned for performance reasons. In order to browse the whole
- set of branches, the method :meth:`snapshot_get_branches`
- should be used instead.
-
- Args:
- snapshot_id (bytes): identifier of the snapshot
- Returns:
- dict: a dict with three keys:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has fewer than 1000
- branches.
- """
return self.snapshot_get_branches(snapshot_id, db=db, cur=cur)
- @remote_api_endpoint('snapshot/by_origin_visit')
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get_by_origin_visit(self, origin, visit, db=None, cur=None):
- """Get the content, possibly partial, of a snapshot for the given origin visit
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- .. warning:: At most 1000 branches contained in the snapshot will be
- returned for performance reasons. In order to browse the whole
- set of branches, the method :meth:`snapshot_get_branches`
- should be used instead.
-
- Args:
- origin (int): the origin identifier
- visit (int): the visit identifier
- Returns:
- dict: None if the snapshot does not exist;
- a dict with three keys otherwise:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has fewer than 1000
- branches.
-
- """
snapshot_id = db.snapshot_get_by_origin_visit(origin, visit, cur)
if snapshot_id:
return self.snapshot_get(snapshot_id, db=db, cur=cur)
return None
- @remote_api_endpoint('snapshot/latest')
@timed
@db_transaction(statement_timeout=4000)
def snapshot_get_latest(self, origin, allowed_statuses=None, db=None,
cur=None):
- """Get the content, possibly partial, of the latest snapshot for the
- given origin, optionally only from visits that have one of the given
- allowed_statuses
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- .. warning:: At most 1000 branches contained in the snapshot will be
- returned for performance reasons. In order to browse the whole
- set of branches, the method :meth:`snapshot_get_branches`
- should be used instead.
-
- Args:
- origin (str): the origin's URL
- allowed_statuses (list of str): list of visit statuses considered
- to find the latest snapshot for the visit. For instance,
- ``allowed_statuses=['full']`` will only consider visits that
- have successfully run to completion.
- Returns:
- dict: a dict with three keys:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has fewer than 1000
- branches.
- """
if isinstance(origin, int):
origin = self.origin_get({'id': origin}, db=db, cur=cur)
if not origin:
return
origin = origin['url']
origin_visit = self.origin_visit_get_latest(
origin, allowed_statuses=allowed_statuses, require_snapshot=True,
db=db, cur=cur)
if origin_visit and origin_visit['snapshot']:
snapshot = self.snapshot_get(
origin_visit['snapshot'], db=db, cur=cur)
if not snapshot:
raise ValueError(
'last origin visit references an unknown snapshot')
return snapshot
- @remote_api_endpoint('snapshot/count_branches')
@timed
@db_transaction(statement_timeout=2000)
def snapshot_count_branches(self, snapshot_id, db=None, cur=None):
- """Count the number of branches in the snapshot with the given id
-
- Args:
- snapshot_id (bytes): identifier of the snapshot
-
- Returns:
- dict: A dict whose keys are the target types of the branches and
- whose values are their respective counts
- """
return dict([bc for bc in
db.snapshot_count_branches(snapshot_id, cur)])
- @remote_api_endpoint('snapshot/get_branches')
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get_branches(self, snapshot_id, branches_from=b'',
branches_count=1000, target_types=None,
db=None, cur=None):
- """Get the content, possibly partial, of a snapshot with the given id
-
- The branches of the snapshot are iterated in the lexicographical
- order of their names.
-
- Args:
- snapshot_id (bytes): identifier of the snapshot
- branches_from (bytes): optional parameter used to skip branches
- whose name is lexicographically lower than it before
- returning them
- branches_count (int): optional parameter used to limit the
- number of returned branches
- target_types (list): optional parameter used to filter the
- target types of branch to return (possible values that can be
- contained in that list are `'content', 'directory',
- 'revision', 'release', 'snapshot', 'alias'`)
- Returns:
- dict: None if the snapshot does not exist;
- a dict with three keys otherwise:
- * **id**: identifier of the snapshot
- * **branches**: a dict of branches contained in the snapshot
- whose keys are the branches' names.
- * **next_branch**: the name of the first branch not returned
- or :const:`None` if the snapshot has no more than
- `branches_count` branches left, starting from `branches_from`
- (inclusive).
- """
if snapshot_id == EMPTY_SNAPSHOT_ID:
return {
'id': snapshot_id,
'branches': {},
'next_branch': None,
}
branches = {}
next_branch = None
fetched_branches = list(db.snapshot_get_by_id(
snapshot_id, branches_from=branches_from,
branches_count=branches_count+1, target_types=target_types,
cur=cur,
))
for branch in fetched_branches[:branches_count]:
branch = dict(zip(db.snapshot_get_cols, branch))
del branch['snapshot_id']
name = branch.pop('name')
if branch == {'target': None, 'target_type': None}:
branch = None
branches[name] = branch
if len(fetched_branches) > branches_count:
branch = dict(zip(db.snapshot_get_cols, fetched_branches[-1]))
next_branch = branch['name']
if branches:
return {
'id': snapshot_id,
'branches': branches,
'next_branch': next_branch,
}
return None
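Pagination sketch: to retrieve every branch of a possibly large snapshot, keep following next_branch until it comes back as None (snapshot_get_branches itself returns None for an unknown snapshot id):

branches = {}
branches_from = b''
while True:
    partial = storage.snapshot_get_branches(
        snapshot['id'], branches_from=branches_from, branches_count=1000)
    if partial is None:
        break                      # unknown snapshot
    branches.update(partial['branches'])
    if partial['next_branch'] is None:
        break                      # all branches fetched
    branches_from = partial['next_branch']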
- @remote_api_endpoint('snapshot/get_random')
@timed
@db_transaction()
def snapshot_get_random(self, db=None, cur=None):
- """Finds a random snapshot id.
-
- Returns:
- a sha1_git
- """
return db.snapshot_get_random(cur)
- @remote_api_endpoint('origin/visit/add')
@timed
@db_transaction()
def origin_visit_add(self, origin, date, type,
db=None, cur=None):
- """Add an origin_visit for the origin at ts with status 'ongoing'.
-
- Args:
- origin (str): visited origin's identifier or URL
- date (Union[str,datetime]): timestamp of the visit
- type (str): the type of loader used for the visit (hg, git, ...)
-
- Returns:
- dict: dictionary with keys origin and visit where:
-
- - origin: origin identifier
- - visit: the visit identifier for the new visit occurrence
-
- """
origin_url = origin
if isinstance(date, str):
# FIXME: Converge on iso8601 at some point
date = dateutil.parser.parse(date)
visit_id = db.origin_visit_add(origin_url, date, type, cur)
if self.journal_writer:
# We can write to the journal only after inserting to the
# DB, because we want the id of the visit
self.journal_writer.write_addition('origin_visit', {
'origin': origin_url, 'date': date, 'type': type,
'visit': visit_id,
'status': 'ongoing', 'metadata': None, 'snapshot': None})
send_metric('origin_visit:add', count=1, method_name='origin_visit')
return {
'origin': origin_url,
'visit': visit_id,
}
- @remote_api_endpoint('origin/visit/update')
@timed
@db_transaction()
def origin_visit_update(self, origin, visit_id, status=None,
metadata=None, snapshot=None,
db=None, cur=None):
- """Update an origin_visit's status.
-
- Args:
- origin (str): visited origin's URL
- visit_id: Visit's id
- status: Visit's new status
- metadata: Data associated to the visit
- snapshot (sha1_git): identifier of the snapshot to add to
- the visit
-
- Returns:
- None
-
- """
if not isinstance(origin, str):
raise TypeError('origin must be a string, not %r' % (origin,))
origin_url = origin
visit = db.origin_visit_get(origin_url, visit_id, cur=cur)
if not visit:
raise ValueError('Invalid visit_id for this origin.')
visit = dict(zip(db.origin_visit_get_cols, visit))
updates = {}
if status and status != visit['status']:
updates['status'] = status
if metadata and metadata != visit['metadata']:
updates['metadata'] = metadata
if snapshot and snapshot != visit['snapshot']:
updates['snapshot'] = snapshot
if updates:
if self.journal_writer:
self.journal_writer.write_update('origin_visit', {
**visit, **updates})
db.origin_visit_update(origin_url, visit_id, updates, cur)
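A sketch of the visit lifecycle (hypothetical origin url, assuming it was registered beforehand with origin_add, and `snapshot` from the earlier sketch): a visit starts 'ongoing', then receives its final status and snapshot once loading completes:

import datetime

visit = storage.origin_visit_add(
    'https://example.com/repo.git',
    date=datetime.datetime.now(tz=datetime.timezone.utc),
    type='git')
storage.origin_visit_update(
    'https://example.com/repo.git', visit['visit'],
    status='full', snapshot=snapshot['id'])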
- @remote_api_endpoint('origin/visit/upsert')
@timed
@db_transaction()
def origin_visit_upsert(self, visits, db=None, cur=None):
- """Add a origin_visits with a specific id and with all its data.
- If there is already an origin_visit with the same
- `(origin_id, visit_id)`, overwrites it.
-
- Args:
- visits: iterable of dicts with keys:
-
- - **origin**: the visited origin's url (str)
- - **visit**: origin visit id
- - **date**: timestamp of such visit
- - **status**: Visit's new status
- - **metadata**: Data associated to the visit
- - **snapshot**: identifier of the snapshot to add to
- the visit
- """
visits = copy.deepcopy(visits)
for visit in visits:
if isinstance(visit['date'], str):
visit['date'] = dateutil.parser.parse(visit['date'])
if not isinstance(visit['origin'], str):
raise TypeError("visit['origin'] must be a string, not %r"
% (visit['origin'],))
if self.journal_writer:
for visit in visits:
self.journal_writer.write_addition('origin_visit', visit)
for visit in visits:
# TODO: upsert them all in a single query
db.origin_visit_upsert(**visit, cur=cur)
- @remote_api_endpoint('origin/visit/get')
@timed
@db_transaction_generator(statement_timeout=500)
def origin_visit_get(self, origin, last_visit=None, limit=None, db=None,
cur=None):
- """Retrieve all the origin's visit's information.
-
- Args:
- origin (str): The visited origin
- last_visit: Starting point from which to list the next visits.
- Defaults to None
- limit (int): Number of results to return from the last visit.
- Defaults to None
-
- Yields:
- List of visits.
-
- """
for line in db.origin_visit_get_all(
origin, last_visit=last_visit, limit=limit, cur=cur):
data = dict(zip(db.origin_visit_get_cols, line))
yield data
- @remote_api_endpoint('origin/visit/find_by_date')
@timed
@db_transaction(statement_timeout=500)
def origin_visit_find_by_date(self, origin, visit_date, db=None, cur=None):
- """Retrieves the origin visit whose date is closest to the provided
- timestamp.
- In case of a tie, the visit with the largest id is selected.
-
- Args:
- origin (str): The occurrence's origin (URL).
- visit_date (datetime): target timestamp
-
- Returns:
- A visit.
-
- """
line = db.origin_visit_find_by_date(origin, visit_date, cur=cur)
if line:
return dict(zip(db.origin_visit_get_cols, line))
- @remote_api_endpoint('origin/visit/getby')
@timed
@db_transaction(statement_timeout=500)
def origin_visit_get_by(self, origin, visit, db=None, cur=None):
- """Retrieve origin visit's information.
-
- Args:
- origin: The occurrence's origin (identifier).
- visit: The visit's identifier.
-
- Returns:
- The information on that particular (origin, visit) or None if
- it does not exist
-
- """
ori_visit = db.origin_visit_get(origin, visit, cur)
if not ori_visit:
return None
return dict(zip(db.origin_visit_get_cols, ori_visit))
- @remote_api_endpoint('origin/visit/get_latest')
@timed
@db_transaction(statement_timeout=4000)
def origin_visit_get_latest(
self, origin, allowed_statuses=None, require_snapshot=False,
db=None, cur=None):
- """Get the latest origin visit for the given origin, optionally
- looking only for those with one of the given allowed_statuses
- or for those with a known snapshot.
-
- Args:
- origin (str): the origin's URL
- allowed_statuses (list of str): list of visit statuses considered
- to find the latest visit. For instance,
- ``allowed_statuses=['full']`` will only consider visits that
- have successfully run to completion.
- require_snapshot (bool): If True, only a visit with a snapshot
- will be returned.
-
- Returns:
- dict: a dict with the following keys:
-
- - **origin**: the URL of the origin
- - **visit**: origin visit id
- - **type**: type of loader used for the visit
- - **date**: timestamp of such visit
- - **status**: status of the visit
- - **metadata**: Data associated to the visit
- - **snapshot** (Optional[sha1_git]): identifier of the snapshot
- associated to the visit
- """
origin_visit = db.origin_visit_get_latest(
origin, allowed_statuses=allowed_statuses,
require_snapshot=require_snapshot, cur=cur)
if origin_visit:
return dict(zip(db.origin_visit_get_cols, origin_visit))
- @remote_api_endpoint('origin/visit/get_random')
@timed
@db_transaction()
def origin_visit_get_random(
self, type: str, db=None, cur=None) -> Optional[Dict[str, Any]]:
- """Randomly select one successful origin visit with
- made in the last 3 months.
-
- Returns:
- dict representing an origin visit, in the same format as
- :py:meth:`origin_visit_get`.
-
- """
result = db.origin_visit_get_random(type, cur)
if result:
return dict(zip(db.origin_visit_get_cols, result))
else:
return None
- @remote_api_endpoint('object/find_by_sha1_git')
@timed
@db_transaction(statement_timeout=2000)
def object_find_by_sha1_git(self, ids, db=None, cur=None):
- """Return the objects found with the given ids.
-
- Args:
- ids: a generator of sha1_gits
-
- Returns:
- dict: a mapping from id to the list of objects found. Each object
- found is itself a dict with keys:
-
- - sha1_git: the input id
- - type: the type of object found
-
- """
ret = {id: [] for id in ids}
for retval in db.object_find_by_sha1_git(ids, cur=cur):
if retval[1]:
ret[retval[0]].append(dict(zip(db.object_find_by_sha1_git_cols,
retval)))
return ret
- @remote_api_endpoint('origin/get')
@timed
@db_transaction(statement_timeout=500)
def origin_get(self, origins, db=None, cur=None):
- """Return origins, either all identified by their ids or all
- identified by tuples (type, url).
-
- If the url is given and the type is omitted, one of the origins with
- that url is returned.
-
- Args:
- origin: a list of dictionaries representing the individual
- origins to find.
- These dicts have the key url:
-
- - url (bytes): the url the origin points to
-
- Returns:
- dict: the origin dictionary with the keys:
-
- - id: origin's id
- - url: origin's url
-
- """
if isinstance(origins, dict):
# Old API
return_single = True
origins = [origins]
elif len(origins) == 0:
return []
else:
return_single = False
origin_urls = [origin['url'] for origin in origins]
results = db.origin_get_by_url(origin_urls, cur)
results = [dict(zip(db.origin_cols, result))
for result in results]
if return_single:
assert len(results) == 1
if results[0]['url'] is not None:
return results[0]
else:
return None
else:
return [None if res['url'] is None else res for res in results]
- @remote_api_endpoint('origin/get_sha1')
@timed
@db_transaction_generator(statement_timeout=500)
def origin_get_by_sha1(self, sha1s, db=None, cur=None):
- """Return origins, identified by the sha1 of their URLs.
-
- Args:
- sha1s (list[bytes]): a list of sha1s
-
- Yields:
- dicts containing origin information as returned
- by :meth:`swh.storage.storage.Storage.origin_get`, or None if an
- origin matching the sha1 is not found.
-
- """
for line in db.origin_get_by_sha1(sha1s, cur):
if line[0] is not None:
yield dict(zip(db.origin_cols, line))
else:
yield None
- @remote_api_endpoint('origin/get_range')
@timed
@db_transaction_generator()
def origin_get_range(self, origin_from=1, origin_count=100,
db=None, cur=None):
- """Retrieve ``origin_count`` origins whose ids are greater
- or equal than ``origin_from``.
-
- Origins are sorted by id before retrieving them.
-
- Args:
- origin_from (int): the minimum id of origins to retrieve
- origin_count (int): the maximum number of origins to retrieve
-
- Yields:
- dicts containing origin information as returned
- by :meth:`swh.storage.storage.Storage.origin_get`.
- """
for origin in db.origin_get_range(origin_from, origin_count, cur):
yield dict(zip(db.origin_get_range_cols, origin))
- @remote_api_endpoint('origin/list')
@timed
@db_transaction()
def origin_list(self, page_token: Optional[str] = None, limit: int = 100,
*, db=None, cur=None) -> dict:
- """Returns the list of origins
-
- Args:
- page_token: opaque token used for pagination.
- limit: the maximum number of results to return
-
- Returns:
- dict: dict with the following keys:
- - **next_page_token** (str, optional): opaque token to be used as
- `page_token` for retrieving the next page. If absent, there are
- no more pages to gather.
- - **origins** (List[dict]): list of origins, as returned by
- `origin_get`.
- """
page_token = page_token or '0'
if not isinstance(page_token, str):
raise TypeError('page_token must be a string.')
origin_from = int(page_token)
result: Dict[str, Any] = {
'origins': [
dict(zip(db.origin_get_range_cols, origin))
for origin in db.origin_get_range(origin_from, limit, cur)
],
}
assert len(result['origins']) <= limit
if len(result['origins']) == limit:
result['next_page_token'] = str(result['origins'][limit-1]['id']+1)
for origin in result['origins']:
del origin['id']
return result
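Pagination sketch for origin_list: next_page_token is only present when a full page was returned, so the loop below drains the whole origin set:

page_token = None
while True:
    page = storage.origin_list(page_token=page_token, limit=100)
    for origin in page['origins']:
        print(origin['url'])
    page_token = page.get('next_page_token')
    if page_token is None:
        break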
- @remote_api_endpoint('origin/search')
@timed
@db_transaction_generator()
def origin_search(self, url_pattern, offset=0, limit=50,
regexp=False, with_visit=False, db=None, cur=None):
- """Search for origins whose urls contain a provided string pattern
- or match a provided regular expression.
- The search is performed in a case insensitive way.
-
- Args:
- url_pattern (str): the string pattern to search for in origin urls
- offset (int): number of found origins to skip before returning
- results
- limit (int): the maximum number of found origins to return
- regexp (bool): if True, consider the provided pattern as a regular
- expression and return origins whose urls match it
- with_visit (bool): if True, filter out origins with no visit
-
- Yields:
- dicts containing origin information as returned
- by :meth:`swh.storage.storage.Storage.origin_get`.
- """
for origin in db.origin_search(url_pattern, offset, limit,
regexp, with_visit, cur):
yield dict(zip(db.origin_cols, origin))
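A short sketch contrasting the search modes (hypothetical patterns); both are case-insensitive, and with_visit further restricts results to origins that have been visited:

plain = list(storage.origin_search('github.com/python'))         # substring
regex = list(storage.origin_search(r'.*/numpy$', regexp=True))   # regexp
visited = list(storage.origin_search('gitlab', with_visit=True))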
- @remote_api_endpoint('origin/count')
@timed
@db_transaction()
def origin_count(self, url_pattern, regexp=False,
with_visit=False, db=None, cur=None):
- """Count origins whose urls contain a provided string pattern
- or match a provided regular expression.
- The pattern search in origin urls is performed in a case insensitive
- way.
-
- Args:
- url_pattern (str): the string pattern to search for in origin urls
- regexp (bool): if True, consider the provided pattern as a regular
- expression and return origins whose urls match it
- with_visit (bool): if True, filter out origins with no visit
-
- Returns:
- int: The number of origins matching the search criterion.
- """
return db.origin_count(url_pattern, regexp, with_visit, cur)
- @remote_api_endpoint('origin/add_multi')
@timed
@db_transaction()
def origin_add(self, origins, db=None, cur=None):
- """Add origins to the storage
-
- Args:
- origins: list of dictionaries representing the individual origins,
- with the following keys:
-
- - type: the origin type ('git', 'svn', 'deb', ...)
- - url (str): the url the origin points to
-
- Returns:
- list: given origins as dict updated with their id
-
- """
origins = copy.deepcopy(list(origins))
for origin in origins:
self.origin_add_one(origin, db=db, cur=cur)
send_metric('origin:add', count=len(origins), method_name='origin_add')
return origins
- @remote_api_endpoint('origin/add')
@timed
@db_transaction()
def origin_add_one(self, origin, db=None, cur=None):
- """Add origin to the storage
-
- Args:
- origin: dictionary representing the individual origin to add. This
- dict has the following keys:
-
- - type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
- - url (str): the url the origin points to
-
- Returns:
- the url of the added origin, or of the identical one that already
- exists.
-
- """
origin_row = list(db.origin_get_by_url([origin['url']], cur))[0]
origin_url = dict(zip(db.origin_cols, origin_row))['url']
if origin_url:
return origin_url
if self.journal_writer:
self.journal_writer.write_addition('origin', origin)
origins = db.origin_add(origin['url'], cur)
send_metric('origin:add', count=len(origins), method_name='origin_add')
return origins
@db_transaction(statement_timeout=500)
def stat_counters(self, db=None, cur=None):
- """compute statistics about the number of tuples in various tables
-
- Returns:
- dict: a dictionary mapping textual labels (e.g., content) to
- integer values (e.g., the number of tuples in table content)
-
- """
return {k: v for (k, v) in db.stat_counters()}
@db_transaction()
def refresh_stat_counters(self, db=None, cur=None):
- """Recomputes the statistics for `stat_counters`."""
keys = [
'content',
'directory',
'directory_entry_dir',
'directory_entry_file',
'directory_entry_rev',
'origin',
'origin_visit',
'person',
'release',
'revision',
'revision_history',
'skipped_content',
'snapshot']
for key in keys:
cur.execute('select * from swh_update_counter(%s)', (key,))
- @remote_api_endpoint('origin/metadata/add')
@timed
@db_transaction()
def origin_metadata_add(self, origin_url, ts, provider, tool, metadata,
db=None, cur=None):
- """ Add an origin_metadata for the origin at ts with provenance and
- metadata.
-
- Args:
- origin_url (str): the origin url for which the metadata is added
- ts (datetime): timestamp of the found metadata
- provider (int): the provider of metadata (ex:'hal')
- tool (int): tool used to extract metadata
- metadata (jsonb): the metadata retrieved at the time and location
- """
if isinstance(ts, str):
ts = dateutil.parser.parse(ts)
db.origin_metadata_add(origin_url, ts, provider, tool,
metadata, cur)
send_metric(
'origin_metadata:add', count=1, method_name='origin_metadata_add')
- @remote_api_endpoint('origin/metadata/get')
@timed
@db_transaction_generator(statement_timeout=500)
def origin_metadata_get_by(self, origin_url, provider_type=None, db=None,
cur=None):
- """Retrieve list of all origin_metadata entries for the origin_id
-
- Args:
- origin_url (str): the origin's URL
- provider_type (str): (optional) type of provider
-
- Returns:
- list of dicts: the origin_metadata dictionaries with the keys:
-
- - origin_id (int): origin's id
- - discovery_date (datetime): timestamp of discovery
- - tool_id (int): metadata's extracting tool
- - metadata (jsonb)
- - provider_id (int): metadata's provider
- - provider_name (str)
- - provider_type (str)
- - provider_url (str)
-
- """
for line in db.origin_metadata_get_by(origin_url, provider_type, cur):
yield dict(zip(db.origin_metadata_get_cols, line))
- @remote_api_endpoint('tool/add')
@timed
@db_transaction()
def tool_add(self, tools, db=None, cur=None):
- """Add new tools to the storage.
-
- Args:
- tools (iterable of :class:`dict`): Tool information to add to
- storage. Each tool is a :class:`dict` with the following keys:
-
- - name (:class:`str`): name of the tool
- - version (:class:`str`): version of the tool
- - configuration (:class:`dict`): configuration of the tool,
- must be json-encodable
-
- Returns:
- list of :class:`dict`: All the tools inserted in storage
- (including the internal ``id``). The order of the list is not
- guaranteed to match the order of the initial list.
-
- """
db.mktemp_tool(cur)
db.copy_to(tools, 'tmp_tool',
['name', 'version', 'configuration'],
cur)
tools = db.tool_add_from_temp(cur)
results = [dict(zip(db.tool_cols, line)) for line in tools]
send_metric('tool:add', count=len(results), method_name='tool_add')
return results
- @remote_api_endpoint('tool/data')
@timed
@db_transaction(statement_timeout=500)
def tool_get(self, tool, db=None, cur=None):
- """Retrieve tool information.
-
- Args:
- tool (dict): Tool information we want to retrieve from storage.
- The dicts have the same keys as those used in :func:`tool_add`.
-
- Returns:
- dict: The full tool information if it exists (``id`` included),
- None otherwise.
-
- """
tool_conf = tool['configuration']
if isinstance(tool_conf, dict):
tool_conf = json.dumps(tool_conf)
idx = db.tool_get(tool['name'],
tool['version'],
tool_conf)
if not idx:
return None
return dict(zip(db.tool_cols, idx))
- @remote_api_endpoint('provider/add')
@timed
@db_transaction()
def metadata_provider_add(self, provider_name, provider_type, provider_url,
metadata, db=None, cur=None):
- """Add a metadata provider.
-
- Args:
- provider_name (str): Its name
- provider_type (str): Its type (eg. `'deposit-client'`)
- provider_url (str): Its URL
- metadata: JSON-encodable object
-
- Returns:
- int: an identifier of the provider
- """
result = db.metadata_provider_add(provider_name, provider_type,
provider_url, metadata, cur)
send_metric(
'metadata_provider:add', count=1, method_name='metadata_provider')
return result
- @remote_api_endpoint('provider/get')
@timed
@db_transaction()
def metadata_provider_get(self, provider_id, db=None, cur=None):
- """Get a metadata provider
-
- Args:
- provider_id: Its identifier, as given by `metadata_provider_add`.
-
- Returns:
- dict: the provider information (same keys as the arguments of
- `metadata_provider_add`); or None if it does not exist.
- """
result = db.metadata_provider_get(provider_id)
if not result:
return None
return dict(zip(db.metadata_provider_cols, result))
- @remote_api_endpoint('provider/getby')
@timed
@db_transaction()
def metadata_provider_get_by(self, provider, db=None, cur=None):
- """Get a metadata provider
-
- Args:
- provider (dict): A dictionary with keys:
- * provider_name: Its name
- * provider_url: Its URL
-
- Returns:
- dict: the provider information (same keys as the arguments of
- `metadata_provider_add`); or None if it does not exist.
- """
result = db.metadata_provider_get_by(provider['provider_name'],
provider['provider_url'])
if not result:
return None
return dict(zip(db.metadata_provider_cols, result))
- @remote_api_endpoint('algos/diff_directories')
@timed
def diff_directories(self, from_dir, to_dir, track_renaming=False):
- """Compute the list of file changes introduced between two arbitrary
- directories (insertion / deletion / modification / renaming of files).
-
- Args:
- from_dir (bytes): identifier of the directory to compare from
- to_dir (bytes): identifier of the directory to compare to
- track_renaming (bool): whether or not to track file renamings
-
- Returns:
- A list of dict describing the introduced file changes
- (see :func:`swh.storage.algos.diff.diff_directories`
- for more details).
- """
return diff.diff_directories(self, from_dir, to_dir, track_renaming)
- @remote_api_endpoint('algos/diff_revisions')
@timed
def diff_revisions(self, from_rev, to_rev, track_renaming=False):
- """Compute the list of file changes introduced between two arbitrary
- revisions (insertion / deletion / modification / renaming of files).
-
- Args:
- from_rev (bytes): identifier of the revision to compare from
- to_rev (bytes): identifier of the revision to compare to
- track_renaming (bool): whether or not to track file renamings
-
- Returns:
- A list of dict describing the introduced file changes
- (see :func:`swh.storage.algos.diff.diff_revisions`
- for more details).
- """
return diff.diff_revisions(self, from_rev, to_rev, track_renaming)
- @remote_api_endpoint('algos/diff_revision')
@timed
def diff_revision(self, revision, track_renaming=False):
- """Compute the list of file changes introduced by a specific revision
- (insertion / deletion / modification / renaming of files) by comparing
- it against its first parent.
-
- Args:
- revision (bytes): identifier of the revision from which to
- compute the list of files changes
- track_renaming (bool): whether or not to track file renamings
-
- Returns:
- A list of dict describing the introduced file changes
- (see :func:`swh.storage.algos.diff.diff_revision`
- for more details).
- """
return diff.diff_revision(self, revision, track_renaming)
diff --git a/swh/storage/tests/test_storage.py b/swh/storage/tests/test_storage.py
index e7f854ee..6dd7ced9 100644
--- a/swh/storage/tests/test_storage.py
+++ b/swh/storage/tests/test_storage.py
@@ -1,3750 +1,3780 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
from contextlib import contextmanager
import datetime
+import inspect
import itertools
import math
import queue
import random
import threading
from collections import defaultdict
from datetime import timedelta
from unittest.mock import Mock
import psycopg2
import pytest
from hypothesis import given, strategies, settings, HealthCheck
from typing import ClassVar, Optional
from swh.model import from_disk, identifiers
from swh.model.hashutil import hash_to_bytes
from swh.model.hypothesis_strategies import objects
from swh.storage import HashCollision
from swh.storage.converters import origin_url_to_sha1 as sha1
+from swh.storage.interface import StorageInterface
from .storage_data import data
@contextmanager
def db_transaction(storage):
with storage.db() as db:
with db.transaction() as cur:
yield db, cur
def normalize_entity(entity):
entity = copy.deepcopy(entity)
for key in ('date', 'committer_date'):
if key in entity:
entity[key] = identifiers.normalize_timestamp(entity[key])
return entity
def transform_entries(dir_, *, prefix=b''):
for ent in dir_['entries']:
yield {
'dir_id': dir_['id'],
'type': ent['type'],
'target': ent['target'],
'name': prefix + ent['name'],
'perms': ent['perms'],
'status': None,
'sha1': None,
'sha1_git': None,
'sha256': None,
'length': None,
}
def cmpdir(directory):
return (directory['type'], directory['dir_id'])
def short_revision(revision):
return [revision['id'], revision['parents']]
def assert_contents_ok(expected_contents, actual_contents,
keys_to_check={'sha1', 'data'}):
"""Assert that a given list of contents matches on a given set of keys.
"""
for k in keys_to_check:
expected_list = set([c.get(k) for c in expected_contents])
actual_list = set([c.get(k) for c in actual_contents])
assert actual_list == expected_list, k
class TestStorage:
"""Main class for Storage testing.
This class is used as-is to test local storage (see TestLocalStorage
below) and remote storage (see TestRemoteStorage in
test_remote_storage.py).
We need to have the two classes inherit from this base class
separately to avoid nosetests running the tests from the base
class twice.
"""
maxDiff = None # type: ClassVar[Optional[int]]
+ def test_types(self, swh_storage):
+ """Checks all methods of StorageInterface are implemented by this
+ backend, and that they have the same signature."""
+ # Create an instance of the protocol (which cannot be instantiated
+ # directly, so this creates a subclass, then instantiates it)
+ interface = type('_', (StorageInterface,), {})()
+
+ assert 'content_add' in dir(interface)
+
+ missing_methods = []
+
+ for meth_name in dir(interface):
+ if meth_name.startswith('_'):
+ continue
+ interface_meth = getattr(interface, meth_name)
+ try:
+ concrete_meth = getattr(swh_storage, meth_name)
+ except AttributeError:
+ missing_methods.append(meth_name)
+ continue
+
+ expected_signature = inspect.signature(interface_meth)
+ actual_signature = inspect.signature(concrete_meth)
+
+ assert expected_signature == actual_signature, meth_name
+
+ assert missing_methods == []
+
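For reference, a self-contained sketch of what the signature comparison above catches: inspect.signature compares parameter names, kinds and defaults, so a renamed or keyword-only parameter counts as a mismatch:

import inspect

def reference(self, ids, db=None, cur=None): ...
def same(self, ids, db=None, cur=None): ...
def different(self, ids, *, db=None, cur=None): ...

assert inspect.signature(reference) == inspect.signature(same)
assert inspect.signature(reference) != inspect.signature(different)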
def test_check_config(self, swh_storage):
assert swh_storage.check_config(check_write=True)
assert swh_storage.check_config(check_write=False)
def test_content_add(self, swh_storage):
cont = data.cont
insertion_start_time = datetime.datetime.now(tz=datetime.timezone.utc)
actual_result = swh_storage.content_add([cont])
insertion_end_time = datetime.datetime.now(tz=datetime.timezone.utc)
assert actual_result == {
'content:add': 1,
'content:add:bytes': cont['length'],
'skipped_content:add': 0
}
assert list(swh_storage.content_get([cont['sha1']])) == \
[{'sha1': cont['sha1'], 'data': cont['data']}]
expected_cont = data.cont
del expected_cont['data']
journal_objects = list(swh_storage.journal_writer.objects)
for (obj_type, obj) in journal_objects:
assert insertion_start_time <= obj['ctime']
assert obj['ctime'] <= insertion_end_time
del obj['ctime']
assert journal_objects == [('content', expected_cont)]
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['content'] == 1
def test_content_add_from_generator(self, swh_storage):
def _cnt_gen():
yield data.cont
actual_result = swh_storage.content_add(_cnt_gen())
assert actual_result == {
'content:add': 1,
'content:add:bytes': data.cont['length'],
'skipped_content:add': 0
}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['content'] == 1
def test_content_add_validation(self, swh_storage):
cont = data.cont
with pytest.raises(ValueError, match='status'):
swh_storage.content_add([{**cont, 'status': 'foobar'}])
with pytest.raises(ValueError, match="(?i)length"):
swh_storage.content_add([{**cont, 'length': -2}])
with pytest.raises((ValueError, psycopg2.IntegrityError),
match='reason') as cm:
swh_storage.content_add([{**cont, 'status': 'absent'}])
if type(cm.value) == psycopg2.IntegrityError:
assert cm.value.pgcode == \
psycopg2.errorcodes.NOT_NULL_VIOLATION
with pytest.raises(
ValueError,
match="^Must not provide a reason if content is not absent.$"):
swh_storage.content_add([{**cont, 'reason': 'foobar'}])
def test_content_get_missing(self, swh_storage):
cont = data.cont
swh_storage.content_add([cont])
# Query a single missing content
results = list(swh_storage.content_get(
[data.cont2['sha1']]))
assert results == [None]
# Check content_get does not abort after finding a missing content
results = list(swh_storage.content_get(
[data.cont['sha1'], data.cont2['sha1']]))
assert results == [{'sha1': cont['sha1'], 'data': cont['data']}, None]
# Check content_get does not discard found content when it finds
# a missing content.
results = list(swh_storage.content_get(
[data.cont2['sha1'], data.cont['sha1']]))
assert results == [None, {'sha1': cont['sha1'], 'data': cont['data']}]
def test_content_add_different_input(self, swh_storage):
cont = data.cont
cont2 = data.cont2
actual_result = swh_storage.content_add([cont, cont2])
assert actual_result == {
'content:add': 2,
'content:add:bytes': cont['length'] + cont2['length'],
'skipped_content:add': 0
}
def test_content_add_twice(self, swh_storage):
actual_result = swh_storage.content_add([data.cont])
assert actual_result == {
'content:add': 1,
'content:add:bytes': data.cont['length'],
'skipped_content:add': 0
}
assert len(swh_storage.journal_writer.objects) == 1
actual_result = swh_storage.content_add([data.cont, data.cont2])
assert actual_result == {
'content:add': 1,
'content:add:bytes': data.cont2['length'],
'skipped_content:add': 0
}
assert len(swh_storage.journal_writer.objects) == 2
assert len(swh_storage.content_find(data.cont)) == 1
assert len(swh_storage.content_find(data.cont2)) == 1
def test_content_add_collision(self, swh_storage):
cont1 = data.cont
# create (corrupted) content with same sha1{,_git} but != sha256
cont1b = cont1.copy()
sha256_array = bytearray(cont1b['sha256'])
sha256_array[0] += 1
cont1b['sha256'] = bytes(sha256_array)
with pytest.raises(HashCollision) as cm:
swh_storage.content_add([cont1, cont1b])
assert cm.value.args[0] in ['sha1', 'sha1_git', 'blake2s256']
def test_content_update(self, swh_storage):
swh_storage.journal_writer = None # TODO, not supported
cont = copy.deepcopy(data.cont)
swh_storage.content_add([cont])
# alter the sha1_git for example
cont['sha1_git'] = hash_to_bytes(
'3a60a5275d0333bf13468e8b3dcab90f4046e654')
swh_storage.content_update([cont], keys=['sha1_git'])
results = swh_storage.content_get_metadata([cont['sha1']])
del cont['data']
assert results == {cont['sha1']: [cont]}
def test_content_add_metadata(self, swh_storage):
cont = data.cont
del cont['data']
cont['ctime'] = datetime.datetime.now()
actual_result = swh_storage.content_add_metadata([cont])
assert actual_result == {
'content:add': 1,
'skipped_content:add': 0
}
expected_cont = cont.copy()
del expected_cont['ctime']
assert swh_storage.content_get_metadata([cont['sha1']]) == {
cont['sha1']: [expected_cont]
}
assert list(swh_storage.journal_writer.objects) == [('content', cont)]
def test_content_add_metadata_different_input(self, swh_storage):
cont = data.cont
del cont['data']
cont['ctime'] = datetime.datetime.now()
cont2 = data.cont2
del cont2['data']
cont2['ctime'] = datetime.datetime.now()
actual_result = swh_storage.content_add_metadata([cont, cont2])
assert actual_result == {
'content:add': 2,
'skipped_content:add': 0
}
def test_content_add_metadata_collision(self, swh_storage):
cont1 = data.cont
del cont1['data']
cont1['ctime'] = datetime.datetime.now()
# create (corrupted) content with same sha1{,_git} but != sha256
cont1b = cont1.copy()
sha256_array = bytearray(cont1b['sha256'])
sha256_array[0] += 1
cont1b['sha256'] = bytes(sha256_array)
with pytest.raises(HashCollision) as cm:
swh_storage.content_add_metadata([cont1, cont1b])
assert cm.value.args[0] in ['sha1', 'sha1_git', 'blake2s256']
def test_skipped_content_add(self, swh_storage):
cont = data.skipped_cont
cont2 = data.skipped_cont2
cont2['blake2s256'] = None
missing = list(swh_storage.skipped_content_missing([cont, cont2]))
assert len(missing) == 2
actual_result = swh_storage.content_add([cont, cont, cont2])
assert actual_result == {
'content:add': 0,
'content:add:bytes': 0,
'skipped_content:add': 2,
}
missing = list(swh_storage.skipped_content_missing([cont, cont2]))
assert missing == []
@pytest.mark.property_based
@settings(deadline=None) # this test is very slow
@given(strategies.sets(
elements=strategies.sampled_from(
['sha256', 'sha1_git', 'blake2s256']),
min_size=0))
def test_content_missing(self, swh_storage, algos):
algos |= {'sha1'}
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont2])
test_contents = [cont2]
missing_per_hash = defaultdict(list)
for i in range(256):
test_content = missing_cont.copy()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_contents.append(test_content)
assert set(swh_storage.content_missing(test_contents)) == \
set(missing_per_hash['sha1'])
for hash in algos:
assert set(swh_storage.content_missing(
test_contents, key_hash=hash)) == set(missing_per_hash[hash])
@pytest.mark.property_based
@given(strategies.sets(
elements=strategies.sampled_from(
['sha256', 'sha1_git', 'blake2s256']),
min_size=0))
def test_content_missing_unknown_algo(self, swh_storage, algos):
algos |= {'sha1'}
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont2])
test_contents = [cont2]
missing_per_hash = defaultdict(list)
for i in range(16):
test_content = missing_cont.copy()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_content['nonexisting_algo'] = b'\x00'
test_contents.append(test_content)
assert set(
swh_storage.content_missing(test_contents)) == set(
missing_per_hash['sha1'])
for hash in algos:
assert set(swh_storage.content_missing(
test_contents, key_hash=hash)) == set(
missing_per_hash[hash])
def test_content_missing_per_sha1(self, swh_storage):
# given
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont2])
# when
gen = swh_storage.content_missing_per_sha1([cont2['sha1'],
missing_cont['sha1']])
# then
assert list(gen) == [missing_cont['sha1']]
def test_content_missing_per_sha1_git(self, swh_storage):
cont = data.cont
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont, cont2])
contents = [cont['sha1_git'], cont2['sha1_git'],
missing_cont['sha1_git']]
missing_contents = swh_storage.content_missing_per_sha1_git(contents)
assert list(missing_contents) == [missing_cont['sha1_git']]
def test_content_get_partition(self, swh_storage, swh_contents):
"""content_get_partition paginates results if limit exceeded"""
expected_contents = [c for c in swh_contents
if c['status'] != 'absent']
actual_contents = []
for i in range(16):
actual_result = swh_storage.content_get_partition(i, 16)
assert actual_result['next_page_token'] is None
actual_contents.extend(actual_result['contents'])
assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
def test_content_get_partition_full(self, swh_storage, swh_contents):
"""content_get_partition for a single partition returns all available
contents"""
expected_contents = [c for c in swh_contents
if c['status'] != 'absent']
actual_result = swh_storage.content_get_partition(0, 1)
assert actual_result['next_page_token'] is None
actual_contents = actual_result['contents']
assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
def test_content_get_partition_empty(self, swh_storage, swh_contents):
"""content_get_partition when at least one of the partitions is
empty"""
expected_contents = {cont['sha1'] for cont in swh_contents
if cont['status'] != 'absent'}
# nb_partitions = smallest power of 2 such that at least one of
# the partitions is empty
nb_partitions = 1 << math.floor(math.log2(len(swh_contents)) + 1)
seen_sha1s = []
for i in range(nb_partitions):
actual_result = swh_storage.content_get_partition(
i, nb_partitions, limit=len(swh_contents)+1)
for cont in actual_result['contents']:
seen_sha1s.append(cont['sha1'])
# Limit is higher than the max number of results
assert actual_result['next_page_token'] is None
assert set(seen_sha1s) == expected_contents
def test_content_get_partition_limit_none(self, swh_storage):
"""content_get_partition call with wrong limit input should fail"""
with pytest.raises(ValueError) as e:
swh_storage.content_get_partition(1, 16, limit=None)
assert e.value.args == ('Development error: limit should not be None',)
def test_generate_content_get_partition_pagination(
self, swh_storage, swh_contents):
"""content_get_partition returns contents within range provided"""
expected_contents = [c for c in swh_contents
if c['status'] != 'absent']
# retrieve contents
actual_contents = []
for i in range(4):
page_token = None
while True:
actual_result = swh_storage.content_get_partition(
i, 4, limit=3, page_token=page_token)
actual_contents.extend(actual_result['contents'])
page_token = actual_result['next_page_token']
if page_token is None:
break
assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
def test_content_get_metadata(self, swh_storage):
cont1 = data.cont
cont2 = data.cont2
swh_storage.content_add([cont1, cont2])
actual_md = swh_storage.content_get_metadata(
[cont1['sha1'], cont2['sha1']])
# we only retrieve the metadata
cont1.pop('data')
cont2.pop('data')
assert actual_md[cont1['sha1']] == [cont1]
assert actual_md[cont2['sha1']] == [cont2]
assert len(actual_md.keys()) == 2
def test_content_get_metadata_missing_sha1(self, swh_storage):
cont1 = data.cont
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont1, cont2])
actual_contents = swh_storage.content_get_metadata(
[missing_cont['sha1']])
assert actual_contents == {missing_cont['sha1']: []}
def test_content_get_random(self, swh_storage):
swh_storage.content_add([data.cont, data.cont2, data.cont3])
assert swh_storage.content_get_random() in {
data.cont['sha1_git'], data.cont2['sha1_git'],
data.cont3['sha1_git']}
def test_directory_add(self, swh_storage):
init_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert [data.dir['id']] == init_missing
actual_result = swh_storage.directory_add([data.dir])
assert actual_result == {'directory:add': 1}
assert list(swh_storage.journal_writer.objects) == \
[('directory', data.dir)]
actual_data = list(swh_storage.directory_ls(data.dir['id']))
expected_data = list(transform_entries(data.dir))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
after_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert after_missing == []
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['directory'] == 1
def test_directory_add_from_generator(self, swh_storage):
def _dir_gen():
yield data.dir
actual_result = swh_storage.directory_add(directories=_dir_gen())
assert actual_result == {'directory:add': 1}
assert list(swh_storage.journal_writer.objects) == \
[('directory', data.dir)]
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['directory'] == 1
def test_directory_add_validation(self, swh_storage):
dir_ = copy.deepcopy(data.dir)
dir_['entries'][0]['type'] = 'foobar'
with pytest.raises(ValueError, match='type.*foobar'):
swh_storage.directory_add([dir_])
dir_ = copy.deepcopy(data.dir)
del dir_['entries'][0]['target']
with pytest.raises((TypeError, psycopg2.IntegrityError),
match='target') as cm:
swh_storage.directory_add([dir_])
if type(cm.value) == psycopg2.IntegrityError:
assert cm.value.pgcode == psycopg2.errorcodes.NOT_NULL_VIOLATION
def test_directory_add_twice(self, swh_storage):
actual_result = swh_storage.directory_add([data.dir])
assert actual_result == {'directory:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('directory', data.dir)]
actual_result = swh_storage.directory_add([data.dir])
assert actual_result == {'directory:add': 0}
assert list(swh_storage.journal_writer.objects) \
== [('directory', data.dir)]
def test_directory_get_recursive(self, swh_storage):
init_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert init_missing == [data.dir['id']]
actual_result = swh_storage.directory_add(
[data.dir, data.dir2, data.dir3])
assert actual_result == {'directory:add': 3}
assert list(swh_storage.journal_writer.objects) == [
('directory', data.dir),
('directory', data.dir2),
('directory', data.dir3)]
# List directory containing a file and an unknown subdirectory
actual_data = list(swh_storage.directory_ls(
data.dir['id'], recursive=True))
expected_data = list(transform_entries(data.dir))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a single file
actual_data = list(swh_storage.directory_ls(
data.dir2['id'], recursive=True))
expected_data = list(transform_entries(data.dir2))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a known subdirectory, entries should
# be both those of the directory and of the subdir
actual_data = list(swh_storage.directory_ls(
data.dir3['id'], recursive=True))
expected_data = list(itertools.chain(
transform_entries(data.dir3),
transform_entries(data.dir, prefix=b'subdir/')))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
def test_directory_get_non_recursive(self, swh_storage):
init_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert init_missing == [data.dir['id']]
actual_result = swh_storage.directory_add(
[data.dir, data.dir2, data.dir3])
assert actual_result == {'directory:add': 3}
assert list(swh_storage.journal_writer.objects) == [
('directory', data.dir),
('directory', data.dir2),
('directory', data.dir3)]
# List directory containing a file and an unknown subdirectory
actual_data = list(swh_storage.directory_ls(data.dir['id']))
expected_data = list(transform_entries(data.dir))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a single file
actual_data = list(swh_storage.directory_ls(data.dir2['id']))
expected_data = list(transform_entries(data.dir2))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a known subdirectory, entries should
# only be those of the parent directory, not of the subdir
actual_data = list(swh_storage.directory_ls(data.dir3['id']))
expected_data = list(transform_entries(data.dir3))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
def test_directory_entry_get_by_path(self, swh_storage):
# given
init_missing = list(swh_storage.directory_missing([data.dir3['id']]))
assert [data.dir3['id']] == init_missing
actual_result = swh_storage.directory_add([data.dir3, data.dir4])
assert actual_result == {'directory:add': 2}
expected_entries = [
{
'dir_id': data.dir3['id'],
'name': b'foo',
'type': 'file',
'target': data.cont['sha1_git'],
'sha1': None,
'sha1_git': None,
'sha256': None,
'status': None,
'perms': from_disk.DentryPerms.content,
'length': None,
},
{
'dir_id': data.dir3['id'],
'name': b'subdir',
'type': 'dir',
'target': data.dir['id'],
'sha1': None,
'sha1_git': None,
'sha256': None,
'status': None,
'perms': from_disk.DentryPerms.directory,
'length': None,
},
{
'dir_id': data.dir3['id'],
'name': b'hello',
'type': 'file',
'target': b'12345678901234567890',
'sha1': None,
'sha1_git': None,
'sha256': None,
'status': None,
'perms': from_disk.DentryPerms.content,
'length': None,
},
]
# when (all must be found here)
for entry, expected_entry in zip(
data.dir3['entries'], expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
data.dir3['id'],
[entry['name']])
assert actual_entry == expected_entry
# same, but deeper
for entry, expected_entry in zip(
data.dir3['entries'], expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
data.dir4['id'],
[b'subdir1', entry['name']])
expected_entry = expected_entry.copy()
expected_entry['name'] = b'subdir1/' + expected_entry['name']
assert actual_entry == expected_entry
# when (nothing should be found here since data.dir is not persisted.)
for entry in data.dir['entries']:
actual_entry = swh_storage.directory_entry_get_by_path(
data.dir['id'],
[entry['name']])
assert actual_entry is None
def test_directory_get_random(self, swh_storage):
swh_storage.directory_add([data.dir, data.dir2, data.dir3])
assert swh_storage.directory_get_random() in \
{data.dir['id'], data.dir2['id'], data.dir3['id']}
def test_revision_add(self, swh_storage):
init_missing = swh_storage.revision_missing([data.revision['id']])
assert list(init_missing) == [data.revision['id']]
actual_result = swh_storage.revision_add([data.revision])
assert actual_result == {'revision:add': 1}
end_missing = swh_storage.revision_missing([data.revision['id']])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.objects) \
== [('revision', data.revision)]
# already there so nothing added
actual_result = swh_storage.revision_add([data.revision])
assert actual_result == {'revision:add': 0}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['revision'] == 1
def test_revision_add_from_generator(self, swh_storage):
def _rev_gen():
yield data.revision
actual_result = swh_storage.revision_add(_rev_gen())
assert actual_result == {'revision:add': 1}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['revision'] == 1
def test_revision_add_validation(self, swh_storage):
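# A date with an out-of-range UTC offset must be rejected, either by
# validation (ValueError) or by the database (DataError).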
rev = copy.deepcopy(data.revision)
rev['date']['offset'] = 2**16
with pytest.raises((ValueError, psycopg2.DataError),
match='offset') as cm:
swh_storage.revision_add([rev])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode \
== psycopg2.errorcodes.NUMERIC_VALUE_OUT_OF_RANGE
rev = copy.deepcopy(data.revision)
rev['committer_date']['offset'] = 2**16
with pytest.raises((ValueError, psycopg2.DataError),
match='offset') as cm:
swh_storage.revision_add([rev])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode \
== psycopg2.errorcodes.NUMERIC_VALUE_OUT_OF_RANGE
rev = copy.deepcopy(data.revision)
rev['type'] = 'foobar'
with pytest.raises((ValueError, psycopg2.DataError),
match='(?i)type') as cm:
swh_storage.revision_add([rev])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode == \
psycopg2.errorcodes.INVALID_TEXT_REPRESENTATION
def test_revision_add_twice(self, swh_storage):
actual_result = swh_storage.revision_add([data.revision])
assert actual_result == {'revision:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('revision', data.revision)]
actual_result = swh_storage.revision_add(
[data.revision, data.revision2])
assert actual_result == {'revision:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('revision', data.revision),
('revision', data.revision2)]
def test_revision_add_name_clash(self, swh_storage):
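# Two revisions whose authors share the same fullname (but differ in
# name/email) must both be inserted.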
revision1 = data.revision
revision2 = data.revision2
revision1['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe',
'email': b'john.doe@example.com'
}
revision2['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe ',
'email': b'john.doe@example.com '
}
actual_result = swh_storage.revision_add([revision1, revision2])
assert actual_result == {'revision:add': 2}
def test_revision_log(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
# when
actual_results = list(swh_storage.revision_log(
[data.revision4['id']]))
# hack: ids generated
for actual_result in actual_results:
if 'id' in actual_result['author']:
del actual_result['author']['id']
if 'id' in actual_result['committer']:
del actual_result['committer']['id']
assert len(actual_results) == 2 # rev4 -child-> rev3
assert actual_results[0] == normalize_entity(data.revision4)
assert actual_results[1] == normalize_entity(data.revision3)
assert list(swh_storage.journal_writer.objects) == [
('revision', data.revision3),
('revision', data.revision4)]
def test_revision_log_with_limit(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
actual_results = list(swh_storage.revision_log(
[data.revision4['id']], 1))
# hack: ids generated
for actual_result in actual_results:
if 'id' in actual_result['author']:
del actual_result['author']['id']
if 'id' in actual_result['committer']:
del actual_result['committer']['id']
assert len(actual_results) == 1
assert actual_results[0] == data.revision4
def test_revision_log_unknown_revision(self, swh_storage):
rev_log = list(swh_storage.revision_log([data.revision['id']]))
assert rev_log == []
def test_revision_shortlog(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
# when
actual_results = list(swh_storage.revision_shortlog(
[data.revision4['id']]))
assert len(actual_results) == 2 # rev4 -child-> rev3
assert list(actual_results[0]) == short_revision(data.revision4)
assert list(actual_results[1]) == short_revision(data.revision3)
def test_revision_shortlog_with_limit(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
actual_results = list(swh_storage.revision_shortlog(
[data.revision4['id']], 1))
assert len(actual_results) == 1
assert list(actual_results[0]) == short_revision(data.revision4)
def test_revision_get(self, swh_storage):
swh_storage.revision_add([data.revision])
actual_revisions = list(swh_storage.revision_get(
[data.revision['id'], data.revision2['id']]))
# when
if 'id' in actual_revisions[0]['author']:
del actual_revisions[0]['author']['id'] # hack: ids are generated
if 'id' in actual_revisions[0]['committer']:
del actual_revisions[0]['committer']['id']
assert len(actual_revisions) == 2
assert actual_revisions[0] == normalize_entity(data.revision)
assert actual_revisions[1] is None
def test_revision_get_no_parents(self, swh_storage):
swh_storage.revision_add([data.revision3])
get = list(swh_storage.revision_get([data.revision3['id']]))
assert len(get) == 1
assert get[0]['parents'] == [] # no parents on this one
def test_revision_get_random(self, swh_storage):
swh_storage.revision_add(
[data.revision, data.revision2, data.revision3])
assert swh_storage.revision_get_random() in \
{data.revision['id'], data.revision2['id'], data.revision3['id']}
def test_release_add(self, swh_storage):
init_missing = swh_storage.release_missing([data.release['id'],
data.release2['id']])
assert [data.release['id'], data.release2['id']] == list(init_missing)
actual_result = swh_storage.release_add([data.release, data.release2])
assert actual_result == {'release:add': 2}
end_missing = swh_storage.release_missing([data.release['id'],
data.release2['id']])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.objects) == [
('release', data.release),
('release', data.release2)]
# already present so nothing added
actual_result = swh_storage.release_add([data.release, data.release2])
assert actual_result == {'release:add': 0}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['release'] == 2
def test_release_add_from_generator(self, swh_storage):
def _rel_gen():
yield data.release
yield data.release2
actual_result = swh_storage.release_add(_rel_gen())
assert actual_result == {'release:add': 2}
assert list(swh_storage.journal_writer.objects) == [
('release', data.release),
('release', data.release2)]
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['release'] == 2
def test_release_add_no_author_date(self, swh_storage):
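# A release may have neither author nor date; it must still be added.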
release = data.release
release['author'] = None
release['date'] = None
actual_result = swh_storage.release_add([release])
assert actual_result == {'release:add': 1}
end_missing = swh_storage.release_missing([data.release['id']])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.objects) \
== [('release', release)]
def test_release_add_validation(self, swh_storage):
rel = copy.deepcopy(data.release)
rel['date']['offset'] = 2**16
with pytest.raises((ValueError, psycopg2.DataError),
match='offset') as cm:
swh_storage.release_add([rel])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode \
== psycopg2.errorcodes.NUMERIC_VALUE_OUT_OF_RANGE
rel = copy.deepcopy(data.release)
rel['author'] = None
with pytest.raises((ValueError, psycopg2.IntegrityError),
match='date') as cm:
swh_storage.release_add([rel])
if type(cm.value) == psycopg2.IntegrityError:
assert cm.value.pgcode == psycopg2.errorcodes.CHECK_VIOLATION
def test_release_add_twice(self, swh_storage):
actual_result = swh_storage.release_add([data.release])
assert actual_result == {'release:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('release', data.release)]
actual_result = swh_storage.release_add([data.release, data.release2])
assert actual_result == {'release:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('release', data.release),
('release', data.release2)]
def test_release_add_name_clash(self, swh_storage):
release1 = data.release.copy()
release2 = data.release2.copy()
release1['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe',
'email': b'john.doe@example.com'
}
release2['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe ',
'email': b'john.doe@example.com '
}
actual_result = swh_storage.release_add([release1, release2])
assert actual_result == {'release:add': 2}
def test_release_get(self, swh_storage):
# given
swh_storage.release_add([data.release, data.release2])
# when
actual_releases = list(swh_storage.release_get([data.release['id'],
data.release2['id']]))
# then
for actual_release in actual_releases:
if 'id' in actual_release['author']:
del actual_release['author']['id'] # hack: ids are generated
assert [
normalize_entity(data.release), normalize_entity(data.release2)] \
== [actual_releases[0], actual_releases[1]]
unknown_releases = \
list(swh_storage.release_get([data.release3['id']]))
assert unknown_releases[0] is None
def test_release_get_random(self, swh_storage):
swh_storage.release_add([data.release, data.release2, data.release3])
assert swh_storage.release_get_random() in \
{data.release['id'], data.release2['id'], data.release3['id']}
def test_origin_add_one(self, swh_storage):
origin0 = swh_storage.origin_get(data.origin)
assert origin0 is None
id = swh_storage.origin_add_one(data.origin)
actual_origin = swh_storage.origin_get({'url': data.origin['url']})
assert actual_origin['url'] == data.origin['url']
id2 = swh_storage.origin_add_one(data.origin)
assert id == id2
def test_origin_add(self, swh_storage):
origin0 = swh_storage.origin_get([data.origin])[0]
assert origin0 is None
origin1, origin2 = swh_storage.origin_add([data.origin, data.origin2])
actual_origin = swh_storage.origin_get([{
'url': data.origin['url'],
}])[0]
assert actual_origin['url'] == origin1['url']
actual_origin2 = swh_storage.origin_get([{
'url': data.origin2['url'],
}])[0]
assert actual_origin2['url'] == origin2['url']
if 'id' in actual_origin:
del actual_origin['id']
del actual_origin2['id']
assert list(swh_storage.journal_writer.objects) \
== [('origin', actual_origin),
('origin', actual_origin2)]
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['origin'] == 2
def test_origin_add_from_generator(self, swh_storage):
def _ori_gen():
yield data.origin
yield data.origin2
origin1, origin2 = swh_storage.origin_add(_ori_gen())
actual_origin = swh_storage.origin_get([{
'url': data.origin['url'],
}])[0]
assert actual_origin['url'] == origin1['url']
actual_origin2 = swh_storage.origin_get([{
'url': data.origin2['url'],
}])[0]
assert actual_origin2['url'] == origin2['url']
if 'id' in actual_origin:
del actual_origin['id']
del actual_origin2['id']
assert list(swh_storage.journal_writer.objects) \
== [('origin', actual_origin),
('origin', actual_origin2)]
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['origin'] == 2
def test_origin_add_twice(self, swh_storage):
add1 = swh_storage.origin_add([data.origin, data.origin2])
assert list(swh_storage.journal_writer.objects) \
== [('origin', data.origin),
('origin', data.origin2)]
add2 = swh_storage.origin_add([data.origin, data.origin2])
assert list(swh_storage.journal_writer.objects) \
== [('origin', data.origin),
('origin', data.origin2)]
assert add1 == add2
def test_origin_add_validation(self, swh_storage):
with pytest.raises((TypeError, KeyError), match='url'):
swh_storage.origin_add([{'type': 'git'}])
def test_origin_get_legacy(self, swh_storage):
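# origin_get also accepts a single origin dict (legacy calling
# convention) and then returns a single result instead of a list.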
assert swh_storage.origin_get(data.origin) is None
swh_storage.origin_add_one(data.origin)
actual_origin0 = swh_storage.origin_get(
{'url': data.origin['url']})
assert actual_origin0['url'] == data.origin['url']
def test_origin_get(self, swh_storage):
assert swh_storage.origin_get(data.origin) is None
swh_storage.origin_add_one(data.origin)
actual_origin0 = swh_storage.origin_get(
[{'url': data.origin['url']}])
assert len(actual_origin0) == 1
assert actual_origin0[0]['url'] == data.origin['url']
def _generate_random_visits(self, nb_visits=100, start=0, end=7):
"""Generate random visits within the last 2 months (to avoid
computations)
"""
visits = []
today = datetime.datetime.now(tz=datetime.timezone.utc)
for weeks in range(nb_visits, 0, -1):
hours = random.randint(0, 24)
minutes = random.randint(0, 60)
seconds = random.randint(0, 60)
days = random.randint(0, 28)
weeks = random.randint(start, end)
date_visit = today - timedelta(
weeks=weeks, hours=hours, minutes=minutes,
seconds=seconds, days=days)
visits.append(date_visit)
return visits
def test_origin_visit_get_random(self, swh_storage):
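# Visits generated with the default window (0-7 weeks back) should be
# recent enough for origin_visit_get_random to pick among them.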
swh_storage.origin_add(data.origins)
# Add some random visits within the selection range
visits = self._generate_random_visits()
visit_type = 'git'
# Add visits to those origins
for origin in data.origins:
for date_visit in visits:
visit = swh_storage.origin_visit_add(
origin['url'], date=date_visit, type=visit_type)
swh_storage.origin_visit_update(
origin['url'], visit_id=visit['visit'], status='full')
swh_storage.refresh_stat_counters()
stats = swh_storage.stat_counters()
assert stats['origin'] == len(data.origins)
assert stats['origin_visit'] == len(data.origins) * len(visits)
random_origin_visit = swh_storage.origin_visit_get_random(visit_type)
assert random_origin_visit
assert random_origin_visit['origin'] is not None
original_urls = [o['url'] for o in data.origins]
assert random_origin_visit['origin'] in original_urls
def test_origin_visit_get_random_nothing_found(self, swh_storage):
swh_storage.origin_add(data.origins)
visit_type = 'hg'
# Add only visits older than the random selection window, so the
# random selection finds nothing
visits = self._generate_random_visits(nb_visits=3, start=13, end=24)
for origin in data.origins:
for date_visit in visits:
visit = swh_storage.origin_visit_add(
origin['url'], date=date_visit, type=visit_type)
swh_storage.origin_visit_update(
origin['url'], visit_id=visit['visit'], status='full')
random_origin_visit = swh_storage.origin_visit_get_random(visit_type)
assert random_origin_visit is None
def test_origin_get_by_sha1(self, swh_storage):
assert swh_storage.origin_get(data.origin) is None
swh_storage.origin_add_one(data.origin)
origins = list(swh_storage.origin_get_by_sha1([
sha1(data.origin['url'])
]))
assert len(origins) == 1
assert origins[0]['url'] == data.origin['url']
def test_origin_get_by_sha1_not_found(self, swh_storage):
assert swh_storage.origin_get(data.origin) is None
origins = list(swh_storage.origin_get_by_sha1([
sha1(data.origin['url'])
]))
assert len(origins) == 1
assert origins[0] is None
def test_origin_search_single_result(self, swh_storage):
found_origins = list(swh_storage.origin_search(data.origin['url']))
assert len(found_origins) == 0
found_origins = list(swh_storage.origin_search(data.origin['url'],
regexp=True))
assert len(found_origins) == 0
swh_storage.origin_add_one(data.origin)
origin_data = {
'url': data.origin['url']}
found_origins = list(swh_storage.origin_search(data.origin['url']))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin_data
found_origins = list(swh_storage.origin_search(
'.' + data.origin['url'][1:-1] + '.', regexp=True))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin_data
swh_storage.origin_add_one(data.origin2)
origin2_data = {'url': data.origin2['url']}
found_origins = list(swh_storage.origin_search(data.origin2['url']))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin2_data
found_origins = list(swh_storage.origin_search(
'.' + data.origin2['url'][1:-1] + '.', regexp=True))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin2_data
def test_origin_search_no_regexp(self, swh_storage):
swh_storage.origin_add_one(data.origin)
swh_storage.origin_add_one(data.origin2)
origin = swh_storage.origin_get({'url': data.origin['url']})
origin2 = swh_storage.origin_get({'url': data.origin2['url']})
# no pagination
found_origins = list(swh_storage.origin_search('/'))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(swh_storage.origin_search('/', offset=0, limit=1)) # noqa
assert len(found_origins0) == 1
assert found_origins0[0] in [origin, origin2]
# offset=1
found_origins1 = list(swh_storage.origin_search('/', offset=1, limit=1)) # noqa
assert len(found_origins1) == 1
assert found_origins1[0] in [origin, origin2]
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_search_regexp_substring(self, swh_storage):
swh_storage.origin_add_one(data.origin)
swh_storage.origin_add_one(data.origin2)
origin = swh_storage.origin_get({'url': data.origin['url']})
origin2 = swh_storage.origin_get({'url': data.origin2['url']})
# no pagination
found_origins = list(swh_storage.origin_search('/', regexp=True))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(swh_storage.origin_search('/', offset=0, limit=1, regexp=True)) # noqa
assert len(found_origins0) == 1
assert found_origins0[0] in [origin, origin2]
# offset=1
found_origins1 = list(swh_storage.origin_search('/', offset=1, limit=1, regexp=True)) # noqa
assert len(found_origins1) == 1
assert found_origins1[0] in [origin, origin2]
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_search_regexp_fullstring(self, swh_storage):
swh_storage.origin_add_one(data.origin)
swh_storage.origin_add_one(data.origin2)
origin = swh_storage.origin_get({'url': data.origin['url']})
origin2 = swh_storage.origin_get({'url': data.origin2['url']})
# no pagination
found_origins = list(swh_storage.origin_search('.*/.*', regexp=True))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(swh_storage.origin_search('.*/.*', offset=0, limit=1, regexp=True)) # noqa
assert len(found_origins0) == 1
assert found_origins0[0] in [origin, origin2]
# offset=1
found_origins1 = list(swh_storage.origin_search('.*/.*', offset=1, limit=1, regexp=True)) # noqa
assert len(found_origins1) == 1
assert found_origins1[0] in [origin, origin2]
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_visit_add(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin2)
origin_url = data.origin2['url']
# when
date_visit = datetime.datetime.now(datetime.timezone.utc)
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
type=data.type_visit1,
date=date_visit)
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
assert {
'origin': origin_url,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
} in actual_origin_visits
origin_visit = {
'origin': origin_url,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
objects = list(swh_storage.journal_writer.objects)
assert ('origin', data.origin2) in objects
assert ('origin_visit', origin_visit) in objects
def test_origin_visit_get__unknown_origin(self, swh_storage):
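# An unknown origin yields an empty visit list, not an error.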
assert [] == list(swh_storage.origin_visit_get('foo'))
def test_origin_visit_add_default_type(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin2)
origin_url = data.origin2['url']
# when
date_visit = datetime.datetime.now(datetime.timezone.utc)
date_visit2 = date_visit + datetime.timedelta(minutes=1)
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
date=date_visit,
type=data.type_visit1,
)
origin_visit2 = swh_storage.origin_visit_add(
origin_url,
date=date_visit2,
type=data.type_visit2,
)
# then
assert origin_visit1['origin'] == origin_url
assert origin_visit1['visit'] is not None
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
expected_visits = [
{
'origin': origin_url,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
},
{
'origin': origin_url,
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': data.type_visit2,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
},
]
for visit in expected_visits:
assert visit in actual_origin_visits
objects = list(swh_storage.journal_writer.objects)
assert ('origin', data.origin2) in objects
for visit in expected_visits:
assert ('origin_visit', visit) in objects
def test_origin_visit_add_validation(self, swh_storage):
origin_url = swh_storage.origin_add_one(data.origin2)
with pytest.raises((TypeError, psycopg2.ProgrammingError)) as cm:
swh_storage.origin_visit_add(origin_url, date=[b'foo'])
if type(cm.value) == psycopg2.ProgrammingError:
assert cm.value.pgcode \
== psycopg2.errorcodes.UNDEFINED_FUNCTION
def test_origin_visit_update(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin)
origin_url = data.origin['url']
date_visit = datetime.datetime.now(datetime.timezone.utc)
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
date=date_visit,
type=data.type_visit1,
)
date_visit2 = date_visit + datetime.timedelta(minutes=1)
origin_visit2 = swh_storage.origin_visit_add(
origin_url,
date=date_visit2,
type=data.type_visit2
)
swh_storage.origin_add_one(data.origin2)
origin_url2 = data.origin2['url']
origin_visit3 = swh_storage.origin_visit_add(
origin_url2,
date=date_visit2,
type=data.type_visit3
)
# when
visit1_metadata = {
'contents': 42,
'directories': 22,
}
swh_storage.origin_visit_update(
origin_url,
origin_visit1['visit'], status='full',
metadata=visit1_metadata)
swh_storage.origin_visit_update(
origin_url2,
origin_visit3['visit'], status='partial')
# then
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
expected_visits = [{
'origin': origin_url,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'full',
'metadata': visit1_metadata,
'snapshot': None,
}, {
'origin': origin_url,
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': data.type_visit2,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}]
for visit in expected_visits:
assert visit in actual_origin_visits
actual_origin_visits_bis = list(swh_storage.origin_visit_get(
origin_url,
limit=1))
assert actual_origin_visits_bis == [
{
'origin': origin_url,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'full',
'metadata': visit1_metadata,
'snapshot': None,
}]
actual_origin_visits_ter = list(swh_storage.origin_visit_get(
origin_url,
last_visit=origin_visit1['visit']))
assert actual_origin_visits_ter == [
{
'origin': origin_url,
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': data.type_visit2,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}]
actual_origin_visits2 = list(swh_storage.origin_visit_get(
origin_url2))
assert actual_origin_visits2 == [
{
'origin': origin_url2,
'date': date_visit2,
'visit': origin_visit3['visit'],
'type': data.type_visit3,
'status': 'partial',
'metadata': None,
'snapshot': None,
}]
data1 = {
'origin': origin_url,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': origin_url,
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': data.type_visit2,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data3 = {
'origin': origin_url2,
'date': date_visit2,
'visit': origin_visit3['visit'],
'type': data.type_visit3,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data4 = {
'origin': origin_url,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'metadata': visit1_metadata,
'status': 'full',
'snapshot': None,
}
data5 = {
'origin': origin_url2,
'date': date_visit2,
'visit': origin_visit3['visit'],
'type': data.type_visit3,
'status': 'partial',
'metadata': None,
'snapshot': None,
}
objects = list(swh_storage.journal_writer.objects)
assert ('origin', data.origin) in objects
assert ('origin', data.origin2) in objects
assert ('origin_visit', data1) in objects
assert ('origin_visit', data2) in objects
assert ('origin_visit', data3) in objects
assert ('origin_visit', data4) in objects
assert ('origin_visit', data5) in objects
def test_origin_visit_update_validation(self, swh_storage):
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
visit = swh_storage.origin_visit_add(
origin_url,
date=data.date_visit2,
type=data.type_visit2,
)
with pytest.raises((ValueError, psycopg2.DataError),
match='status') as cm:
swh_storage.origin_visit_update(
origin_url, visit['visit'], status='foobar')
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode == \
psycopg2.errorcodes.INVALID_TEXT_REPRESENTATION
def test_origin_visit_find_by_date(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin)
swh_storage.origin_visit_add(
data.origin['url'],
date=data.date_visit2,
type=data.type_visit1,
)
origin_visit2 = swh_storage.origin_visit_add(
data.origin['url'],
date=data.date_visit3,
type=data.type_visit2,
)
origin_visit3 = swh_storage.origin_visit_add(
data.origin['url'],
date=data.date_visit2,
type=data.type_visit3,
)
# Simple case
visit = swh_storage.origin_visit_find_by_date(
data.origin['url'], data.date_visit3)
assert visit['visit'] == origin_visit2['visit']
# There are two visits at the same date, the latest must be returned
visit = swh_storage.origin_visit_find_by_date(
data.origin['url'], data.date_visit2)
assert visit['visit'] == origin_visit3['visit']
def test_origin_visit_find_by_date__unknown_origin(self, swh_storage):
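# The only expectation here is that an unknown origin does not raise.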
swh_storage.origin_visit_find_by_date('foo', data.date_visit2)
def test_origin_visit_update_missing_snapshot(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin)
origin_url = data.origin['url']
origin_visit = swh_storage.origin_visit_add(
origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
# when
swh_storage.origin_visit_update(
origin_url,
origin_visit['visit'],
snapshot=data.snapshot['id'])
# then
actual_origin_visit = swh_storage.origin_visit_get_by(
origin_url,
origin_visit['visit'])
assert actual_origin_visit['snapshot'] == data.snapshot['id']
# when
swh_storage.snapshot_add([data.snapshot])
assert actual_origin_visit['snapshot'] == data.snapshot['id']
def test_origin_visit_get_by(self, swh_storage):
swh_storage.origin_add_one(data.origin)
swh_storage.origin_add_one(data.origin2)
origin_url = data.origin['url']
origin2_url = data.origin2['url']
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
date=data.date_visit2,
type=data.type_visit2,
)
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_url,
origin_visit1['visit'],
snapshot=data.snapshot['id'])
# Add some other {origin, visit} entries
swh_storage.origin_visit_add(
origin_url,
date=data.date_visit3,
type=data.type_visit3,
)
swh_storage.origin_visit_add(
origin2_url,
date=data.date_visit3,
type=data.type_visit3,
)
# when
visit1_metadata = {
'contents': 42,
'directories': 22,
}
swh_storage.origin_visit_update(
origin_url,
origin_visit1['visit'], status='full',
metadata=visit1_metadata)
expected_origin_visit = origin_visit1.copy()
expected_origin_visit.update({
'origin': origin_url,
'visit': origin_visit1['visit'],
'date': data.date_visit2,
'type': data.type_visit2,
'metadata': visit1_metadata,
'status': 'full',
'snapshot': data.snapshot['id'],
})
# when
actual_origin_visit1 = swh_storage.origin_visit_get_by(
origin_url,
origin_visit1['visit'])
# then
assert actual_origin_visit1 == expected_origin_visit
def test_origin_visit_get_by__unknown_origin(self, swh_storage):
assert swh_storage.origin_visit_get_by('foo', 10) is None
def test_origin_visit_upsert_new(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin2)
origin_url = data.origin2['url']
# when
swh_storage.origin_visit_upsert([
{
'origin': origin_url,
'date': data.date_visit2,
'visit': 123,
'type': data.type_visit2,
'status': 'full',
'metadata': None,
'snapshot': None,
},
{
'origin': origin_url,
'date': '2018-01-01 23:00:00+00',
'visit': 1234,
'type': data.type_visit2,
'status': 'full',
'metadata': None,
'snapshot': None,
},
])
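# Note: the second visit's date was given as a string; the expected
# visits below assume it denotes the same instant as data.date_visit3.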
# then
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
assert actual_origin_visits == [
{
'origin': origin_url,
'date': data.date_visit2,
'visit': 123,
'type': data.type_visit2,
'status': 'full',
'metadata': None,
'snapshot': None,
},
{
'origin': origin_url,
'date': data.date_visit3,
'visit': 1234,
'type': data.type_visit2,
'status': 'full',
'metadata': None,
'snapshot': None,
},
]
data1 = {
'origin': origin_url,
'date': data.date_visit2,
'visit': 123,
'type': data.type_visit2,
'status': 'full',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': origin_url,
'date': data.date_visit3,
'visit': 1234,
'type': data.type_visit2,
'status': 'full',
'metadata': None,
'snapshot': None,
}
assert list(swh_storage.journal_writer.objects) == [
('origin', data.origin2),
('origin_visit', data1),
('origin_visit', data2)]
def test_origin_visit_upsert_existing(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin2)
origin_url = data.origin2['url']
# when
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
date=data.date_visit2,
type=data.type_visit1,
)
swh_storage.origin_visit_upsert([{
'origin': origin_url,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'full',
'metadata': None,
'snapshot': None,
}])
# then
assert origin_visit1['origin'] == origin_url
assert origin_visit1['visit'] is not None
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
assert actual_origin_visits == [
{
'origin': origin_url,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'full',
'metadata': None,
'snapshot': None,
}]
data1 = {
'origin': origin_url,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': origin_url,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'full',
'metadata': None,
'snapshot': None,
}
assert list(swh_storage.journal_writer.objects) == [
('origin', data.origin2),
('origin_visit', data1),
('origin_visit', data2)]
def test_origin_visit_get_by_no_result(self, swh_storage):
swh_storage.origin_add([data.origin])
actual_origin_visit = swh_storage.origin_visit_get_by(
data.origin['url'], 999)
assert actual_origin_visit is None
def test_origin_visit_get_latest(self, swh_storage):
swh_storage.origin_add_one(data.origin)
origin_url = data.origin['url']
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit1_id = origin_visit1['visit']
origin_visit2 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit2,
type=data.type_visit2,
)
visit2_id = origin_visit2['visit']
# Add a visit with the same date as the previous one
origin_visit3 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit2,
type=data.type_visit2,
)
visit3_id = origin_visit3['visit']
origin_visit1 = swh_storage.origin_visit_get_by(origin_url, visit1_id)
origin_visit2 = swh_storage.origin_visit_get_by(origin_url, visit2_id)
origin_visit3 = swh_storage.origin_visit_get_by(origin_url, visit3_id)
# Three visits, none with a snapshot
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
assert swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True) is None
# Add snapshot to visit1; require_snapshot=True makes it return visit1,
# while require_snapshot=False still returns visit3 (the latest)
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit1_id,
snapshot=data.complete_snapshot['id'])
assert {**origin_visit1, 'snapshot': data.complete_snapshot['id']} \
== swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True)
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
# Status filter: all three visits are status=ongoing, so no visit
# returned
assert swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full']) is None
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_update(
origin_url,
visit1_id, status='full')
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'])
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
# Add snapshot to visit2 and check that the new snapshot is returned
swh_storage.snapshot_add([data.empty_snapshot])
swh_storage.origin_visit_update(
origin_url, visit2_id,
snapshot=data.empty_snapshot['id'])
assert {**origin_visit2, 'snapshot': data.empty_snapshot['id']} == \
swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True)
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
# Check that the status filter is still working
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'])
# Add snapshot to visit3 (same date as visit2)
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit3_id, snapshot=data.complete_snapshot['id'])
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'])
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'], require_snapshot=True)
assert {
**origin_visit3,
'snapshot': data.complete_snapshot['id']
} == swh_storage.origin_visit_get_latest(origin_url)
assert {
**origin_visit3,
'snapshot': data.complete_snapshot['id']
} == swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True)
def test_person_fullname_unicity(self, swh_storage):
# given (person injection through revisions for example)
revision = data.revision
# create a revision with same committer fullname but wo name and email
revision2 = copy.deepcopy(data.revision2)
revision2['committer'] = dict(revision['committer'])
revision2['committer']['email'] = None
revision2['committer']['name'] = None
swh_storage.revision_add([revision])
swh_storage.revision_add([revision2])
# when getting added revisions
revisions = list(
swh_storage.revision_get([revision['id'], revision2['id']]))
# then
# check committers are the same
assert revisions[0]['committer'] == revisions[1]['committer']
def test_snapshot_add_get_empty(self, swh_storage):
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit_id = origin_visit1['visit']
actual_result = swh_storage.snapshot_add([data.empty_snapshot])
assert actual_result == {'snapshot:add': 1}
swh_storage.origin_visit_update(
origin_url, visit_id, snapshot=data.empty_snapshot['id'])
by_id = swh_storage.snapshot_get(data.empty_snapshot['id'])
assert by_id == {**data.empty_snapshot, 'next_branch': None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin_url, visit_id)
assert by_ov == {**data.empty_snapshot, 'next_branch': None}
data1 = {
'origin': origin_url,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': origin_url,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': data.empty_snapshot['id'],
}
assert list(swh_storage.journal_writer.objects) == \
[('origin', data.origin),
('origin_visit', data1),
('snapshot', data.empty_snapshot),
('origin_visit', data2)]
def test_snapshot_add_get_complete(self, swh_storage):
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit_id = origin_visit1['visit']
actual_result = swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit_id, snapshot=data.complete_snapshot['id'])
assert actual_result == {'snapshot:add': 1}
by_id = swh_storage.snapshot_get(data.complete_snapshot['id'])
assert by_id == {**data.complete_snapshot, 'next_branch': None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin_url, visit_id)
assert by_ov == {**data.complete_snapshot, 'next_branch': None}
def test_snapshot_add_many(self, swh_storage):
actual_result = swh_storage.snapshot_add(
[data.snapshot, data.complete_snapshot])
assert actual_result == {'snapshot:add': 2}
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.complete_snapshot['id'])
assert {**data.snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.snapshot['id'])
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['snapshot'] == 2
def test_snapshot_add_many_from_generator(self, swh_storage):
def _snp_gen():
yield data.snapshot
yield data.complete_snapshot
actual_result = swh_storage.snapshot_add(_snp_gen())
assert actual_result == {'snapshot:add': 2}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()['snapshot'] == 2
def test_snapshot_add_many_incremental(self, swh_storage):
actual_result = swh_storage.snapshot_add([data.complete_snapshot])
assert actual_result == {'snapshot:add': 1}
actual_result2 = swh_storage.snapshot_add(
[data.snapshot, data.complete_snapshot])
assert actual_result2 == {'snapshot:add': 1}
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.complete_snapshot['id'])
assert {**data.snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.snapshot['id'])
def test_snapshot_add_twice(self, swh_storage):
actual_result = swh_storage.snapshot_add([data.empty_snapshot])
assert actual_result == {'snapshot:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('snapshot', data.empty_snapshot)]
actual_result = swh_storage.snapshot_add([data.snapshot])
assert actual_result == {'snapshot:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('snapshot', data.empty_snapshot),
('snapshot', data.snapshot)]
def test_snapshot_add_validation(self, swh_storage):
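# A branch must carry both a 'target' and a 'target_type'.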
snap = copy.deepcopy(data.snapshot)
snap['branches'][b'foo'] = {'target_type': 'revision'}
with pytest.raises(KeyError, match='target'):
swh_storage.snapshot_add([snap])
snap = copy.deepcopy(data.snapshot)
snap['branches'][b'foo'] = {'target': b'\x42'*20}
with pytest.raises(KeyError, match='target_type'):
swh_storage.snapshot_add([snap])
def test_snapshot_add_count_branches(self, swh_storage):
actual_result = swh_storage.snapshot_add([data.complete_snapshot])
assert actual_result == {'snapshot:add': 1}
snp_id = data.complete_snapshot['id']
snp_size = swh_storage.snapshot_count_branches(snp_id)
expected_snp_size = {
'alias': 1,
'content': 1,
'directory': 2,
'release': 1,
'revision': 1,
'snapshot': 1,
None: 1
}
assert snp_size == expected_snp_size
def test_snapshot_add_get_paginated(self, swh_storage):
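# snapshot_get_branches pages over branches in sorted name order;
# 'next_branch' names the first branch of the next page, or None.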
swh_storage.snapshot_add([data.complete_snapshot])
snp_id = data.complete_snapshot['id']
branches = data.complete_snapshot['branches']
branch_names = list(sorted(branches))
# Test branch_from
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_from=b'release')
rel_idx = branch_names.index(b'release')
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in branch_names[rel_idx:]
},
'next_branch': None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_count=1)
expected_snapshot = {
'id': snp_id,
'branches': {
branch_names[0]: branches[branch_names[0]],
},
'next_branch': b'content',
}
assert snapshot == expected_snapshot
# test branch_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_from=b'directory', branches_count=3)
dir_idx = branch_names.index(b'directory')
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in branch_names[dir_idx:dir_idx + 3]
},
'next_branch': branch_names[dir_idx + 3],
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered(self, swh_storage):
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit_id = origin_visit1['visit']
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit_id, snapshot=data.complete_snapshot['id'])
snp_id = data.complete_snapshot['id']
branches = data.complete_snapshot['branches']
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['release', 'revision'])
expected_snapshot = {
'id': snp_id,
'branches': {
name: tgt
for name, tgt in branches.items()
if tgt and tgt['target_type'] in ['release', 'revision']
},
'next_branch': None,
}
assert snapshot == expected_snapshot
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['alias'])
expected_snapshot = {
'id': snp_id,
'branches': {
name: tgt
for name, tgt in branches.items()
if tgt and tgt['target_type'] == 'alias'
},
'next_branch': None,
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered_and_paginated(self, swh_storage):
swh_storage.snapshot_add([data.complete_snapshot])
snp_id = data.complete_snapshot['id']
branches = data.complete_snapshot['branches']
branch_names = list(sorted(branches))
# Test branch_from
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_from=b'directory2')
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in (b'directory2', b'release')
},
'next_branch': None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_count=1)
expected_snapshot = {
'id': snp_id,
'branches': {
b'directory': branches[b'directory']
},
'next_branch': b'directory2',
}
assert snapshot == expected_snapshot
# Test a larger branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_count=2)
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in (b'directory', b'directory2')
},
'next_branch': b'release',
}
assert snapshot == expected_snapshot
# test branch_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_from=b'directory2', branches_count=1)
dir_idx = branch_names.index(b'directory2')
expected_snapshot = {
'id': snp_id,
'branches': {
branch_names[dir_idx]: branches[branch_names[dir_idx]],
},
'next_branch': b'release',
}
assert snapshot == expected_snapshot
def test_snapshot_add_get(self, swh_storage):
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit_id = origin_visit1['visit']
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_url, visit_id, snapshot=data.snapshot['id'])
by_id = swh_storage.snapshot_get(data.snapshot['id'])
assert by_id == {**data.snapshot, 'next_branch': None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin_url, visit_id)
assert by_ov == {**data.snapshot, 'next_branch': None}
origin_visit_info = swh_storage.origin_visit_get_by(
origin_url, visit_id)
assert origin_visit_info['snapshot'] == data.snapshot['id']
def test_snapshot_add_nonexistent_visit(self, swh_storage):
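# Attaching a snapshot to a visit that was never created must fail,
# leaving only the snapshot itself in the journal.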
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
visit_id = 54164461156
swh_storage.journal_writer.objects[:] = []
swh_storage.snapshot_add([data.snapshot])
with pytest.raises(ValueError):
swh_storage.origin_visit_update(
origin_url, visit_id, snapshot=data.snapshot['id'])
assert list(swh_storage.journal_writer.objects) == [
('snapshot', data.snapshot)]
def test_snapshot_add_twice__by_origin_visit(self, swh_storage):
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit1_id = origin_visit1['visit']
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_url, visit1_id, snapshot=data.snapshot['id'])
by_ov1 = swh_storage.snapshot_get_by_origin_visit(
origin_url, visit1_id)
assert by_ov1 == {**data.snapshot, 'next_branch': None}
origin_visit2 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit2,
type=data.type_visit2,
)
visit2_id = origin_visit2['visit']
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_url, visit2_id, snapshot=data.snapshot['id'])
by_ov2 = swh_storage.snapshot_get_by_origin_visit(
origin_url, visit2_id)
assert by_ov2 == {**data.snapshot, 'next_branch': None}
data1 = {
'origin': origin_url,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': origin_url,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.type_visit1,
'status': 'ongoing',
'metadata': None,
'snapshot': data.snapshot['id'],
}
data3 = {
'origin': origin_url,
'date': data.date_visit2,
'visit': origin_visit2['visit'],
'type': data.type_visit2,
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data4 = {
'origin': origin_url,
'date': data.date_visit2,
'visit': origin_visit2['visit'],
'type': data.type_visit2,
'status': 'ongoing',
'metadata': None,
'snapshot': data.snapshot['id'],
}
assert list(swh_storage.journal_writer.objects) \
== [('origin', data.origin),
('origin_visit', data1),
('snapshot', data.snapshot),
('origin_visit', data2),
('origin_visit', data3),
('origin_visit', data4)]
def test_snapshot_get_latest(self, swh_storage):
origin_url = data.origin['url']
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit1_id = origin_visit1['visit']
origin_visit2 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit2,
type=data.type_visit2,
)
visit2_id = origin_visit2['visit']
# Add a visit with the same date as the previous one
origin_visit3 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit2,
type=data.type_visit3,
)
visit3_id = origin_visit3['visit']
# Three visits, none with a snapshot: latest snapshot is None
assert swh_storage.snapshot_get_latest(origin_url) is None
# Add snapshot to visit1, latest snapshot = visit 1 snapshot
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit1_id, snapshot=data.complete_snapshot['id'])
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
# Status filter: all three visits are status=ongoing, so no snapshot
# returned
assert swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full']) is None
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_update(origin_url, visit1_id, status='full')
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full'])
# Add snapshot to visit2 and check that the new snapshot is returned
swh_storage.snapshot_add([data.empty_snapshot])
swh_storage.origin_visit_update(
origin_url, visit2_id, snapshot=data.empty_snapshot['id'])
assert {**data.empty_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
# Check that the status filter is still working
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full'])
# Add snapshot to visit3 (same date as visit2) and check that
# the new snapshot is returned
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit3_id, snapshot=data.complete_snapshot['id'])
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
def test_snapshot_get_latest__missing_snapshot(self, swh_storage):
# Origin does not exist
origin_url = data.origin['url']
assert swh_storage.snapshot_get_latest(origin_url) is None
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit1,
type=data.type_visit1,
)
visit1_id = origin_visit1['visit']
origin_visit2 = swh_storage.origin_visit_add(
origin=origin_url,
date=data.date_visit2,
type=data.type_visit2,
)
visit2_id = origin_visit2['visit']
# Two visits, both with no snapshot: latest snapshot is None
assert swh_storage.snapshot_get_latest(origin_url) is None
# Add unknown snapshot to visit1, check that the inconsistency is
# detected
swh_storage.origin_visit_update(
origin_url,
visit1_id, snapshot=data.complete_snapshot['id'])
with pytest.raises(ValueError):
swh_storage.snapshot_get_latest(
origin_url)
# Status filter: both visits are status=ongoing, so no snapshot
# returned
assert swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full']) is None
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_update(
origin_url,
visit1_id, status='full')
with pytest.raises(ValueError):
swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full'])
# Actually add the snapshot and check status filter again
swh_storage.snapshot_add([data.complete_snapshot])
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
# Add unknown snapshot to visit2 and check that the inconsistency
# is detected
swh_storage.origin_visit_update(
origin_url,
visit2_id, snapshot=data.snapshot['id'])
with pytest.raises(ValueError):
swh_storage.snapshot_get_latest(
origin_url)
# Actually add that snapshot and check that the new one is returned
swh_storage.snapshot_add([data.snapshot])
assert {**data.snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
def test_snapshot_get_random(self, swh_storage):
swh_storage.snapshot_add(
[data.snapshot, data.empty_snapshot, data.complete_snapshot])
assert swh_storage.snapshot_get_random() in {
data.snapshot['id'], data.empty_snapshot['id'],
data.complete_snapshot['id']}
def test_snapshot_missing(self, swh_storage):
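# snapshot_missing must report only the ids that were never added.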
snap = data.snapshot
missing_snap = data.empty_snapshot
snapshots = [snap['id'], missing_snap['id']]
swh_storage.snapshot_add([snap])
missing_snapshots = swh_storage.snapshot_missing(snapshots)
assert list(missing_snapshots) == [missing_snap['id']]
def test_stat_counters(self, swh_storage):
expected_keys = ['content', 'directory',
'origin', 'revision']
# Initially, all counters are 0
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
assert counters[key] == 0
# Add a content. Only the content counter should increase.
swh_storage.content_add([data.cont])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
if key != 'content':
assert counters[key] == 0
assert counters['content'] == 1
# Add other objects. Check their counter increased as well.
swh_storage.origin_add_one(data.origin2)
origin_visit1 = swh_storage.origin_visit_add(
origin=data.origin2['url'],
date=data.date_visit2,
type=data.type_visit2,
)
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
data.origin2['url'], origin_visit1['visit'],
snapshot=data.snapshot['id'])
swh_storage.directory_add([data.dir])
swh_storage.revision_add([data.revision])
swh_storage.release_add([data.release])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert counters['content'] == 1
assert counters['directory'] == 1
assert counters['snapshot'] == 1
assert counters['origin'] == 1
assert counters['origin_visit'] == 1
assert counters['revision'] == 1
assert counters['release'] == 1
if 'person' in counters:
assert counters['person'] == 3
def test_content_find_ctime(self, swh_storage):
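        """content_find returns the insertion time (ctime) of a content"""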
cont = data.cont.copy()
del cont['data']
now = datetime.datetime.now(tz=datetime.timezone.utc)
cont['ctime'] = now
swh_storage.content_add_metadata([cont])
actually_present = swh_storage.content_find({'sha1': cont['sha1']})
        # check that the returned ctime matches `now` to within one second
dt = actually_present[0]['ctime'] - now
assert abs(dt.total_seconds()) <= 1
del actually_present[0]['ctime']
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
def test_content_find_with_present_content(self, swh_storage):
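        """content_find retrieves a stored content by any of its hashes"""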
        # 1. search by sha1
cont = data.cont
swh_storage.content_add([cont, data.cont2])
actually_present = swh_storage.content_find(
{'sha1': cont['sha1']}
)
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
        # 2. search by sha1_git
actually_present = swh_storage.content_find(
{'sha1_git': cont['sha1_git']})
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
        # 3. search by sha256
actually_present = swh_storage.content_find(
{'sha256': cont['sha256']})
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
        # 4. search by all hashes at once
actually_present = swh_storage.content_find({
'sha1': cont['sha1'],
'sha1_git': cont['sha1_git'],
'sha256': cont['sha256'],
'blake2s256': cont['blake2s256'],
})
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
def test_content_find_with_non_present_content(self, swh_storage):
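        """content_find on a content that was never added returns nothing"""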
        # 1. search by the sha1 of a missing content
missing_cont = data.missing_cont
actually_present = swh_storage.content_find(
{'sha1': missing_cont['sha1']})
assert actually_present == []
        # 2. search by its sha1_git
actually_present = swh_storage.content_find(
{'sha1_git': missing_cont['sha1_git']})
assert actually_present == []
        # 3. search by its sha256
actually_present = swh_storage.content_find(
{'sha256': missing_cont['sha256']})
assert actually_present == []
def test_content_find_with_duplicate_input(self, swh_storage):
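        """content_find on colliding sha256/blake2s256 returns all matches"""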
cont1 = data.cont
duplicate_cont = cont1.copy()
# Create fake data with colliding sha256 and blake2s256
sha1_array = bytearray(duplicate_cont['sha1'])
sha1_array[0] += 1
duplicate_cont['sha1'] = bytes(sha1_array)
sha1git_array = bytearray(duplicate_cont['sha1_git'])
sha1git_array[0] += 1
duplicate_cont['sha1_git'] = bytes(sha1git_array)
# Inject the data
swh_storage.content_add([cont1, duplicate_cont])
finder = {'blake2s256': duplicate_cont['blake2s256'],
'sha256': duplicate_cont['sha256']}
actual_result = list(swh_storage.content_find(finder))
cont1.pop('data')
duplicate_cont.pop('data')
actual_result[0].pop('ctime')
actual_result[1].pop('ctime')
expected_result = [
cont1, duplicate_cont
]
for result in expected_result:
assert result in actual_result
def test_content_find_with_duplicate_sha256(self, swh_storage):
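        """content_find on a sha256 collision returns both contents"""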
cont1 = data.cont
duplicate_cont = cont1.copy()
# Create fake data with colliding sha256
for hashalgo in ('sha1', 'sha1_git', 'blake2s256'):
value = bytearray(duplicate_cont[hashalgo])
value[0] += 1
duplicate_cont[hashalgo] = bytes(value)
swh_storage.content_add([cont1, duplicate_cont])
finder = {
'sha256': duplicate_cont['sha256']
}
actual_result = list(swh_storage.content_find(finder))
assert len(actual_result) == 2
cont1.pop('data')
duplicate_cont.pop('data')
actual_result[0].pop('ctime')
actual_result[1].pop('ctime')
expected_result = [
cont1, duplicate_cont
]
assert expected_result == sorted(actual_result,
key=lambda x: x['sha1'])
# Find with both sha256 and blake2s256
finder = {
'sha256': duplicate_cont['sha256'],
'blake2s256': duplicate_cont['blake2s256']
}
actual_result = list(swh_storage.content_find(finder))
assert len(actual_result) == 1
actual_result[0].pop('ctime')
        expected_result = [duplicate_cont]
        assert actual_result == expected_result
def test_content_find_with_duplicate_blake2s256(self, swh_storage):
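        """content_find on a blake2s256 collision returns both contents"""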
cont1 = data.cont
duplicate_cont = cont1.copy()
        # Create fake data with colliding blake2s256
sha1_array = bytearray(duplicate_cont['sha1'])
sha1_array[0] += 1
duplicate_cont['sha1'] = bytes(sha1_array)
sha1git_array = bytearray(duplicate_cont['sha1_git'])
sha1git_array[0] += 1
duplicate_cont['sha1_git'] = bytes(sha1git_array)
sha256_array = bytearray(duplicate_cont['sha256'])
sha256_array[0] += 1
duplicate_cont['sha256'] = bytes(sha256_array)
swh_storage.content_add([cont1, duplicate_cont])
finder = {
'blake2s256': duplicate_cont['blake2s256']
}
actual_result = list(swh_storage.content_find(finder))
cont1.pop('data')
duplicate_cont.pop('data')
actual_result[0].pop('ctime')
actual_result[1].pop('ctime')
expected_result = [
cont1, duplicate_cont
]
for result in expected_result:
assert result in actual_result
# Find with both sha256 and blake2s256
finder = {
'sha256': duplicate_cont['sha256'],
'blake2s256': duplicate_cont['blake2s256']
}
actual_result = list(swh_storage.content_find(finder))
actual_result[0].pop('ctime')
expected_result = [
duplicate_cont
]
assert expected_result == actual_result
def test_content_find_bad_input(self, swh_storage):
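        """content_find rejects an empty or unknown-key filter"""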
# 1. with bad input
with pytest.raises(ValueError):
swh_storage.content_find({}) # empty is bad
# 2. with bad input
with pytest.raises(ValueError):
swh_storage.content_find(
{'unknown-sha1': 'something'}) # not the right key
def test_object_find_by_sha1_git(self, swh_storage):
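        """object_find_by_sha1_git resolves each id to its object type"""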
sha1_gits = [b'00000000000000000000']
expected = {
b'00000000000000000000': [],
}
swh_storage.content_add([data.cont])
sha1_gits.append(data.cont['sha1_git'])
expected[data.cont['sha1_git']] = [{
'sha1_git': data.cont['sha1_git'],
'type': 'content',
}]
swh_storage.directory_add([data.dir])
sha1_gits.append(data.dir['id'])
expected[data.dir['id']] = [{
'sha1_git': data.dir['id'],
'type': 'directory',
}]
swh_storage.revision_add([data.revision])
sha1_gits.append(data.revision['id'])
expected[data.revision['id']] = [{
'sha1_git': data.revision['id'],
'type': 'revision',
}]
swh_storage.release_add([data.release])
sha1_gits.append(data.release['id'])
expected[data.release['id']] = [{
'sha1_git': data.release['id'],
'type': 'release',
}]
ret = swh_storage.object_find_by_sha1_git(sha1_gits)
assert expected == ret
def test_tool_add(self, swh_storage):
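        """tool_add is idempotent: re-adding a tool yields the same id"""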
tool = {
'name': 'some-unknown-tool',
'version': 'some-version',
'configuration': {"debian-package": "some-package"},
}
actual_tool = swh_storage.tool_get(tool)
assert actual_tool is None # does not exist
# add it
actual_tools = swh_storage.tool_add([tool])
assert len(actual_tools) == 1
actual_tool = actual_tools[0]
assert actual_tool is not None # now it exists
new_id = actual_tool.pop('id')
assert actual_tool == tool
actual_tools2 = swh_storage.tool_add([tool])
actual_tool2 = actual_tools2[0]
assert actual_tool2 is not None # now it exists
new_id2 = actual_tool2.pop('id')
assert new_id == new_id2
assert actual_tool == actual_tool2
def test_tool_add_multiple(self, swh_storage):
tool = {
'name': 'some-unknown-tool',
'version': 'some-version',
'configuration': {"debian-package": "some-package"},
}
actual_tools = list(swh_storage.tool_add([tool]))
assert len(actual_tools) == 1
new_tools = [tool, {
'name': 'yet-another-tool',
'version': 'version',
'configuration': {},
}]
actual_tools = swh_storage.tool_add(new_tools)
assert len(actual_tools) == 2
# order not guaranteed, so we iterate over results to check
for tool in actual_tools:
_id = tool.pop('id')
assert _id is not None
assert tool in new_tools
def test_tool_get_missing(self, swh_storage):
tool = {
'name': 'unknown-tool',
'version': '3.1.0rc2-31-ga2cbb8c',
'configuration': {"command_line": "nomossa "},
}
actual_tool = swh_storage.tool_get(tool)
assert actual_tool is None
def test_tool_metadata_get_missing_context(self, swh_storage):
tool = {
'name': 'swh-metadata-translator',
'version': '0.0.1',
'configuration': {"context": "unknown-context"},
}
actual_tool = swh_storage.tool_get(tool)
assert actual_tool is None
def test_tool_metadata_get(self, swh_storage):
tool = {
'name': 'swh-metadata-translator',
'version': '0.0.1',
'configuration': {"type": "local", "context": "npm"},
}
expected_tool = swh_storage.tool_add([tool])[0]
# when
actual_tool = swh_storage.tool_get(tool)
# then
assert expected_tool == actual_tool
def test_metadata_provider_get(self, swh_storage):
# given
no_provider = swh_storage.metadata_provider_get(6459456445615)
assert no_provider is None
# when
provider_id = swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
actual_provider = swh_storage.metadata_provider_get(provider_id)
expected_provider = {
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
}
# then
del actual_provider['id']
        for key, value in expected_provider.items():
            assert actual_provider[key] == value
def test_metadata_provider_get_by(self, swh_storage):
# given
no_provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
assert no_provider is None
# when
provider_id = swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
actual_provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
# then
        assert provider_id == actual_provider['id']
def test_origin_metadata_add(self, swh_storage):
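        """each origin_metadata_add call adds one metadata record"""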
# given
origin = data.origin
        swh_storage.origin_add([origin])
tools = swh_storage.tool_add([data.metadata_tool])
tool = tools[0]
swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
        # when adding two metadata records for the same origin
n_om = len(list(swh_storage.origin_metadata_get_by(origin['url'])))
swh_storage.origin_metadata_add(
origin['url'],
data.origin_metadata['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata['metadata'])
swh_storage.origin_metadata_add(
origin['url'],
'2015-01-01 23:00:00+00',
provider['id'],
tool['id'],
data.origin_metadata2['metadata'])
n_actual_om = len(list(
swh_storage.origin_metadata_get_by(origin['url'])))
# then
assert n_actual_om == n_om + 2
def test_origin_metadata_get(self, swh_storage):
# given
origin_url = data.origin['url']
origin_url2 = data.origin2['url']
swh_storage.origin_add([data.origin])
swh_storage.origin_add([data.origin2])
swh_storage.metadata_provider_add(data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
tool = swh_storage.tool_add([data.metadata_tool])[0]
        # when adding two metadata records for origin_url and one for
        # origin_url2
swh_storage.origin_metadata_add(
origin_url,
data.origin_metadata['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata['metadata'])
swh_storage.origin_metadata_add(
origin_url2,
data.origin_metadata2['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata2['metadata'])
swh_storage.origin_metadata_add(
origin_url,
data.origin_metadata2['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata2['metadata'])
        all_metadatas = sorted(swh_storage.origin_metadata_get_by(
            origin_url), key=lambda x: x['discovery_date'])
metadatas_for_origin2 = list(swh_storage.origin_metadata_get_by(
origin_url2))
expected_results = [{
'origin_url': origin_url,
'discovery_date': datetime.datetime(
2015, 1, 1, 23, 0,
tzinfo=datetime.timezone.utc),
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
},
'provider_id': provider['id'],
'provider_name': 'hal',
'provider_type': 'deposit-client',
'provider_url': 'http:///hal/inria',
'tool_id': tool['id']
}, {
'origin_url': origin_url,
'discovery_date': datetime.datetime(
2017, 1, 1, 23, 0,
tzinfo=datetime.timezone.utc),
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
},
'provider_id': provider['id'],
'provider_name': 'hal',
'provider_type': 'deposit-client',
'provider_url': 'http:///hal/inria',
'tool_id': tool['id']
}]
# then
assert len(all_metadatas) == 2
assert len(metadatas_for_origin2) == 1
assert all_metadatas == expected_results
def test_metadata_provider_add(self, swh_storage):
provider = {
'provider_name': 'swMATH',
'provider_type': 'registry',
'provider_url': 'http://www.swmath.org/',
'metadata': {
'email': 'contact@swmath.org',
'license': 'All rights reserved'
}
}
provider['id'] = provider_id = swh_storage.metadata_provider_add(
**provider)
assert provider == swh_storage.metadata_provider_get_by(
{'provider_name': 'swMATH',
'provider_url': 'http://www.swmath.org/'})
assert provider == swh_storage.metadata_provider_get(provider_id)
def test_origin_metadata_get_by_provider_type(self, swh_storage):
# given
origin_url = data.origin['url']
origin_url2 = data.origin2['url']
swh_storage.origin_add([data.origin])
swh_storage.origin_add([data.origin2])
provider1_id = swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
provider1 = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
assert provider1 == swh_storage.metadata_provider_get(provider1_id)
provider2_id = swh_storage.metadata_provider_add(
'swMATH',
'registry',
'http://www.swmath.org/',
{'email': 'contact@swmath.org',
'license': 'All rights reserved'})
provider2 = swh_storage.metadata_provider_get_by({
'provider_name': 'swMATH',
'provider_url': 'http://www.swmath.org/'
})
assert provider2 == swh_storage.metadata_provider_get(provider2_id)
        # use the only tool currently inserted in data.sql; for this
        # provider it should eventually be a crawler tool (not yet
        # implemented)
tool = swh_storage.tool_add([data.metadata_tool])[0]
        # when adding one metadata record for each of the two origins
swh_storage.origin_metadata_add(
origin_url,
data.origin_metadata['discovery_date'],
provider1['id'],
tool['id'],
data.origin_metadata['metadata'])
swh_storage.origin_metadata_add(
origin_url2,
data.origin_metadata2['discovery_date'],
provider2['id'],
tool['id'],
data.origin_metadata2['metadata'])
provider_type = 'registry'
m_by_provider = list(swh_storage.origin_metadata_get_by(
origin_url2,
provider_type))
for item in m_by_provider:
if 'id' in item:
del item['id']
expected_results = [{
'origin_url': origin_url2,
'discovery_date': datetime.datetime(
2017, 1, 1, 23, 0,
tzinfo=datetime.timezone.utc),
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
},
'provider_id': provider2['id'],
'provider_name': 'swMATH',
'provider_type': provider_type,
'provider_url': 'http://www.swmath.org/',
'tool_id': tool['id']
}]
# then
assert len(m_by_provider) == 1
assert m_by_provider == expected_results
class TestStorageGeneratedData:
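    """Tests running against the larger, fixture-generated datasets
    (swh_contents, swh_origins)"""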
def test_generate_content_get(self, swh_storage, swh_contents):
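        """content_get returns the full contents for all present sha1s"""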
contents_with_data = [c for c in swh_contents
if c['status'] != 'absent']
# input the list of sha1s we want from storage
get_sha1s = [c['sha1'] for c in contents_with_data]
# retrieve contents
actual_contents = list(swh_storage.content_get(get_sha1s))
assert None not in actual_contents
assert_contents_ok(contents_with_data, actual_contents)
def test_generate_content_get_metadata(self, swh_storage, swh_contents):
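        """content_get_metadata returns metadata for all present sha1s"""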
# input the list of sha1s we want from storage
expected_contents = [c for c in swh_contents
if c['status'] != 'absent']
get_sha1s = [c['sha1'] for c in expected_contents]
# retrieve contents
meta_contents = swh_storage.content_get_metadata(get_sha1s)
assert len(list(meta_contents)) == len(get_sha1s)
actual_contents = []
for contents in meta_contents.values():
actual_contents.extend(contents)
keys_to_check = {'length', 'status',
'sha1', 'sha1_git', 'sha256', 'blake2s256'}
assert_contents_ok(expected_contents, actual_contents,
keys_to_check=keys_to_check)
def test_generate_content_get_range(self, swh_storage, swh_contents):
"""content_get_range returns complete range"""
present_contents = [c for c in swh_contents
if c['status'] != 'absent']
        get_sha1s = sorted([c['sha1'] for c in present_contents])
start = get_sha1s[2]
end = get_sha1s[-2]
actual_result = swh_storage.content_get_range(start, end)
assert actual_result['next'] is None
actual_contents = actual_result['contents']
expected_contents = [c for c in present_contents
if start <= c['sha1'] <= end]
if expected_contents:
assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
else:
assert actual_contents == []
def test_generate_content_get_range_full(self, swh_storage, swh_contents):
"""content_get_range for a full range returns all available contents"""
present_contents = [c for c in swh_contents
if c['status'] != 'absent']
start = b'0' * 40
end = b'f' * 40
actual_result = swh_storage.content_get_range(start, end)
assert actual_result['next'] is None
actual_contents = actual_result['contents']
expected_contents = [c for c in present_contents
if start <= c['sha1'] <= end]
if expected_contents:
assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
else:
assert actual_contents == []
def test_generate_content_get_range_empty(self, swh_storage, swh_contents):
"""content_get_range for an empty range returns nothing"""
start = b'0' * 40
end = b'f' * 40
actual_result = swh_storage.content_get_range(end, start)
assert actual_result['next'] is None
assert len(actual_result['contents']) == 0
def test_generate_content_get_range_limit_none(self, swh_storage):
"""content_get_range call with wrong limit input should fail"""
with pytest.raises(ValueError) as e:
swh_storage.content_get_range(start=None, end=None, limit=None)
assert e.value.args == ('Development error: limit should not be None',)
def test_generate_content_get_range_no_limit(
self, swh_storage, swh_contents):
"""content_get_range returns contents within range provided"""
# input the list of sha1s we want from storage
get_sha1s = sorted([c['sha1'] for c in swh_contents
if c['status'] != 'absent'])
start = get_sha1s[0]
end = get_sha1s[-1]
# retrieve contents
actual_result = swh_storage.content_get_range(start, end)
actual_contents = actual_result['contents']
assert actual_result['next'] is None
assert len(actual_contents) == len(get_sha1s)
expected_contents = [c for c in swh_contents
if c['status'] != 'absent']
assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
def test_generate_content_get_range_limit(self, swh_storage, swh_contents):
"""content_get_range paginates results if limit exceeded"""
contents_map = {c['sha1']: c for c in swh_contents}
# input the list of sha1s we want from storage
get_sha1s = sorted([c['sha1'] for c in swh_contents
if c['status'] != 'absent'])
start = get_sha1s[0]
end = get_sha1s[-1]
# retrieve contents limited to n-1 results
limited_results = len(get_sha1s) - 1
actual_result = swh_storage.content_get_range(
start, end, limit=limited_results)
actual_contents = actual_result['contents']
assert actual_result['next'] == get_sha1s[-1]
assert len(actual_contents) == limited_results
expected_contents = [contents_map[sha1] for sha1 in get_sha1s[:-1]]
assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
# retrieve next part
actual_results2 = swh_storage.content_get_range(start=end, end=end)
assert actual_results2['next'] is None
actual_contents2 = actual_results2['contents']
assert len(actual_contents2) == 1
assert_contents_ok(
[contents_map[get_sha1s[-1]]], actual_contents2, ['sha1'])
def test_origin_get_range_from_zero(self, swh_storage, swh_origins):
actual_origins = list(
swh_storage.origin_get_range(origin_from=0,
origin_count=0))
assert len(actual_origins) == 0
actual_origins = list(
swh_storage.origin_get_range(origin_from=0,
origin_count=1))
assert len(actual_origins) == 1
assert actual_origins[0]['id'] == 1
assert actual_origins[0]['url'] == swh_origins[0]['url']
@pytest.mark.parametrize('origin_from,origin_count', [
(1, 1), (1, 10), (1, 20), (1, 101), (11, 0),
(11, 10), (91, 11)])
def test_origin_get_range(
self, swh_storage, swh_origins, origin_from, origin_count):
actual_origins = list(
swh_storage.origin_get_range(origin_from=origin_from,
origin_count=origin_count))
origins_with_id = list(enumerate(swh_origins, start=1))
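        # origin ids are 1-based (cf. test_origin_get_range_from_zero), so
        # slicing the 0-based list needs the origin_from-1 offset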
expected_origins = [
{
'url': origin['url'],
'id': origin_id,
}
for (origin_id, origin)
in origins_with_id[origin_from-1:origin_from+origin_count-1]
]
assert actual_origins == expected_origins
@pytest.mark.parametrize('limit', [1, 7, 10, 100, 1000])
def test_origin_list(self, swh_storage, swh_origins, limit):
returned_origins = []
page_token = None
i = 0
while True:
result = swh_storage.origin_list(
page_token=page_token, limit=limit)
assert len(result['origins']) <= limit
returned_origins.extend(
origin['url'] for origin in result['origins'])
i += 1
page_token = result.get('next_page_token')
if page_token is None:
assert i*limit >= len(swh_origins)
break
else:
assert len(result['origins']) == limit
expected_origins = [origin['url'] for origin in swh_origins]
assert sorted(returned_origins) == sorted(expected_origins)
ORIGINS = [
'https://github.com/user1/repo1',
'https://github.com/user2/repo1',
'https://github.com/user3/repo1',
'https://gitlab.com/user1/repo1',
'https://gitlab.com/user2/repo1',
'https://forge.softwareheritage.org/source/repo1',
]
def test_origin_count(self, swh_storage):
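        """origin_count counts origins matching a substring or a regexp"""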
swh_storage.origin_add([{'url': url} for url in self.ORIGINS])
assert swh_storage.origin_count('github') == 3
assert swh_storage.origin_count('gitlab') == 2
assert swh_storage.origin_count('.*user.*', regexp=True) == 5
assert swh_storage.origin_count('.*user.*', regexp=False) == 0
assert swh_storage.origin_count('.*user1.*', regexp=True) == 2
assert swh_storage.origin_count('.*user1.*', regexp=False) == 0
def test_origin_count_with_visit_no_visits(self, swh_storage):
swh_storage.origin_add([{'url': url} for url in self.ORIGINS])
# none of them have visits, so with_visit=True => 0
assert swh_storage.origin_count('github', with_visit=True) == 0
assert swh_storage.origin_count('gitlab', with_visit=True) == 0
assert swh_storage.origin_count('.*user.*', regexp=True,
with_visit=True) == 0
assert swh_storage.origin_count('.*user.*', regexp=False,
with_visit=True) == 0
assert swh_storage.origin_count('.*user1.*', regexp=True,
with_visit=True) == 0
assert swh_storage.origin_count('.*user1.*', regexp=False,
with_visit=True) == 0
def test_origin_count_with_visit_with_visits_no_snapshot(
self, swh_storage):
swh_storage.origin_add([{'url': url} for url in self.ORIGINS])
now = datetime.datetime.now(tz=datetime.timezone.utc)
swh_storage.origin_visit_add(
origin='https://github.com/user1/repo1', date=now, type='git')
assert swh_storage.origin_count('github', with_visit=False) == 3
# it has a visit, but no snapshot, so with_visit=True => 0
assert swh_storage.origin_count('github', with_visit=True) == 0
assert swh_storage.origin_count('gitlab', with_visit=False) == 2
# these gitlab origins have no visit
assert swh_storage.origin_count('gitlab', with_visit=True) == 0
assert swh_storage.origin_count('github.*user1', regexp=True,
with_visit=False) == 1
assert swh_storage.origin_count('github.*user1', regexp=True,
with_visit=True) == 0
assert swh_storage.origin_count('github', regexp=True,
with_visit=True) == 0
def test_origin_count_with_visit_with_visits_and_snapshot(
self, swh_storage):
swh_storage.origin_add([{'url': url} for url in self.ORIGINS])
now = datetime.datetime.now(tz=datetime.timezone.utc)
swh_storage.snapshot_add([data.snapshot])
visit = swh_storage.origin_visit_add(
origin='https://github.com/user1/repo1', date=now, type='git')
swh_storage.origin_visit_update(
origin='https://github.com/user1/repo1', visit_id=visit['visit'],
snapshot=data.snapshot['id'])
assert swh_storage.origin_count('github', with_visit=False) == 3
# github/user1 has a visit and a snapshot, so with_visit=True => 1
assert swh_storage.origin_count('github', with_visit=True) == 1
assert swh_storage.origin_count('github.*user1', regexp=True,
with_visit=False) == 1
assert swh_storage.origin_count('github.*user1', regexp=True,
with_visit=True) == 1
assert swh_storage.origin_count('github', regexp=True,
with_visit=True) == 1
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(strategies.lists(objects(), max_size=2))
def test_add_arbitrary(self, swh_storage, objects):
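        """arbitrary hypothesis-generated objects can be added without
        error (HashCollision excepted)"""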
for (obj_type, obj) in objects:
obj = obj.to_dict()
if obj_type == 'origin_visit':
origin = obj.pop('origin')
swh_storage.origin_add_one({'url': origin})
if 'visit' in obj:
del obj['visit']
swh_storage.origin_visit_add(
origin, obj['date'], obj['type'])
else:
method = getattr(swh_storage, obj_type + '_add')
try:
method([obj])
except HashCollision:
pass
@pytest.mark.db
class TestLocalStorage:
"""Test the local storage"""
# This test is only relevant on the local storage, with an actual
# objstorage raising an exception
def test_content_add_objstorage_exception(self, swh_storage):
swh_storage.objstorage.add = Mock(
side_effect=Exception('mocked broken objstorage')
)
with pytest.raises(Exception) as e:
swh_storage.content_add([data.cont])
assert e.value.args == ('mocked broken objstorage',)
missing = list(swh_storage.content_missing([data.cont]))
assert missing == [data.cont['sha1']]
@pytest.mark.db
class TestStorageRaceConditions:
@pytest.mark.xfail
def test_content_add_race(self, swh_storage):
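        """two threads adding the same content concurrently should both
        succeed; marked xfail as this race is a known issue"""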
results = queue.Queue()
def thread():
try:
with db_transaction(swh_storage) as (db, cur):
ret = swh_storage.content_add([data.cont], db=db,
cur=cur)
results.put((threading.get_ident(), 'data', ret))
except Exception as e:
results.put((threading.get_ident(), 'exc', e))
t1 = threading.Thread(target=thread)
t2 = threading.Thread(target=thread)
t1.start()
# this avoids the race condition
# import time
# time.sleep(1)
t2.start()
t1.join()
t2.join()
r1 = results.get(block=False)
r2 = results.get(block=False)
with pytest.raises(queue.Empty):
results.get(block=False)
assert r1[0] != r2[0]
assert r1[1] == 'data', 'Got exception %r in Thread%s' % (r1[2], r1[0])
assert r2[1] == 'data', 'Got exception %r in Thread%s' % (r2[2], r2[0])
@pytest.mark.db
class TestPgStorage:
"""This class is dedicated for the rare case where the schema needs to
be altered dynamically.
Otherwise, the tests could be blocking when ran altogether.
"""
def test_content_update_with_new_cols(self, swh_storage):
swh_storage.journal_writer = None # TODO, not supported
with db_transaction(swh_storage) as (_, cur):
cur.execute("""alter table content
add column test text default null,
add column test2 text default null""")
cont = copy.deepcopy(data.cont2)
swh_storage.content_add([cont])
cont['test'] = 'value-1'
cont['test2'] = 'value-2'
swh_storage.content_update([cont], keys=['test', 'test2'])
with db_transaction(swh_storage) as (_, cur):
cur.execute(
'''SELECT sha1, sha1_git, sha256, length, status,
test, test2
FROM content WHERE sha1 = %s''',
(cont['sha1'],))
datum = cur.fetchone()
assert datum == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['length'], 'visible',
cont['test'], cont['test2'])
with db_transaction(swh_storage) as (_, cur):
cur.execute("""alter table content drop column test,
drop column test2""")
def test_content_add_db(self, swh_storage):
cont = data.cont
actual_result = swh_storage.content_add([cont])
assert actual_result == {
'content:add': 1,
'content:add:bytes': cont['length'],
'skipped_content:add': 0
}
if hasattr(swh_storage, 'objstorage'):
assert cont['sha1'] in swh_storage.objstorage
with db_transaction(swh_storage) as (_, cur):
cur.execute('SELECT sha1, sha1_git, sha256, length, status'
' FROM content WHERE sha1 = %s',
(cont['sha1'],))
datum = cur.fetchone()
assert datum == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['length'], 'visible')
expected_cont = cont.copy()
del expected_cont['data']
journal_objects = list(swh_storage.journal_writer.objects)
for (obj_type, obj) in journal_objects:
del obj['ctime']
assert journal_objects == [('content', expected_cont)]
def test_content_add_metadata_db(self, swh_storage):
cont = data.cont
del cont['data']
cont['ctime'] = datetime.datetime.now()
actual_result = swh_storage.content_add_metadata([cont])
assert actual_result == {
'content:add': 1,
'skipped_content:add': 0
}
if hasattr(swh_storage, 'objstorage'):
assert cont['sha1'] not in swh_storage.objstorage
with db_transaction(swh_storage) as (_, cur):
cur.execute('SELECT sha1, sha1_git, sha256, length, status'
' FROM content WHERE sha1 = %s',
(cont['sha1'],))
datum = cur.fetchone()
assert datum == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['length'], 'visible')
assert list(swh_storage.journal_writer.objects) == [('content', cont)]
def test_skipped_content_add_db(self, swh_storage):
cont = data.skipped_cont
cont2 = data.skipped_cont2
cont2['blake2s256'] = None
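        # cont is listed twice on purpose: duplicated input must be
        # deduplicated, hence only 2 skipped contents below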
actual_result = swh_storage.content_add([cont, cont, cont2])
assert actual_result == {
'content:add': 0,
'content:add:bytes': 0,
'skipped_content:add': 2,
}
with db_transaction(swh_storage) as (_, cur):
cur.execute('SELECT sha1, sha1_git, sha256, blake2s256, '
'length, status, reason '
'FROM skipped_content ORDER BY sha1_git')
dbdata = cur.fetchall()
assert len(dbdata) == 2
assert dbdata[0] == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['blake2s256'], cont['length'], 'absent',
'Content too long')
assert dbdata[1] == (cont2['sha1'], cont2['sha1_git'], cont2['sha256'],
cont2['blake2s256'], cont2['length'], 'absent',
'Content too long')