diff --git a/sql/upgrades/141.sql b/sql/upgrades/141.sql
new file mode 100644
index 00000000..bb9d4660
--- /dev/null
+++ b/sql/upgrades/141.sql
@@ -0,0 +1,10 @@
+-- SWH DB schema upgrade
+-- from_version: 140
+-- to_version: 141
+-- description: Remove fetch history
+
+insert into dbversion(version, release, description)
+      values(141, now(), 'Remove fetch history');
+
+drop table fetch_history;
+
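The new upgrade script bumps dbversion to 141 and drops the now-unused fetch_history table. A minimal sketch of applying such a script with psycopg2; the DSN and file path are assumptions, not part of this change:

    import psycopg2

    def apply_upgrade(dsn='dbname=softwareheritage-dev',
                      path='sql/upgrades/141.sql'):
        # Run the upgrade script in a single transaction and report the
        # resulting schema version.
        with open(path) as f:
            sql = f.read()
        with psycopg2.connect(dsn) as conn:
            with conn.cursor() as cur:
                cur.execute(sql)  # dbversion insert + drop table fetch_history
                cur.execute('select max(version) from dbversion')
                print('schema is now at version', cur.fetchone()[0])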
diff --git a/swh/storage/api/client.py b/swh/storage/api/client.py
index 865b3663..f75fa45a 100644
--- a/swh/storage/api/client.py
+++ b/swh/storage/api/client.py
@@ -1,268 +1,257 @@
# Copyright (C) 2015-2017 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import warnings
from swh.core.api import RPCClient
from ..exc import StorageAPIError
class RemoteStorage(RPCClient):
"""Proxy to a remote storage API"""
api_exception = StorageAPIError
def check_config(self, *, check_write):
return self.post('check_config', {'check_write': check_write})
def reset(self):
return self.post('reset', {})
def content_add(self, content):
return self.post('content/add', {'content': content})
def content_add_metadata(self, content):
return self.post('content/add_metadata', {'content': content})
def content_update(self, content, keys=[]):
return self.post('content/update', {'content': content,
'keys': keys})
def content_missing(self, content, key_hash='sha1'):
return self.post('content/missing', {'content': content,
'key_hash': key_hash})
def content_missing_per_sha1(self, contents):
return self.post('content/missing/sha1', {'contents': contents})
def skipped_content_missing(self, contents):
return self.post('content/skipped/missing', {'contents': contents})
def content_get(self, content):
return self.post('content/data', {'content': content})
def content_get_metadata(self, content):
return self.post('content/metadata', {'content': content})
def content_get_range(self, start, end, limit=1000):
return self.post('content/range', {'start': start,
'end': end,
'limit': limit})
def content_find(self, content):
return self.post('content/present', {'content': content})
def directory_add(self, directories):
return self.post('directory/add', {'directories': directories})
def directory_missing(self, directories):
return self.post('directory/missing', {'directories': directories})
def directory_ls(self, directory, recursive=False):
return self.post('directory/ls', {'directory': directory,
'recursive': recursive})
def revision_get(self, revisions):
return self.post('revision', {'revisions': revisions})
def revision_log(self, revisions, limit=None):
return self.post('revision/log', {'revisions': revisions,
'limit': limit})
def revision_shortlog(self, revisions, limit=None):
return self.post('revision/shortlog', {'revisions': revisions,
'limit': limit})
def revision_add(self, revisions):
return self.post('revision/add', {'revisions': revisions})
def revision_missing(self, revisions):
return self.post('revision/missing', {'revisions': revisions})
def release_add(self, releases):
return self.post('release/add', {'releases': releases})
def release_get(self, releases):
return self.post('release', {'releases': releases})
def release_missing(self, releases):
return self.post('release/missing', {'releases': releases})
def object_find_by_sha1_git(self, ids):
return self.post('object/find_by_sha1_git', {'ids': ids})
def snapshot_add(self, snapshots):
return self.post('snapshot/add', {'snapshots': snapshots})
def snapshot_get(self, snapshot_id):
return self.post('snapshot', {
'snapshot_id': snapshot_id
})
def snapshot_get_by_origin_visit(self, origin, visit):
return self.post('snapshot/by_origin_visit', {
'origin': origin,
'visit': visit
})
def snapshot_get_latest(self, origin, allowed_statuses=None):
return self.post('snapshot/latest', {
'origin': origin,
'allowed_statuses': allowed_statuses
})
def snapshot_count_branches(self, snapshot_id):
return self.post('snapshot/count_branches', {
'snapshot_id': snapshot_id
})
def snapshot_get_branches(self, snapshot_id, branches_from=b'',
branches_count=1000, target_types=None):
return self.post('snapshot/get_branches', {
'snapshot_id': snapshot_id,
'branches_from': branches_from,
'branches_count': branches_count,
'target_types': target_types
})
def origin_get(self, origins=None, *, origin=None):
if origin is None:
if origins is None:
raise TypeError('origin_get expected 1 argument')
else:
assert origins is None
origins = origin
warnings.warn("argument 'origin' of origin_get was renamed "
"to 'origins' in v0.0.123.",
DeprecationWarning)
return self.post('origin/get', {'origins': origins})
def origin_search(self, url_pattern, offset=0, limit=50, regexp=False,
with_visit=False):
return self.post('origin/search', {'url_pattern': url_pattern,
'offset': offset,
'limit': limit,
'regexp': regexp,
'with_visit': with_visit})
def origin_count(self, url_pattern, regexp=False, with_visit=False):
return self.post('origin/count', {'url_pattern': url_pattern,
'regexp': regexp,
'with_visit': with_visit})
def origin_get_range(self, origin_from=1, origin_count=100):
return self.post('origin/get_range', {'origin_from': origin_from,
'origin_count': origin_count})
def origin_add(self, origins):
return self.post('origin/add_multi', {'origins': origins})
def origin_add_one(self, origin):
return self.post('origin/add', {'origin': origin})
def origin_visit_add(self, origin, date, type=None):
return self.post(
'origin/visit/add',
{'origin': origin, 'date': date, 'type': type})
def origin_visit_update(self, origin, visit_id, status=None,
metadata=None, snapshot=None):
return self.post('origin/visit/update', {'origin': origin,
'visit_id': visit_id,
'status': status,
'metadata': metadata,
'snapshot': snapshot})
def origin_visit_upsert(self, visits):
return self.post('origin/visit/upsert', {'visits': visits})
def origin_visit_get(self, origin, last_visit=None, limit=None):
return self.post('origin/visit/get', {
'origin': origin, 'last_visit': last_visit, 'limit': limit})
def origin_visit_find_by_date(self, origin, visit_date, limit=None):
return self.post('origin/visit/find_by_date', {
'origin': origin, 'visit_date': visit_date})
def origin_visit_get_by(self, origin, visit):
return self.post('origin/visit/getby', {'origin': origin,
'visit': visit})
def origin_visit_get_latest(self, origin, allowed_statuses=None,
require_snapshot=False):
return self.post(
'origin/visit/get_latest',
{'origin': origin, 'allowed_statuses': allowed_statuses,
'require_snapshot': require_snapshot})
- def fetch_history_start(self, origin_id):
- return self.post('fetch_history/start', {'origin_id': origin_id})
-
- def fetch_history_end(self, fetch_history_id, data):
- return self.post('fetch_history/end',
- {'fetch_history_id': fetch_history_id,
- 'data': data})
-
- def fetch_history_get(self, fetch_history_id):
- return self.get('fetch_history', {'id': fetch_history_id})
-
def stat_counters(self):
return self.get('stat/counters')
def refresh_stat_counters(self):
return self.get('stat/refresh')
def directory_entry_get_by_path(self, directory, paths):
return self.post('directory/path', dict(directory=directory,
paths=paths))
def tool_add(self, tools):
return self.post('tool/add', {'tools': tools})
def tool_get(self, tool):
return self.post('tool/data', {'tool': tool})
def origin_metadata_add(self, origin_id, ts, provider, tool, metadata):
return self.post('origin/metadata/add', {'origin_id': origin_id,
'ts': ts,
'provider': provider,
'tool': tool,
'metadata': metadata})
def origin_metadata_get_by(self, origin_id, provider_type=None):
return self.post('origin/metadata/get', {
'origin_id': origin_id,
'provider_type': provider_type
})
def metadata_provider_add(self, provider_name, provider_type, provider_url,
metadata):
return self.post('provider/add', {'provider_name': provider_name,
'provider_type': provider_type,
'provider_url': provider_url,
'metadata': metadata})
def metadata_provider_get(self, provider_id):
return self.post('provider/get', {'provider_id': provider_id})
def metadata_provider_get_by(self, provider):
return self.post('provider/getby', {'provider': provider})
def diff_directories(self, from_dir, to_dir, track_renaming=False):
return self.post('algos/diff_directories',
{'from_dir': from_dir,
'to_dir': to_dir,
'track_renaming': track_renaming})
def diff_revisions(self, from_rev, to_rev, track_renaming=False):
return self.post('algos/diff_revisions',
{'from_rev': from_rev,
'to_rev': to_rev,
'track_renaming': track_renaming})
def diff_revision(self, revision, track_renaming=False):
return self.post('algos/diff_revision',
{'revision': revision,
'track_renaming': track_renaming})
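RemoteStorage is a thin RPCClient proxy: each method POSTs (or GETs) its arguments to the matching server endpoint, so the fetch_history_* methods disappear together with their routes. A minimal usage sketch; the server URL is an assumption:

    from swh.storage.api.client import RemoteStorage

    storage = RemoteStorage('http://localhost:5002/')
    storage.check_config(check_write=False)                      # POST /check_config
    missing = storage.content_missing_per_sha1([b'\x00' * 20])   # POST /content/missing/sha1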
diff --git a/swh/storage/api/server.py b/swh/storage/api/server.py
index 797c5452..102b54d0 100644
--- a/swh/storage/api/server.py
+++ b/swh/storage/api/server.py
@@ -1,619 +1,599 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
import logging
from flask import request
from functools import wraps
from swh.core import config
from swh.storage import get_storage as get_swhstorage
from swh.core.api import (RPCServerApp, decode_request,
error_handler,
encode_data_server as encode_data)
from swh.core.statsd import statsd
app = RPCServerApp(__name__)
storage = None
OPERATIONS_METRIC = 'swh_storage_operations_total'
OPERATIONS_UNIT_METRIC = "swh_storage_operations_{unit}_total"
DURATION_METRIC = "swh_storage_request_duration_seconds"
def timed(f):
"""Time that function!
"""
@wraps(f)
def d(*a, **kw):
with statsd.timed(DURATION_METRIC, tags={'endpoint': f.__name__}):
return f(*a, **kw)
return d
def encode(f):
@wraps(f)
def d(*a, **kw):
r = f(*a, **kw)
return encode_data(r)
return d
def send_metric(metric, count, method_name):
"""Send statsd metric with count for method `method_name`
If count is 0, the metric is discarded. If the metric is not
parseable, the metric is discarded with a log message.
Args:
        metric (str): Metric's name (e.g. content:add, content:add:bytes)
        count (int): Associated value for the metric
        method_name (str): Method's name
    Returns:
        Bool stating whether the metric was sent or not
"""
if count == 0:
return False
metric_type = metric.split(':')
_length = len(metric_type)
if _length == 2:
object_type, operation = metric_type
metric_name = OPERATIONS_METRIC
elif _length == 3:
object_type, operation, unit = metric_type
metric_name = OPERATIONS_UNIT_METRIC.format(unit=unit)
else:
logging.warning('Skipping unknown metric {%s: %s}' % (
metric, count))
return False
statsd.increment(
metric_name, count, tags={
'endpoint': method_name,
'object_type': object_type,
'operation': operation,
})
return True
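send_metric() splits the metric name on ':': two components select OPERATIONS_METRIC, three select the per-unit counter, and anything else is logged and dropped. Illustrative calls (the counts are made up):

    send_metric('content:add', count=10, method_name='content_add')
    # -> increments swh_storage_operations_total
    send_metric('content:add:bytes', count=4096, method_name='content_add')
    # -> increments swh_storage_operations_bytes_total
    send_metric('malformed', count=1, method_name='content_add')
    # -> warning logged, metric discarded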
def process_metrics(f):
"""Increment object counters for the decorated function.
"""
@wraps(f)
def d(*a, **kw):
r = f(*a, **kw)
for metric, count in r.items():
send_metric(metric=metric, count=count, method_name=f.__name__)
return r
return d
@app.errorhandler(Exception)
def my_error_handler(exception):
return error_handler(exception, encode_data)
def get_storage():
global storage
if not storage:
storage = get_swhstorage(**app.config['storage'])
return storage
@app.route('/')
@timed
def index():
return '''
Software Heritage storage server
You have reached the
Software Heritage
storage server.
See its
documentation
and API for more information
'''
@app.route('/check_config', methods=['POST'])
@timed
def check_config():
return encode_data(get_storage().check_config(**decode_request(request)))
@app.route('/reset', methods=['POST'])
@timed
def reset():
return encode_data(get_storage().reset(**decode_request(request)))
@app.route('/content/missing', methods=['POST'])
@timed
def content_missing():
return encode_data(get_storage().content_missing(
**decode_request(request)))
@app.route('/content/missing/sha1', methods=['POST'])
@timed
def content_missing_per_sha1():
return encode_data(get_storage().content_missing_per_sha1(
**decode_request(request)))
@app.route('/content/skipped/missing', methods=['POST'])
@timed
def skipped_content_missing():
return encode_data(get_storage().skipped_content_missing(
**decode_request(request)))
@app.route('/content/present', methods=['POST'])
@timed
def content_find():
return encode_data(get_storage().content_find(**decode_request(request)))
@app.route('/content/add', methods=['POST'])
@timed
@encode
@process_metrics
def content_add():
return get_storage().content_add(**decode_request(request))
@app.route('/content/add_metadata', methods=['POST'])
@timed
@encode
@process_metrics
def content_add_metadata():
return get_storage().content_add_metadata(**decode_request(request))
@app.route('/content/update', methods=['POST'])
@timed
def content_update():
return encode_data(get_storage().content_update(**decode_request(request)))
@app.route('/content/data', methods=['POST'])
@timed
def content_get():
return encode_data(get_storage().content_get(**decode_request(request)))
@app.route('/content/metadata', methods=['POST'])
@timed
def content_get_metadata():
return encode_data(get_storage().content_get_metadata(
**decode_request(request)))
@app.route('/content/range', methods=['POST'])
@timed
def content_get_range():
return encode_data(get_storage().content_get_range(
**decode_request(request)))
@app.route('/directory/missing', methods=['POST'])
@timed
def directory_missing():
return encode_data(get_storage().directory_missing(
**decode_request(request)))
@app.route('/directory/add', methods=['POST'])
@timed
@encode
@process_metrics
def directory_add():
return get_storage().directory_add(**decode_request(request))
@app.route('/directory/path', methods=['POST'])
@timed
def directory_entry_get_by_path():
return encode_data(get_storage().directory_entry_get_by_path(
**decode_request(request)))
@app.route('/directory/ls', methods=['POST'])
@timed
def directory_ls():
return encode_data(get_storage().directory_ls(
**decode_request(request)))
@app.route('/revision/add', methods=['POST'])
@timed
@encode
@process_metrics
def revision_add():
return get_storage().revision_add(**decode_request(request))
@app.route('/revision', methods=['POST'])
@timed
def revision_get():
return encode_data(get_storage().revision_get(**decode_request(request)))
@app.route('/revision/log', methods=['POST'])
@timed
def revision_log():
return encode_data(get_storage().revision_log(**decode_request(request)))
@app.route('/revision/shortlog', methods=['POST'])
@timed
def revision_shortlog():
return encode_data(get_storage().revision_shortlog(
**decode_request(request)))
@app.route('/revision/missing', methods=['POST'])
@timed
def revision_missing():
return encode_data(get_storage().revision_missing(
**decode_request(request)))
@app.route('/release/add', methods=['POST'])
@timed
@encode
@process_metrics
def release_add():
return get_storage().release_add(**decode_request(request))
@app.route('/release', methods=['POST'])
@timed
def release_get():
return encode_data(get_storage().release_get(**decode_request(request)))
@app.route('/release/missing', methods=['POST'])
@timed
def release_missing():
return encode_data(get_storage().release_missing(
**decode_request(request)))
@app.route('/object/find_by_sha1_git', methods=['POST'])
@timed
def object_find_by_sha1_git():
return encode_data(get_storage().object_find_by_sha1_git(
**decode_request(request)))
@app.route('/snapshot/add', methods=['POST'])
@timed
@encode
@process_metrics
def snapshot_add():
req_data = decode_request(request)
return get_storage().snapshot_add(**req_data)
@app.route('/snapshot', methods=['POST'])
@timed
def snapshot_get():
return encode_data(get_storage().snapshot_get(**decode_request(request)))
@app.route('/snapshot/by_origin_visit', methods=['POST'])
@timed
def snapshot_get_by_origin_visit():
return encode_data(get_storage().snapshot_get_by_origin_visit(
**decode_request(request)))
@app.route('/snapshot/latest', methods=['POST'])
@timed
def snapshot_get_latest():
return encode_data(get_storage().snapshot_get_latest(
**decode_request(request)))
@app.route('/snapshot/count_branches', methods=['POST'])
@timed
def snapshot_count_branches():
return encode_data(get_storage().snapshot_count_branches(
**decode_request(request)))
@app.route('/snapshot/get_branches', methods=['POST'])
@timed
def snapshot_get_branches():
return encode_data(get_storage().snapshot_get_branches(
**decode_request(request)))
@app.route('/origin/get', methods=['POST'])
@timed
def origin_get():
return encode_data(get_storage().origin_get(**decode_request(request)))
@app.route('/origin/get_range', methods=['POST'])
@timed
def origin_get_range():
return encode_data(get_storage().origin_get_range(
**decode_request(request)))
@app.route('/origin/search', methods=['POST'])
@timed
def origin_search():
return encode_data(get_storage().origin_search(**decode_request(request)))
@app.route('/origin/count', methods=['POST'])
@timed
def origin_count():
return encode_data(get_storage().origin_count(**decode_request(request)))
@app.route('/origin/add_multi', methods=['POST'])
@timed
@encode
def origin_add():
origins = get_storage().origin_add(**decode_request(request))
send_metric('origin:add', count=len(origins), method_name='origin_add')
return origins
@app.route('/origin/add', methods=['POST'])
@timed
@encode
def origin_add_one():
origin = get_storage().origin_add_one(**decode_request(request))
send_metric('origin:add', count=1, method_name='origin_add_one')
return origin
@app.route('/origin/visit/get', methods=['POST'])
@timed
def origin_visit_get():
return encode_data(get_storage().origin_visit_get(
**decode_request(request)))
@app.route('/origin/visit/find_by_date', methods=['POST'])
@timed
def origin_visit_find_by_date():
return encode_data(get_storage().origin_visit_find_by_date(
**decode_request(request)))
@app.route('/origin/visit/getby', methods=['POST'])
@timed
def origin_visit_get_by():
return encode_data(
get_storage().origin_visit_get_by(**decode_request(request)))
@app.route('/origin/visit/get_latest', methods=['POST'])
@timed
def origin_visit_get_latest():
return encode_data(
get_storage().origin_visit_get_latest(**decode_request(request)))
@app.route('/origin/visit/add', methods=['POST'])
@timed
@encode
def origin_visit_add():
origin_visit = get_storage().origin_visit_add(
**decode_request(request))
send_metric('origin_visit:add', count=1, method_name='origin_visit')
return origin_visit
@app.route('/origin/visit/update', methods=['POST'])
@timed
def origin_visit_update():
return encode_data(get_storage().origin_visit_update(
**decode_request(request)))
@app.route('/origin/visit/upsert', methods=['POST'])
@timed
def origin_visit_upsert():
return encode_data(get_storage().origin_visit_upsert(
**decode_request(request)))
-@app.route('/fetch_history', methods=['GET'])
-@timed
-def fetch_history_get():
- return encode_data(get_storage().fetch_history_get(request.args['id']))
-
-
-@app.route('/fetch_history/start', methods=['POST'])
-@timed
-def fetch_history_start():
- return encode_data(
- get_storage().fetch_history_start(**decode_request(request)))
-
-
-@app.route('/fetch_history/end', methods=['POST'])
-@timed
-def fetch_history_end():
- return encode_data(
- get_storage().fetch_history_end(**decode_request(request)))
-
-
@app.route('/tool/data', methods=['POST'])
@timed
def tool_get():
return encode_data(get_storage().tool_get(
**decode_request(request)))
@app.route('/tool/add', methods=['POST'])
@timed
@encode
def tool_add():
tools = get_storage().tool_add(**decode_request(request))
send_metric('tool:add', count=len(tools), method_name='tool_add')
return tools
@app.route('/origin/metadata/add', methods=['POST'])
@timed
@encode
def origin_metadata_add():
origin_metadata = get_storage().origin_metadata_add(
**decode_request(request))
send_metric(
'origin_metadata:add', count=1, method_name='origin_metadata_add')
return origin_metadata
@app.route('/origin/metadata/get', methods=['POST'])
@timed
def origin_metadata_get_by():
return encode_data(get_storage().origin_metadata_get_by(**decode_request(
request)))
@app.route('/provider/add', methods=['POST'])
@timed
@encode
def metadata_provider_add():
metadata_provider = get_storage().metadata_provider_add(**decode_request(
request))
send_metric(
'metadata_provider:add', count=1, method_name='metadata_provider')
return metadata_provider
@app.route('/provider/get', methods=['POST'])
@timed
def metadata_provider_get():
return encode_data(get_storage().metadata_provider_get(**decode_request(
request)))
@app.route('/provider/getby', methods=['POST'])
@timed
def metadata_provider_get_by():
return encode_data(get_storage().metadata_provider_get_by(**decode_request(
request)))
@app.route('/stat/counters', methods=['GET'])
@timed
def stat_counters():
return encode_data(get_storage().stat_counters())
@app.route('/stat/refresh', methods=['GET'])
@timed
def refresh_stat_counters():
return encode_data(get_storage().refresh_stat_counters())
@app.route('/algos/diff_directories', methods=['POST'])
@timed
def diff_directories():
return encode_data(get_storage().diff_directories(
**decode_request(request)))
@app.route('/algos/diff_revisions', methods=['POST'])
@timed
def diff_revisions():
return encode_data(get_storage().diff_revisions(**decode_request(request)))
@app.route('/algos/diff_revision', methods=['POST'])
@timed
def diff_revision():
return encode_data(get_storage().diff_revision(**decode_request(request)))
api_cfg = None
def load_and_check_config(config_file, type='local'):
"""Check the minimal configuration is set to run the api or raise an
error explanation.
Args:
config_file (str): Path to the configuration file to load
type (str): configuration type. For 'local' type, more
checks are done.
Raises:
Error if the setup is not as expected
Returns:
configuration as a dict
"""
if not config_file:
raise EnvironmentError('Configuration file must be defined')
if not os.path.exists(config_file):
raise FileNotFoundError('Configuration file %s does not exist' % (
config_file, ))
cfg = config.read(config_file)
if 'storage' not in cfg:
raise KeyError("Missing '%storage' configuration")
if type == 'local':
vcfg = cfg['storage']
cls = vcfg.get('cls')
if cls != 'local':
raise ValueError(
"The storage backend can only be started with a 'local' "
"configuration")
args = vcfg['args']
for key in ('db', 'objstorage'):
if not args.get(key):
raise ValueError(
"Invalid configuration; missing '%s' config entry" % key)
return cfg
def make_app_from_configfile():
"""Run the WSGI app from the webserver, loading the configuration from
a configuration file.
SWH_CONFIG_FILENAME environment variable defines the
configuration path to load.
"""
global api_cfg
if not api_cfg:
config_file = os.environ.get('SWH_CONFIG_FILENAME')
api_cfg = load_and_check_config(config_file)
app.config.update(api_cfg)
handler = logging.StreamHandler()
app.logger.addHandler(handler)
return app
if __name__ == '__main__':
print('Deprecated. Use swh-storage')
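load_and_check_config() insists on a 'storage' section with cls 'local' and both 'db' and 'objstorage' under 'args'. A minimal sketch of a configuration that passes those checks; every concrete value below is an assumption:

    import os
    import tempfile

    import yaml

    from swh.storage.api.server import load_and_check_config

    cfg = {
        'storage': {
            'cls': 'local',
            'args': {
                'db': 'dbname=softwareheritage-dev',
                'objstorage': {'cls': 'pathslicing',
                               'args': {'root': '/srv/objects',
                                        'slicing': '0:2/2:4'}},
            },
        },
    }
    cfg_path = os.path.join(tempfile.mkdtemp(), 'storage.yml')
    with open(cfg_path, 'w') as f:
        yaml.safe_dump(cfg, f)
    load_and_check_config(cfg_path)  # raises if 'db' or 'objstorage' is missing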
diff --git a/swh/storage/db.py b/swh/storage/db.py
index 11cef63f..27bee153 100644
--- a/swh/storage/db.py
+++ b/swh/storage/db.py
@@ -1,912 +1,866 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import select
from swh.core.db import BaseDb
from swh.core.db.db_utils import stored_procedure, jsonize
from swh.core.db.db_utils import execute_values_generator
class Db(BaseDb):
"""Proxy to the SWH DB, with wrappers around stored procedures
"""
def mktemp_dir_entry(self, entry_type, cur=None):
self._cursor(cur).execute('SELECT swh_mktemp_dir_entry(%s)',
(('directory_entry_%s' % entry_type),))
@stored_procedure('swh_mktemp_revision')
def mktemp_revision(self, cur=None): pass
@stored_procedure('swh_mktemp_release')
def mktemp_release(self, cur=None): pass
@stored_procedure('swh_mktemp_snapshot_branch')
def mktemp_snapshot_branch(self, cur=None): pass
def register_listener(self, notify_queue, cur=None):
"""Register a listener for NOTIFY queue `notify_queue`"""
self._cursor(cur).execute("LISTEN %s" % notify_queue)
def listen_notifies(self, timeout):
"""Listen to notifications for `timeout` seconds"""
if select.select([self.conn], [], [], timeout) == ([], [], []):
return
else:
self.conn.poll()
while self.conn.notifies:
yield self.conn.notifies.pop(0)
@stored_procedure('swh_content_add')
def content_add_from_temp(self, cur=None): pass
@stored_procedure('swh_directory_add')
def directory_add_from_temp(self, cur=None): pass
@stored_procedure('swh_skipped_content_add')
def skipped_content_add_from_temp(self, cur=None): pass
@stored_procedure('swh_revision_add')
def revision_add_from_temp(self, cur=None): pass
@stored_procedure('swh_release_add')
def release_add_from_temp(self, cur=None): pass
def content_update_from_temp(self, keys_to_update, cur=None):
cur = self._cursor(cur)
cur.execute("""select swh_content_update(ARRAY[%s] :: text[])""" %
keys_to_update)
content_get_metadata_keys = [
'sha1', 'sha1_git', 'sha256', 'blake2s256', 'length', 'status']
content_add_keys = content_get_metadata_keys + ['ctime']
skipped_content_keys = [
'sha1', 'sha1_git', 'sha256', 'blake2s256',
'length', 'reason', 'status', 'origin']
def content_get_metadata_from_sha1s(self, sha1s, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur, """
select t.sha1, %s from (values %%s) as t (sha1)
left join content using (sha1)
""" % ', '.join(self.content_get_metadata_keys[1:]),
((sha1,) for sha1 in sha1s),
)
def content_get_range(self, start, end, limit=None, cur=None):
"""Retrieve contents within range [start, end].
"""
cur = self._cursor(cur)
query = """select %s from content
where %%s <= sha1 and sha1 <= %%s
order by sha1
limit %%s""" % ', '.join(self.content_get_metadata_keys)
cur.execute(query, (start, end, limit))
yield from cur
content_hash_keys = ['sha1', 'sha1_git', 'sha256', 'blake2s256']
def content_missing_from_list(self, contents, cur=None):
cur = self._cursor(cur)
keys = ', '.join(self.content_hash_keys)
equality = ' AND '.join(
('t.%s = c.%s' % (key, key))
for key in self.content_hash_keys
)
yield from execute_values_generator(
cur, """
SELECT %s
FROM (VALUES %%s) as t(%s)
WHERE NOT EXISTS (
SELECT 1 FROM content c
WHERE %s
)
""" % (keys, keys, equality),
(tuple(c[key] for key in self.content_hash_keys) for c in contents)
)
def content_missing_per_sha1(self, sha1s, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(cur, """
SELECT t.sha1 FROM (VALUES %s) AS t(sha1)
WHERE NOT EXISTS (
SELECT 1 FROM content c WHERE c.sha1 = t.sha1
)""", ((sha1,) for sha1 in sha1s))
def skipped_content_missing(self, contents, cur=None):
if not contents:
return []
cur = self._cursor(cur)
query = """SELECT * FROM (VALUES %s) AS t (%s)
WHERE not exists
(SELECT 1 FROM skipped_content s WHERE
s.sha1 is not distinct from t.sha1 and
s.sha1_git is not distinct from t.sha1_git and
s.sha256 is not distinct from t.sha256);""" % \
((', '.join('%s' for _ in contents)),
', '.join(self.content_hash_keys))
cur.execute(query,
[tuple(cont[key] for key in self.content_hash_keys)
for cont in contents])
yield from cur
def snapshot_exists(self, snapshot_id, cur=None):
"""Check whether a snapshot with the given id exists"""
cur = self._cursor(cur)
cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,))
return bool(cur.fetchone())
def snapshot_add(self, snapshot_id, cur=None):
"""Add a snapshot from the temporary table"""
cur = self._cursor(cur)
cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,))
snapshot_count_cols = ['target_type', 'count']
def snapshot_count_branches(self, snapshot_id, cur=None):
cur = self._cursor(cur)
query = """\
SELECT %s FROM swh_snapshot_count_branches(%%s)
""" % ', '.join(self.snapshot_count_cols)
cur.execute(query, (snapshot_id,))
yield from cur
snapshot_get_cols = ['snapshot_id', 'name', 'target', 'target_type']
def snapshot_get_by_id(self, snapshot_id, branches_from=b'',
branches_count=None, target_types=None,
cur=None):
cur = self._cursor(cur)
query = """\
SELECT %s
FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[])
""" % ', '.join(self.snapshot_get_cols)
cur.execute(query, (snapshot_id, branches_from, branches_count,
target_types))
yield from cur
def snapshot_get_by_origin_visit(self, origin_id, visit_id, cur=None):
cur = self._cursor(cur)
query = """\
SELECT snapshot from origin_visit where
origin_visit.origin=%s and origin_visit.visit=%s;
"""
cur.execute(query, (origin_id, visit_id))
ret = cur.fetchone()
if ret:
return ret[0]
content_find_cols = ['sha1', 'sha1_git', 'sha256', 'blake2s256', 'length',
'ctime', 'status']
def content_find(self, sha1=None, sha1_git=None, sha256=None,
blake2s256=None, cur=None):
"""Find the content optionally on a combination of the following
checksums sha1, sha1_git, sha256 or blake2s256.
Args:
sha1: sha1 content
            sha1_git: the sha1 computed `a la git` of the content
sha256: sha256 content
blake2s256: blake2s256 content
Returns:
            A list of rows (as per content_find_cols), one per matching content;
            an empty list if no content matches.
"""
cur = self._cursor(cur)
checksum_dict = {'sha1': sha1, 'sha1_git': sha1_git,
'sha256': sha256, 'blake2s256': blake2s256}
where_parts = []
args = []
# Adds only those keys which have value other than None
for algorithm in checksum_dict:
if checksum_dict[algorithm] is not None:
args.append(checksum_dict[algorithm])
where_parts.append(algorithm + '= %s')
query = ' AND '.join(where_parts)
cur.execute("""SELECT %s
FROM content WHERE %s
"""
% (','.join(self.content_find_cols), query),
args)
content = cur.fetchall()
return content
def directory_missing_from_list(self, directories, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur, """
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM directory d WHERE d.id = t.id
)
""", ((id,) for id in directories))
directory_ls_cols = ['dir_id', 'type', 'target', 'name', 'perms',
'status', 'sha1', 'sha1_git', 'sha256', 'length']
def directory_walk_one(self, directory, cur=None):
cur = self._cursor(cur)
cols = ', '.join(self.directory_ls_cols)
query = 'SELECT %s FROM swh_directory_walk_one(%%s)' % cols
cur.execute(query, (directory,))
yield from cur
def directory_walk(self, directory, cur=None):
cur = self._cursor(cur)
cols = ', '.join(self.directory_ls_cols)
query = 'SELECT %s FROM swh_directory_walk(%%s)' % cols
cur.execute(query, (directory,))
yield from cur
def directory_entry_get_by_path(self, directory, paths, cur=None):
"""Retrieve a directory entry by path.
"""
cur = self._cursor(cur)
cols = ', '.join(self.directory_ls_cols)
query = (
'SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)' % cols)
cur.execute(query, (directory, paths))
data = cur.fetchone()
if set(data) == {None}:
return None
return data
def revision_missing_from_list(self, revisions, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur, """
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM revision r WHERE r.id = t.id
)
""", ((id,) for id in revisions))
revision_add_cols = [
'id', 'date', 'date_offset', 'date_neg_utc_offset', 'committer_date',
'committer_date_offset', 'committer_date_neg_utc_offset', 'type',
'directory', 'message', 'author_fullname', 'author_name',
'author_email', 'committer_fullname', 'committer_name',
'committer_email', 'metadata', 'synthetic',
]
revision_get_cols = revision_add_cols + ['parents']
def origin_visit_add(self, origin, ts, type, cur=None):
"""Add a new origin_visit for origin origin at timestamp ts with
status 'ongoing'.
Args:
origin: origin concerned by the visit
ts: the date of the visit
type: type of loader for the visit
Returns:
The new visit index step for that origin
"""
cur = self._cursor(cur)
self._cursor(cur).execute('SELECT swh_origin_visit_add(%s, %s, %s)',
(origin, ts, type))
return cur.fetchone()[0]
def origin_visit_update(self, origin_id, visit_id, updates, cur=None):
"""Update origin_visit's status."""
cur = self._cursor(cur)
update_cols = []
values = []
where = ['origin=%s AND visit=%s']
where_values = [origin_id, visit_id]
from_ = ''
if 'status' in updates:
update_cols.append('status=%s')
values.append(updates.pop('status'))
if 'metadata' in updates:
update_cols.append('metadata=%s')
values.append(jsonize(updates.pop('metadata')))
if 'snapshot' in updates:
update_cols.append('snapshot=%s')
values.append(updates.pop('snapshot'))
assert not updates, 'Unknown fields: %r' % updates
query = """UPDATE origin_visit
SET {update_cols}
{from}
WHERE {where}""".format(**{
'update_cols': ', '.join(update_cols),
'from': from_,
'where': ' AND '.join(where)
})
cur.execute(query, (*values, *where_values))
def origin_visit_upsert(self, origin, visit, date, type, status,
metadata, snapshot, cur=None):
cur = self._cursor(cur)
query = """INSERT INTO origin_visit ({cols}) VALUES ({values})
ON CONFLICT ON CONSTRAINT origin_visit_pkey DO
UPDATE SET {updates}""".format(
cols=', '.join(self.origin_visit_get_cols),
values=', '.join('%s' for col in self.origin_visit_get_cols),
updates=', '.join('{0}=excluded.{0}'.format(col)
for col in self.origin_visit_get_cols))
cur.execute(
query, (origin, visit, date, type, status, metadata, snapshot))
origin_visit_get_cols = ['origin', 'visit', 'date', 'type', 'status',
'metadata', 'snapshot']
def origin_visit_get_all(self, origin_id,
last_visit=None, limit=None, cur=None):
"""Retrieve all visits for origin with id origin_id.
Args:
            origin_id: the origin's identifier
        Yields:
            The origin's visits
"""
cur = self._cursor(cur)
if last_visit:
extra_condition = 'and visit > %s'
args = (origin_id, last_visit, limit)
else:
extra_condition = ''
args = (origin_id, limit)
query = """\
SELECT %s
FROM origin_visit
WHERE origin=%%s %s
order by visit asc
limit %%s""" % (
', '.join(self.origin_visit_get_cols), extra_condition
)
cur.execute(query, args)
yield from cur
def origin_visit_get(self, origin_id, visit_id, cur=None):
"""Retrieve information on visit visit_id of origin origin_id.
Args:
origin_id: the origin concerned
visit_id: The visit step for that origin
Returns:
The origin_visit information
"""
cur = self._cursor(cur)
query = """\
SELECT %s
FROM origin_visit
WHERE origin = %%s AND visit = %%s
""" % (', '.join(self.origin_visit_get_cols))
cur.execute(query, (origin_id, visit_id))
r = cur.fetchall()
if not r:
return None
return r[0]
def origin_visit_find_by_date(self, origin, visit_date, cur=None):
cur = self._cursor(cur)
cur.execute(
'SELECT * FROM swh_visit_find_by_date(%s, %s)',
(origin, visit_date))
r = cur.fetchall()
if r:
return r[0]
def origin_visit_exists(self, origin_id, visit_id, cur=None):
"""Check whether an origin visit with the given ids exists"""
cur = self._cursor(cur)
query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s"
cur.execute(query, (origin_id, visit_id))
return bool(cur.fetchone())
def origin_visit_get_latest(
self, origin_id, allowed_statuses=None, require_snapshot=False,
cur=None):
"""Retrieve the most recent origin_visit of the given origin,
with optional filters.
Args:
origin_id: the origin concerned
allowed_statuses: the visit statuses allowed for the returned visit
require_snapshot (bool): If True, only a visit with a known
snapshot will be returned.
Returns:
The origin_visit information, or None if no visit matches.
"""
cur = self._cursor(cur)
query_parts = [
'SELECT %s' % ', '.join(self.origin_visit_get_cols),
'FROM origin_visit']
query_parts.append('WHERE origin = %s')
if require_snapshot:
query_parts.append('AND snapshot is not null')
if allowed_statuses:
query_parts.append(
cur.mogrify('AND status IN %s',
(tuple(allowed_statuses),)).decode())
query_parts.append('ORDER BY date DESC, visit DESC LIMIT 1')
query = '\n'.join(query_parts)
cur.execute(query, (origin_id,))
r = cur.fetchone()
if not r:
return None
return r
@staticmethod
def mangle_query_key(key, main_table):
if key == 'id':
return 't.id'
if key == 'parents':
return '''
ARRAY(
SELECT rh.parent_id::bytea
FROM revision_history rh
WHERE rh.id = t.id
ORDER BY rh.parent_rank
)'''
if '_' not in key:
return '%s.%s' % (main_table, key)
head, tail = key.split('_', 1)
if (head in ('author', 'committer')
and tail in ('name', 'email', 'id', 'fullname')):
return '%s.%s' % (head, tail)
return '%s.%s' % (main_table, key)
def revision_get_from_list(self, revisions, cur=None):
cur = self._cursor(cur)
query_keys = ', '.join(
self.mangle_query_key(k, 'revision')
for k in self.revision_get_cols
)
yield from execute_values_generator(
cur, """
SELECT %s FROM (VALUES %%s) as t(id)
LEFT JOIN revision ON t.id = revision.id
LEFT JOIN person author ON revision.author = author.id
LEFT JOIN person committer ON revision.committer = committer.id
""" % query_keys,
((id,) for id in revisions))
def revision_log(self, root_revisions, limit=None, cur=None):
cur = self._cursor(cur)
query = """SELECT %s
FROM swh_revision_log(%%s, %%s)
""" % ', '.join(self.revision_get_cols)
cur.execute(query, (root_revisions, limit))
yield from cur
revision_shortlog_cols = ['id', 'parents']
def revision_shortlog(self, root_revisions, limit=None, cur=None):
cur = self._cursor(cur)
query = """SELECT %s
FROM swh_revision_list(%%s, %%s)
""" % ', '.join(self.revision_shortlog_cols)
cur.execute(query, (root_revisions, limit))
yield from cur
def release_missing_from_list(self, releases, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur, """
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM release r WHERE r.id = t.id
)
""", ((id,) for id in releases))
object_find_by_sha1_git_cols = ['sha1_git', 'type', 'id', 'object_id']
def object_find_by_sha1_git(self, ids, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur, """
WITH t (id) AS (VALUES %s),
known_objects as ((
select
id as sha1_git,
'release'::object_type as type,
id,
object_id
from release r
where exists (select 1 from t where t.id = r.id)
) union all (
select
id as sha1_git,
'revision'::object_type as type,
id,
object_id
from revision r
where exists (select 1 from t where t.id = r.id)
) union all (
select
id as sha1_git,
'directory'::object_type as type,
id,
object_id
from directory d
where exists (select 1 from t where t.id = d.id)
) union all (
select
sha1_git as sha1_git,
'content'::object_type as type,
sha1 as id,
object_id
from content c
where exists (select 1 from t where t.id = c.sha1_git)
))
select t.id as sha1_git, k.type, k.id, k.object_id
from t
left join known_objects k on t.id = k.sha1_git
""",
((id,) for id in ids)
)
def stat_counters(self, cur=None):
cur = self._cursor(cur)
cur.execute('SELECT * FROM swh_stat_counters()')
yield from cur
- fetch_history_cols = ['origin', 'date', 'status', 'result', 'stdout',
- 'stderr', 'duration']
-
- def create_fetch_history(self, fetch_history, cur=None):
- """Create a fetch_history entry with the data in fetch_history"""
- cur = self._cursor(cur)
- query = '''INSERT INTO fetch_history (%s)
- VALUES (%s) RETURNING id''' % (
- ','.join(self.fetch_history_cols),
- ','.join(['%s'] * len(self.fetch_history_cols))
- )
- cur.execute(query, [fetch_history.get(col) for col in
- self.fetch_history_cols])
-
- return cur.fetchone()[0]
-
- def get_fetch_history(self, fetch_history_id, cur=None):
- """Get a fetch_history entry with the given id"""
- cur = self._cursor(cur)
- query = '''SELECT %s FROM fetch_history WHERE id=%%s''' % (
- ', '.join(self.fetch_history_cols),
- )
- cur.execute(query, (fetch_history_id,))
-
- data = cur.fetchone()
-
- if not data:
- return None
-
- ret = {'id': fetch_history_id}
- for i, col in enumerate(self.fetch_history_cols):
- ret[col] = data[i]
-
- return ret
-
- def update_fetch_history(self, fetch_history, cur=None):
- """Update the fetch_history entry from the data in fetch_history"""
- cur = self._cursor(cur)
- query = '''UPDATE fetch_history
- SET %s
- WHERE id=%%s''' % (
- ','.join('%s=%%s' % col for col in self.fetch_history_cols)
- )
- cur.execute(query, [jsonize(fetch_history.get(col)) for col in
- self.fetch_history_cols + ['id']])
-
def origin_add(self, type, url, cur=None):
"""Insert a new origin and return the new identifier."""
insert = """INSERT INTO origin (type, url) values (%s, %s)
RETURNING id"""
cur.execute(insert, (type, url))
return cur.fetchone()[0]
origin_cols = ['id', 'type', 'url']
def origin_get_by_url(self, origins, cur=None):
"""Retrieve origin `(id, type, url)` from urls if found."""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(url)
LEFT JOIN origin ON t.url = origin.url
""" % ','.join('origin.' + col for col in self.origin_cols)
yield from execute_values_generator(
cur, query, ((url,) for url in origins))
def origin_get_by_id(self, ids, cur=None):
"""Retrieve origin `(id, type, url)` from ids if found.
"""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(id)
LEFT JOIN origin ON t.id = origin.id
""" % ','.join('origin.' + col for col in self.origin_cols)
yield from execute_values_generator(
cur, query, ((id,) for id in ids))
def origin_get_range(self, origin_from=1, origin_count=100, cur=None):
"""Retrieve ``origin_count`` origins whose ids are greater
        than or equal to ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from (int): the minimum id of origins to retrieve
origin_count (int): the maximum number of origins to retrieve
"""
cur = self._cursor(cur)
query = """SELECT %s
FROM origin WHERE id >= %%s
ORDER BY id LIMIT %%s
""" % ','.join(self.origin_cols)
cur.execute(query, (origin_from, origin_count))
yield from cur
def _origin_query(self, url_pattern, count=False, offset=0, limit=50,
regexp=False, with_visit=False, cur=None):
"""
Method factorizing query creation for searching and counting origins.
"""
cur = self._cursor(cur)
if count:
origin_cols = 'COUNT(*)'
else:
origin_cols = ','.join(self.origin_cols)
query = """SELECT %s
FROM origin
WHERE """
if with_visit:
query += """
EXISTS (SELECT 1 from origin_visit WHERE origin=origin.id)
AND """
query += 'url %s %%s '
if not count:
query += 'ORDER BY id OFFSET %%s LIMIT %%s'
if not regexp:
query = query % (origin_cols, 'ILIKE')
query_params = ('%'+url_pattern+'%', offset, limit)
else:
query = query % (origin_cols, '~*')
query_params = (url_pattern, offset, limit)
if count:
query_params = (query_params[0],)
cur.execute(query, query_params)
def origin_search(self, url_pattern, offset=0, limit=50,
regexp=False, with_visit=False, cur=None):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern (str): the string pattern to search for in origin urls
offset (int): number of found origins to skip before returning
results
limit (int): the maximum number of found origins to return
regexp (bool): if True, consider the provided pattern as a regular
                expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
"""
self._origin_query(url_pattern, offset=offset, limit=limit,
regexp=regexp, with_visit=with_visit, cur=cur)
yield from cur
def origin_count(self, url_pattern, regexp=False,
with_visit=False, cur=None):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
                expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
"""
self._origin_query(url_pattern, count=True,
regexp=regexp, with_visit=with_visit, cur=cur)
return cur.fetchone()[0]
release_add_cols = [
'id', 'target', 'target_type', 'date', 'date_offset',
'date_neg_utc_offset', 'name', 'comment', 'synthetic',
'author_fullname', 'author_name', 'author_email',
]
release_get_cols = release_add_cols
def release_get_from_list(self, releases, cur=None):
cur = self._cursor(cur)
query_keys = ', '.join(
self.mangle_query_key(k, 'release')
for k in self.release_get_cols
)
yield from execute_values_generator(
cur, """
SELECT %s FROM (VALUES %%s) as t(id)
LEFT JOIN release ON t.id = release.id
LEFT JOIN person author ON release.author = author.id
""" % query_keys,
((id,) for id in releases))
def origin_metadata_add(self, origin, ts, provider, tool,
metadata, cur=None):
""" Add an origin_metadata for the origin at ts with provider, tool and
metadata.
Args:
origin (int): the origin's id for which the metadata is added
ts (datetime): time when the metadata was found
provider (int): the metadata provider identifier
tool (int): the tool's identifier used to extract metadata
metadata (jsonb): the metadata retrieved at the time and location
Returns:
id (int): the origin_metadata unique id
"""
cur = self._cursor(cur)
insert = """INSERT INTO origin_metadata (origin_id, discovery_date,
provider_id, tool_id, metadata) values (%s, %s, %s, %s, %s)
RETURNING id"""
cur.execute(insert, (origin, ts, provider, tool, jsonize(metadata)))
return cur.fetchone()[0]
origin_metadata_get_cols = ['origin_id', 'discovery_date',
'tool_id', 'metadata', 'provider_id',
'provider_name', 'provider_type',
'provider_url']
def origin_metadata_get_by(self, origin_id, provider_type=None, cur=None):
"""Retrieve all origin_metadata entries for one origin_id
"""
cur = self._cursor(cur)
if not provider_type:
query = '''SELECT %s
FROM swh_origin_metadata_get_by_origin(
%%s)''' % (','.join(
self.origin_metadata_get_cols))
cur.execute(query, (origin_id, ))
else:
query = '''SELECT %s
FROM swh_origin_metadata_get_by_provider_type(
%%s, %%s)''' % (','.join(
self.origin_metadata_get_cols))
cur.execute(query, (origin_id, provider_type))
yield from cur
tool_cols = ['id', 'name', 'version', 'configuration']
@stored_procedure('swh_mktemp_tool')
def mktemp_tool(self, cur=None):
pass
def tool_add_from_temp(self, cur=None):
cur = self._cursor(cur)
cur.execute("SELECT %s from swh_tool_add()" % (
','.join(self.tool_cols), ))
yield from cur
def tool_get(self, name, version, configuration, cur=None):
cur = self._cursor(cur)
cur.execute('''select %s
from tool
where name=%%s and
version=%%s and
configuration=%%s''' % (
','.join(self.tool_cols)),
(name, version, configuration))
return cur.fetchone()
metadata_provider_cols = ['id', 'provider_name', 'provider_type',
'provider_url', 'metadata']
def metadata_provider_add(self, provider_name, provider_type,
provider_url, metadata, cur=None):
"""Insert a new provider and return the new identifier."""
cur = self._cursor(cur)
insert = """INSERT INTO metadata_provider (provider_name, provider_type,
provider_url, metadata) values (%s, %s, %s, %s)
RETURNING id"""
cur.execute(insert, (provider_name, provider_type, provider_url,
jsonize(metadata)))
return cur.fetchone()[0]
def metadata_provider_get(self, provider_id, cur=None):
cur = self._cursor(cur)
cur.execute('''select %s
from metadata_provider
where id=%%s ''' % (
','.join(self.metadata_provider_cols)),
(provider_id, ))
return cur.fetchone()
def metadata_provider_get_by(self, provider_name, provider_url,
cur=None):
cur = self._cursor(cur)
cur.execute('''select %s
from metadata_provider
where provider_name=%%s and
provider_url=%%s''' % (
','.join(self.metadata_provider_cols)),
(provider_name, provider_url))
return cur.fetchone()
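Db.origin_visit_update() only understands the keys 'status', 'metadata' and 'snapshot' in its updates dict, and asserts nothing else is left over. A minimal calling sketch; the connection helpers and the concrete values are assumptions:

    from swh.storage.db import Db

    db = Db.connect('dbname=softwareheritage-dev')   # assumed BaseDb helper
    with db.transaction() as cur:
        db.origin_visit_update(
            origin_id=42, visit_id=1,
            updates={'status': 'full',
                     'metadata': {'loader': 'git'},
                     'snapshot': b'\x01' * 20},
            cur=cur)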
diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py
index 74b10ac0..20ebba95 100644
--- a/swh/storage/in_memory.py
+++ b/swh/storage/in_memory.py
@@ -1,1748 +1,1729 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
import re
import bisect
import dateutil
import collections
from collections import defaultdict
import copy
import datetime
import itertools
import random
import attr
from swh.model.model import \
Content, Directory, Revision, Release, Snapshot, OriginVisit, Origin
from swh.model.hashutil import DEFAULT_ALGORITHMS
from swh.objstorage import get_objstorage
from swh.objstorage.exc import ObjNotFoundError
from .storage import get_journal_writer
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
def now():
return datetime.datetime.now(tz=datetime.timezone.utc)
ENABLE_ORIGIN_IDS = \
os.environ.get('SWH_STORAGE_IN_MEMORY_ENABLE_ORIGIN_IDS', 'true') == 'true'
class Storage:
def __init__(self, journal_writer=None):
self._contents = {}
self._content_indexes = defaultdict(lambda: defaultdict(set))
self._skipped_contents = {}
self._skipped_content_indexes = defaultdict(lambda: defaultdict(set))
self.reset()
if journal_writer:
self.journal_writer = get_journal_writer(**journal_writer)
else:
self.journal_writer = None
def reset(self):
self._directories = {}
self._revisions = {}
self._releases = {}
self._snapshots = {}
self._origins = {}
self._origins_by_id = []
self._origin_visits = {}
self._persons = []
self._origin_metadata = defaultdict(list)
self._tools = {}
self._metadata_providers = {}
self._objects = defaultdict(list)
# ideally we would want a skip list for both fast inserts and searches
self._sorted_sha1s = []
self.objstorage = get_objstorage('memory', {})
def check_config(self, *, check_write):
"""Check that the storage is configured and ready to go."""
return True
def _content_add(self, contents, with_data):
content_with_data = []
content_without_data = []
for content in contents:
if content.status is None:
content.status = 'visible'
if content.length is None:
content.length = -1
if content.status != 'absent':
if self._content_key(content) not in self._contents:
content_with_data.append(content)
else:
if self._content_key(content) not in self._skipped_contents:
content_without_data.append(content)
if self.journal_writer:
for content in content_with_data:
content = attr.evolve(content, data=None)
self.journal_writer.write_addition('content', content)
for content in content_without_data:
self.journal_writer.write_addition('content', content)
count_content_added, count_content_bytes_added = \
self._content_add_present(content_with_data, with_data)
count_skipped_content_added = self._content_add_absent(
content_without_data
)
summary = {
'content:add': count_content_added,
'skipped_content:add': count_skipped_content_added,
}
if with_data:
summary['content:add:bytes'] = count_content_bytes_added
return summary
def _content_add_present(self, contents, with_data):
count_content_added = 0
count_content_bytes_added = 0
for content in contents:
key = self._content_key(content)
if key in self._contents:
continue
for algorithm in DEFAULT_ALGORITHMS:
hash_ = content.get_hash(algorithm)
if hash_ in self._content_indexes[algorithm]\
and (algorithm not in {'blake2s256', 'sha256'}):
from . import HashCollision
raise HashCollision(algorithm, hash_, key)
for algorithm in DEFAULT_ALGORITHMS:
hash_ = content.get_hash(algorithm)
self._content_indexes[algorithm][hash_].add(key)
self._objects[content.sha1_git].append(
('content', content.sha1))
self._contents[key] = content
bisect.insort(self._sorted_sha1s, content.sha1)
count_content_added += 1
if with_data:
content_data = self._contents[key].data
self._contents[key].data = None
count_content_bytes_added += len(content_data)
self.objstorage.add(content_data, content.sha1)
return (count_content_added, count_content_bytes_added)
def _content_add_absent(self, contents):
count = 0
skipped_content_missing = self.skipped_content_missing(contents)
for content in skipped_content_missing:
key = self._content_key(content)
for algo in DEFAULT_ALGORITHMS:
self._skipped_content_indexes[algo][content.get_hash(algo)] \
.add(key)
self._skipped_contents[key] = content
count += 1
return count
def _content_to_model(self, contents):
"""Takes a list of content dicts, optionally with an extra 'origin'
key, and yields tuples (model.Content, origin)."""
for content in contents:
content = content.copy()
content.pop('origin', None)
yield Content.from_dict(content)
def content_add(self, content):
"""Add content blobs to the storage
Args:
content (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- data (bytes): the actual content
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
- reason (str): if status = absent, the reason why
- origin (int): if status = absent, the origin we saw the
content in
Raises:
HashCollision in case of collision
Returns:
            Summary dict with the following keys and associated values:
                content:add: New contents added
                content:add:bytes: Sum of the new contents' data length
skipped_content:add: New skipped contents (no data) added
"""
content = list(self._content_to_model(content))
now = datetime.datetime.now(tz=datetime.timezone.utc)
for item in content:
item.ctime = now
return self._content_add(content, with_data=True)
def content_add_metadata(self, content):
"""Add content metadata to the storage (like `content_add`, but
without inserting to the objstorage).
Args:
content (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
- reason (str): if status = absent, the reason why
- origin (int): if status = absent, the origin we saw the
content in
- ctime (datetime): time of insertion in the archive
Raises:
HashCollision in case of collision
Returns:
Summary dict with the following key and associated values:
content:add: New contents added
skipped_content:add: New skipped contents (no data) added
"""
content = list(self._content_to_model(content))
return self._content_add(content, with_data=False)
def content_get(self, content):
"""Retrieve in bulk contents and their data.
This function may yield more blobs than provided sha1 identifiers,
in case they collide.
Args:
content: iterables of sha1
Yields:
Dict[str, bytes]: Generates streams of contents as dict with their
raw data:
- sha1 (bytes): content id
- data (bytes): content's raw data
Raises:
            ValueError in case too many contents are requested;
cf. BULK_BLOCK_CONTENT_LEN_MAX
"""
# FIXME: Make this method support slicing the `data`.
if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
raise ValueError(
"Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX)
for obj_id in content:
try:
data = self.objstorage.get(obj_id)
except ObjNotFoundError:
yield None
continue
yield {'sha1': obj_id, 'data': data}
def content_get_range(self, start, end, limit=1000, db=None, cur=None):
"""Retrieve contents within range [start, end] bound by limit.
Note that this function may return more than one blob per hash. The
limit is enforced with multiplicity (ie. two blobs with the same hash
will count twice toward the limit).
Args:
**start** (bytes): Starting identifier range (expected smaller
than end)
**end** (bytes): Ending identifier range (expected larger
than start)
**limit** (int): Limit result (default to 1000)
Returns:
a dict with keys:
- contents [dict]: iterable of contents in between the range.
- next (bytes): There remains content in the range
starting from this next sha1
"""
if limit is None:
raise ValueError('Development error: limit should not be None')
from_index = bisect.bisect_left(self._sorted_sha1s, start)
sha1s = itertools.islice(self._sorted_sha1s, from_index, None)
sha1s = ((sha1, content_key)
for sha1 in sha1s
for content_key in self._content_indexes['sha1'][sha1])
matched = []
next_content = None
for sha1, key in sha1s:
if sha1 > end:
break
if len(matched) >= limit:
next_content = sha1
break
matched.append(self._contents[key].to_dict())
return {
'contents': matched,
'next': next_content,
}
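content_get_range() walks contents ordered by sha1 and reports the next sha1 to resume from, so callers can page through a range. A minimal paging sketch; the bounds are illustrative and `storage` is any object exposing this API:

    start, end = b'\x00' * 20, b'\xff' * 20
    while start is not None:
        page = storage.content_get_range(start, end, limit=1000)
        for item in page['contents']:
            pass  # each item is a content metadata dict
        start = page['next']  # None once the range is exhausted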
def content_get_metadata(self, content):
"""Retrieve content metadata in bulk
Args:
content: iterable of content identifiers (sha1)
Returns:
an iterable with content metadata corresponding to the given ids
"""
# FIXME: the return value should be a mapping from search key to found
# content*s*
for sha1 in content:
if sha1 in self._content_indexes['sha1']:
objs = self._content_indexes['sha1'][sha1]
# FIXME: rather than selecting one of the objects with that
# hash, we should return all of them. See:
# https://forge.softwareheritage.org/D645?id=1994#inline-3389
key = random.sample(objs, 1)[0]
d = self._contents[key].to_dict()
del d['ctime']
yield d
else:
# FIXME: should really be None
yield {
'sha1': sha1,
'sha1_git': None,
'sha256': None,
'blake2s256': None,
'length': None,
'status': None,
}
def content_find(self, content):
if not set(content).intersection(DEFAULT_ALGORITHMS):
raise ValueError('content keys must contain at least one of: '
'%s' % ', '.join(sorted(DEFAULT_ALGORITHMS)))
found = []
for algo in DEFAULT_ALGORITHMS:
hash = content.get(algo)
if hash and hash in self._content_indexes[algo]:
found.append(self._content_indexes[algo][hash])
if not found:
return []
keys = list(set.intersection(*found))
return [self._contents[key].to_dict() for key in keys]
def content_missing(self, content, key_hash='sha1'):
"""List content missing from storage
Args:
content ([dict]): iterable of dictionaries whose keys are
either 'length' or an item of
:data:`swh.model.hashutil.ALGORITHMS`,
mapped to the corresponding checksum
(or length).
key_hash (str): name of the column to use as hash id
in the result (default: 'sha1')
Returns:
iterable ([bytes]): missing content ids (as per the
key_hash column)
"""
for cont in content:
for (algo, hash_) in cont.items():
if algo not in DEFAULT_ALGORITHMS:
continue
if hash_ not in self._content_indexes.get(algo, []):
yield cont[key_hash]
break
else:
for result in self.content_find(cont):
if result['status'] == 'missing':
yield cont[key_hash]
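# Illustrative usage sketch (not part of this change): only add contents that
# are not already known; `storage` and `contents` (dicts carrying checksums
# and data, as accepted by content_add) are assumed.
def _example_add_missing_contents(storage, contents):
    missing = set(storage.content_missing(contents, key_hash='sha1'))
    return storage.content_add([c for c in contents if c['sha1'] in missing])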
def content_missing_per_sha1(self, contents):
"""List content missing from storage based only on sha1.
Args:
contents: Iterable of sha1 to check for absence.
Returns:
iterable: missing ids
Raises:
TODO: an exception when we get a hash collision.
"""
for content in contents:
if content not in self._content_indexes['sha1']:
yield content
def skipped_content_missing(self, contents):
"""List all skipped_content missing from storage
Args:
contents: Iterable of sha1 to check for skipped content entry
Returns:
iterable: dict of skipped content entry
"""
for content in contents:
for (key, algorithm) in self._content_key_algorithm(content):
if algorithm == 'blake2s256':
continue
if key not in self._skipped_content_indexes[algorithm]:
# index must contain hashes of algos except blake2s256
# else the content is considered skipped
yield content
break
def directory_add(self, directories):
"""Add directories to the storage
Args:
directories (iterable): iterable of dictionaries representing the
individual directories to add. Each dict has the following
keys:
- id (sha1_git): the id of the directory to add
- entries (list): list of dicts for each entry in the
directory. Each dict has the following keys:
- name (bytes)
- type (one of 'file', 'dir', 'rev'): type of the
directory entry (file, directory, revision)
- target (sha1_git): id of the object pointed at by the
directory entry
- perms (int): entry permissions
Returns:
Summary dict of keys with associated count as values:
directory:add: Number of directories actually added
"""
if self.journal_writer:
self.journal_writer.write_additions(
'directory',
(dir_ for dir_ in directories
if dir_['id'] not in self._directories))
directories = [Directory.from_dict(d) for d in directories]
count = 0
for directory in directories:
if directory.id not in self._directories:
count += 1
self._directories[directory.id] = directory
self._objects[directory.id].append(
('directory', directory.id))
return {'directory:add': count}
def directory_missing(self, directories):
"""List directories missing from storage
Args:
directories (iterable): an iterable of directory ids
Yields:
missing directory ids
"""
for id in directories:
if id not in self._directories:
yield id
def _join_dentry_to_content(self, dentry):
keys = (
'status',
'sha1',
'sha1_git',
'sha256',
'length',
)
ret = dict.fromkeys(keys)
ret.update(dentry)
if ret['type'] == 'file':
# TODO: Make it able to handle more than one content
content = self.content_find({'sha1_git': ret['target']})
if content:
content = content[0]
for key in keys:
ret[key] = content[key]
return ret
def _directory_ls(self, directory_id, recursive, prefix=b''):
if directory_id in self._directories:
for entry in self._directories[directory_id].entries:
ret = self._join_dentry_to_content(entry.to_dict())
ret['name'] = prefix + ret['name']
ret['dir_id'] = directory_id
yield ret
if recursive and ret['type'] == 'dir':
yield from self._directory_ls(
ret['target'], True, prefix + ret['name'] + b'/')
def directory_ls(self, directory, recursive=False):
"""Get entries for one directory.
Args:
- directory: the directory to list entries from.
- recursive: if set, list entries recursively from this directory.
Returns:
List of entries of the directory.
If `recursive=True`, names in the path of a dir/file not at the
root are concatenated with a slash (`/`).
"""
yield from self._directory_ls(directory, recursive)
def directory_entry_get_by_path(self, directory, paths):
"""Get the directory entry (either file or dir) from directory with path.
Args:
- directory: sha1 of the top level directory
- paths: path to lookup from the top level directory. From left
(top) to right (bottom).
Returns:
The corresponding directory entry if found, None otherwise.
"""
return self._directory_entry_get_by_path(directory, paths, b'')
def _directory_entry_get_by_path(self, directory, paths, prefix):
if not paths:
return
contents = list(self.directory_ls(directory))
if not contents:
return
def _get_entry(entries, name):
for entry in entries:
if entry['name'] == name:
entry = entry.copy()
entry['name'] = prefix + entry['name']
return entry
first_item = _get_entry(contents, paths[0])
if len(paths) == 1:
return first_item
if not first_item or first_item['type'] != 'dir':
return
return self._directory_entry_get_by_path(
first_item['target'], paths[1:], prefix + paths[0] + b'/')
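# Illustrative usage sketch (not part of this change): resolving a (made-up)
# path below a root directory; path components are bytes, top to bottom.
def _example_lookup_path(storage, dir_id):
    entry = storage.directory_entry_get_by_path(dir_id, [b'src', b'main.py'])
    if entry is None:
        return None
    return entry['target']  # sha1_git of the pointed-to object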
def revision_add(self, revisions):
"""Add revisions to the storage
Args:
revisions (Iterable[dict]): iterable of dictionaries representing
the individual revisions to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the revision to add
- **date** (:class:`dict`): date the revision was written
- **committer_date** (:class:`dict`): date the revision got
added to the origin
- **type** (one of 'git', 'tar'): type of the
revision added
- **directory** (:class:`sha1_git`): the directory the
revision points at
- **message** (:class:`bytes`): the message associated with
the revision
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **committer** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **metadata** (:class:`jsonb`): extra information as
dictionary
- **synthetic** (:class:`bool`): true iff the revision was
created by Software Heritage (e.g. when loading a tarball
or a bare directory)
- **parents** (:class:`list[sha1_git]`): the parents of
this revision
date dictionaries have the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
revision:add: New revisions actually stored in db
"""
if self.journal_writer:
self.journal_writer.write_additions(
'revision',
(rev for rev in revisions
if rev['id'] not in self._revisions))
revisions = [Revision.from_dict(rev) for rev in revisions]
count = 0
for revision in revisions:
if revision.id not in self._revisions:
revision.committer = self._person_add(revision.committer)
revision.author = self._person_add(revision.author)
self._revisions[revision.id] = revision
self._objects[revision.id].append(
('revision', revision.id))
count += 1
return {'revision:add': count}
def revision_missing(self, revisions):
"""List revisions missing from storage
Args:
revisions (iterable): revision ids
Yields:
missing revision ids
"""
for id in revisions:
if id not in self._revisions:
yield id
def revision_get(self, revisions):
for id in revisions:
if id in self._revisions:
yield self._revisions.get(id).to_dict()
else:
yield None
def _get_parent_revs(self, rev_id, seen, limit):
if limit and len(seen) >= limit:
return
if rev_id in seen or rev_id not in self._revisions:
return
seen.add(rev_id)
yield self._revisions[rev_id].to_dict()
for parent in self._revisions[rev_id].parents:
yield from self._get_parent_revs(parent, seen, limit)
def revision_log(self, revisions, limit=None):
"""Fetch revision entry from the given root revisions.
Args:
revisions: array of root revision to lookup
limit: limitation on the output result. Default to None.
Yields:
List of revision log from such revisions root.
"""
seen = set()
for rev_id in revisions:
yield from self._get_parent_revs(rev_id, seen, limit)
def revision_shortlog(self, revisions, limit=None):
"""Fetch the shortlog for the given revisions
Args:
revisions: list of root revisions to lookup
limit: depth limitation for the output
Yields:
a list of (id, parents) tuples.
"""
yield from ((rev['id'], rev['parents'])
for rev in self.revision_log(revisions, limit))
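# Illustrative usage sketch (not part of this change): walking the history of
# one (hypothetical) root revision and rebuilding its parent graph from the
# shortlog; `storage` is assumed to be an instance of this class.
def _example_revision_history(storage, rev_id):
    entries = list(storage.revision_log([rev_id], limit=100))
    graph = dict(storage.revision_shortlog([rev_id], limit=100))
    return entries, graph  # graph maps revision id -> its parent ids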
def release_add(self, releases):
"""Add releases to the storage
Args:
releases (Iterable[dict]): iterable of dictionaries representing
the individual releases to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the release to add
- **revision** (:class:`sha1_git`): id of the revision the
release points to
- **date** (:class:`dict`): the date the release was made
- **name** (:class:`bytes`): the name of the release
- **comment** (:class:`bytes`): the comment associated with
the release
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
the date dictionary has the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
release:add: New releases actually stored in db
"""
if self.journal_writer:
self.journal_writer.write_additions(
'release',
(rel for rel in releases
if rel['id'] not in self._releases))
releases = [Release.from_dict(rel) for rel in releases]
count = 0
for rel in releases:
if rel.id not in self._releases:
if rel.author:
self._person_add(rel.author)
self._objects[rel.id].append(
('release', rel.id))
self._releases[rel.id] = rel
count += 1
return {'release:add': count}
def release_missing(self, releases):
"""List releases missing from storage
Args:
releases: an iterable of release ids
Returns:
a list of missing release ids
"""
yield from (rel for rel in releases if rel not in self._releases)
def release_get(self, releases):
"""Given a list of sha1, return the releases's information
Args:
releases: list of sha1s
Yields:
dicts with the same keys as those given to `release_add`
(or ``None`` if a release does not exist)
"""
for rel_id in releases:
if rel_id in self._releases:
yield self._releases[rel_id].to_dict()
else:
yield None
def snapshot_add(self, snapshots):
"""Add a snapshot to the storage
Args:
snapshot ([dict]): the snapshots to add, containing the
following keys:
- **id** (:class:`bytes`): id of the snapshot
- **branches** (:class:`dict`): branches the snapshot contains,
mapping the branch name (:class:`bytes`) to the branch target,
itself a :class:`dict` (or ``None`` if the branch points to an
unknown object)
- **target_type** (:class:`str`): one of ``content``,
``directory``, ``revision``, ``release``,
``snapshot``, ``alias``
- **target** (:class:`bytes`): identifier of the target
(currently a ``sha1_git`` for all object kinds, or the name
of the target branch for aliases)
Returns:
Summary dict of keys with associated count as values
snapshot:add: Count of snapshots actually stored in db
"""
count = 0
snapshots = (Snapshot.from_dict(d) for d in snapshots)
snapshots = (snap for snap in snapshots
if snap.id not in self._snapshots)
for snapshot in snapshots:
if self.journal_writer:
self.journal_writer.write_addition('snapshot', snapshot)
sorted_branch_names = sorted(snapshot.branches)
self._snapshots[snapshot.id] = (snapshot, sorted_branch_names)
self._objects[snapshot.id].append(('snapshot', snapshot.id))
count += 1
return {'snapshot:add': count}
def snapshot_get(self, snapshot_id):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: a dict with three keys:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has fewer than 1000
branches.
"""
return self.snapshot_get_branches(snapshot_id)
def snapshot_get_by_origin_visit(self, origin, visit):
"""Get the content, possibly partial, of a snapshot for the given origin visit
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
origin (int): the origin's identifier
visit (int): the visit's identifier
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has fewer than 1000
branches.
"""
origin_url = self._get_origin_url(origin)
if not origin_url:
return
if origin_url not in self._origins or \
visit > len(self._origin_visits[origin_url]):
return None
snapshot_id = self._origin_visits[origin_url][visit-1].snapshot
if snapshot_id:
return self.snapshot_get(snapshot_id)
else:
return None
def snapshot_get_latest(self, origin, allowed_statuses=None):
"""Get the content, possibly partial, of the latest snapshot for the
given origin, optionally only from visits that have one of the given
allowed_statuses
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the methods :meth:`origin_visit_get_latest`
and :meth:`snapshot_get_branches` should be used instead.
Args:
origin (Union[str,int]): the origin's URL or identifier
allowed_statuses (list of str): list of visit statuses considered
to find the latest snapshot for the origin. For instance,
``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
Returns:
dict: a dict with three keys:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has fewer than 1000
branches.
"""
origin_url = self._get_origin_url(origin)
if not origin_url:
return
visit = self.origin_visit_get_latest(
origin_url,
allowed_statuses=allowed_statuses,
require_snapshot=True)
if visit and visit['snapshot']:
snapshot = self.snapshot_get(visit['snapshot'])
if not snapshot:
raise ValueError(
'last origin visit references an unknown snapshot')
return snapshot
def snapshot_count_branches(self, snapshot_id, db=None, cur=None):
"""Count the number of branches in the snapshot with the given id
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: A dict whose keys are the target types of branches and
values their corresponding amount
"""
(snapshot, _) = self._snapshots[snapshot_id]
return collections.Counter(branch.target_type.value if branch else None
for branch in snapshot.branches.values())
def snapshot_get_branches(self, snapshot_id, branches_from=b'',
branches_count=1000, target_types=None):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
Args:
snapshot_id (bytes): identifier of the snapshot
branches_from (bytes): optional parameter used to skip branches
whose name is smaller than it before returning them
branches_count (int): optional parameter used to restrain
the amount of returned branches
target_types (list): optional parameter used to filter the
target types of branch to return (possible values that can be
contained in that list are `'content', 'directory',
'revision', 'release', 'snapshot', 'alias'`)
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has fewer than
`branches_count` branches left, starting from `branches_from` included.
"""
res = self._snapshots.get(snapshot_id)
if res is None:
return None
(snapshot, sorted_branch_names) = res
from_index = bisect.bisect_left(
sorted_branch_names, branches_from)
if target_types:
next_branch = None
branches = {}
for branch_name in sorted_branch_names[from_index:]:
branch = snapshot.branches[branch_name]
if branch and branch.target_type.value in target_types:
if len(branches) < branches_count:
branches[branch_name] = branch
else:
next_branch = branch_name
break
else:
# As there is no 'target_types', we can do that much faster
to_index = from_index + branches_count
returned_branch_names = sorted_branch_names[from_index:to_index]
branches = {branch_name: snapshot.branches[branch_name]
for branch_name in returned_branch_names}
if to_index >= len(sorted_branch_names):
next_branch = None
else:
next_branch = sorted_branch_names[to_index]
branches = {name: branch.to_dict() if branch else None
for (name, branch) in branches.items()}
return {
'id': snapshot_id,
'branches': branches,
'next_branch': next_branch,
}
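# Illustrative usage sketch (not part of this change): iterating over every
# branch of an existing snapshot, one page at a time; `snapshot_id` is
# assumed to reference a known snapshot.
def _example_iter_branches(storage, snapshot_id):
    name = b''
    while True:
        part = storage.snapshot_get_branches(
            snapshot_id, branches_from=name, branches_count=100)
        yield from part['branches'].items()
        name = part['next_branch']
        if name is None:
            return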
def object_find_by_sha1_git(self, ids, db=None, cur=None):
"""Return the objects found with the given ids.
Args:
ids: a generator of sha1_gits
Returns:
dict: a mapping from id to the list of objects found. Each object
found is itself a dict with keys:
- sha1_git: the input id
- type: the type of object found
- id: the id of the object found
- object_id: the numeric id of the object found.
"""
ret = {}
for id_ in ids:
objs = self._objects.get(id_, [])
ret[id_] = [{
'sha1_git': id_,
'type': obj[0],
'id': obj[1],
'object_id': id_,
} for obj in objs]
return ret
def _convert_origin(self, t):
if t is None:
return None
(origin_id, origin) = t
origin = origin.to_dict()
if ENABLE_ORIGIN_IDS:
origin['id'] = origin_id
return origin
def origin_get(self, origins):
"""Return origins, either all identified by their ids or all
identified by tuples (type, url).
If the url is given and the type is omitted, one of the origins with
that url is returned.
Args:
origin: a list of dictionaries representing the individual
origins to find.
These dicts have either the key url (and optionally type):
- type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
- url (bytes): the url the origin points to
or the id:
- id (int): the origin's identifier
Returns:
dict: the origin dictionary with the keys:
- id: origin's id
- type: origin's type
- url: origin's url
Raises:
ValueError: if the keys match neither (url and type) nor id.
"""
if isinstance(origins, dict):
# Old API
return_single = True
origins = [origins]
else:
return_single = False
# Sanity check to be error-compatible with the pgsql backend
if any('id' in origin for origin in origins) \
and not all('id' in origin for origin in origins):
raise ValueError(
'Either all origins or none at all should have an "id".')
if any('url' in origin for origin in origins) \
and not all('url' in origin for origin in origins):
raise ValueError(
'Either all origins or none at all should have '
'an "url" key.')
results = []
for origin in origins:
result = None
if 'id' in origin:
assert ENABLE_ORIGIN_IDS, 'origin ids are disabled'
if origin['id'] <= len(self._origins_by_id):
result = self._origins[self._origins_by_id[origin['id']-1]]
elif 'url' in origin:
if origin['url'] in self._origins:
result = self._origins[origin['url']]
else:
raise ValueError(
'Origin must have either id or url.')
results.append(self._convert_origin(result))
if return_single:
assert len(results) == 1
return results[0]
else:
return results
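# Illustrative usage sketch (not part of this change): batched lookup by URL;
# the URLs are placeholders. Unknown origins yield None at their position.
def _example_origin_lookup(storage):
    return storage.origin_get([
        {'url': 'https://example.org/known.git'},
        {'url': 'https://example.org/unknown.git'},
    ])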
def origin_get_range(self, origin_from=1, origin_count=100):
"""Retrieve ``origin_count`` origins whose ids are greater
or equal than ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from (int): the minimum id of origins to retrieve
origin_count (int): the maximum number of origins to retrieve
Yields:
dicts containing origin information as returned
by :meth:`swh.storage.in_memory.Storage.origin_get`.
"""
origin_from = max(origin_from, 1)
if origin_from <= len(self._origins_by_id):
max_idx = origin_from + origin_count - 1
if max_idx > len(self._origins_by_id):
max_idx = len(self._origins_by_id)
for idx in range(origin_from-1, max_idx):
yield self._convert_origin(
self._origins[self._origins_by_id[idx]])
def origin_search(self, url_pattern, offset=0, limit=50,
regexp=False, with_visit=False, db=None, cur=None):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern (str): the string pattern to search for in origin urls
offset (int): number of found origins to skip before returning
results
limit (int): the maximum number of found origins to return
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Returns:
An iterable of dicts containing origin information as returned
by :meth:`swh.storage.storage.Storage.origin_get`.
"""
origins = map(self._convert_origin, self._origins.values())
if regexp:
pat = re.compile(url_pattern)
origins = [orig for orig in origins if pat.search(orig['url'])]
else:
origins = [orig for orig in origins if url_pattern in orig['url']]
if with_visit:
origins = [orig for orig in origins
if len(self._origin_visits[orig['url']]) > 0]
if ENABLE_ORIGIN_IDS:
origins.sort(key=lambda origin: origin['id'])
return origins[offset:offset+limit]
def origin_count(self, url_pattern, regexp=False, with_visit=False,
db=None, cur=None):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Returns:
int: The number of origins matching the search criterion.
"""
return len(self.origin_search(url_pattern, regexp=regexp,
with_visit=with_visit,
limit=len(self._origins)))
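# Illustrative usage sketch (not part of this change): counting then paging
# matches for a (made-up) substring pattern, restricted to visited origins.
def _example_search_origins(storage, pattern='example.org/'):
    total = storage.origin_count(pattern, with_visit=True)
    first_page = storage.origin_search(pattern, offset=0, limit=50,
                                       with_visit=True)
    return total, first_page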
def origin_add(self, origins):
"""Add origins to the storage
Args:
origins: list of dictionaries representing the individual origins,
with the following keys:
- type: the origin type ('git', 'svn', 'deb', ...)
- url (bytes): the url the origin points to
Returns:
list: given origins as dict updated with their id
"""
origins = copy.deepcopy(origins)
for origin in origins:
if ENABLE_ORIGIN_IDS:
origin['id'] = self.origin_add_one(origin)
else:
self.origin_add_one(origin)
return origins
def origin_add_one(self, origin):
"""Add origin to the storage
Args:
origin: dictionary representing the individual origin to add. This
dict has the following keys:
- type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
- url (bytes): the url the origin points to
Returns:
the id of the added origin, or of the identical one that already
exists.
"""
origin = Origin.from_dict(origin)
if origin.url in self._origins:
if ENABLE_ORIGIN_IDS:
(origin_id, _) = self._origins[origin.url]
else:
if self.journal_writer:
self.journal_writer.write_addition('origin', origin)
if ENABLE_ORIGIN_IDS:
# origin ids are in the range [1, +inf[
origin_id = len(self._origins) + 1
self._origins_by_id.append(origin.url)
assert len(self._origins_by_id) == origin_id
else:
origin_id = None
self._origins[origin.url] = (origin_id, origin)
self._origin_visits[origin.url] = []
self._objects[origin.url].append(('origin', origin.url))
if ENABLE_ORIGIN_IDS:
return origin_id
else:
return origin.url
- def fetch_history_start(self, origin_id):
- """Add an entry for origin origin_id in fetch_history. Returns the id
- of the added fetch_history entry
- """
- assert not ENABLE_ORIGIN_IDS, 'origin ids are disabled'
- pass
-
- def fetch_history_end(self, fetch_history_id, data):
- """Close the fetch_history entry with id `fetch_history_id`, replacing
- its data with `data`.
- """
- pass
-
- def fetch_history_get(self, fetch_history_id):
- """Get the fetch_history entry with id `fetch_history_id`.
- """
- raise NotImplementedError('fetch_history_get is deprecated, use '
- 'origin_visit_get instead.')
-
def origin_visit_add(self, origin, date, type=None):
"""Add an origin_visit for the origin at date with status 'ongoing'.
For backward compatibility, `type` is optional and defaults to
the origin's type.
Args:
origin (Union[int,str]): visited origin's identifier or URL
date (Union[str,datetime]): timestamp of such visit
type (str): the type of loader used for the visit (hg, git, ...)
Returns:
dict: dictionary with keys origin and visit where:
- origin: origin's identifier
- visit: the visit's identifier for the new visit occurrence
"""
origin_url = self._get_origin_url(origin)
if origin_url is None:
raise ValueError('Unknown origin.')
if isinstance(date, str):
# FIXME: Converge on iso8601 at some point
date = dateutil.parser.parse(date)
elif not isinstance(date, datetime.datetime):
raise TypeError('date must be a datetime or a string.')
visit_ret = None
if origin_url in self._origins:
(origin_id, origin) = self._origins[origin_url]
# visit ids are in the range [1, +inf[
visit_id = len(self._origin_visits[origin_url]) + 1
status = 'ongoing'
visit = OriginVisit(
origin=origin,
date=date,
type=type or origin.type,
status=status,
snapshot=None,
metadata=None,
visit=visit_id,
)
self._origin_visits[origin_url].append(visit)
visit_ret = {
'origin': origin_id if ENABLE_ORIGIN_IDS else origin.url,
'visit': visit_id,
}
self._objects[(origin_url, visit_id)].append(
('origin_visit', None))
if self.journal_writer:
self.journal_writer.write_addition('origin_visit', visit)
return visit_ret
def origin_visit_update(self, origin, visit_id, status=None,
metadata=None, snapshot=None):
"""Update an origin_visit's status.
Args:
origin (Union[int,str]): visited origin's identifier or URL
visit_id (int): visit's identifier
status: visit's new status
metadata: data associated to the visit
snapshot (sha1_git): identifier of the snapshot to add to
the visit
Returns:
None
"""
origin_url = self._get_origin_url(origin)
if origin_url is None:
raise ValueError('Unknown origin.')
try:
visit = self._origin_visits[origin_url][visit_id-1]
except IndexError:
raise ValueError('Unknown visit_id for this origin') \
from None
updates = {}
if status:
updates['status'] = status
if metadata:
updates['metadata'] = metadata
if snapshot:
updates['snapshot'] = snapshot
visit = attr.evolve(visit, **updates)
if self.journal_writer:
(_, origin) = self._origins[origin_url]
self.journal_writer.write_update('origin_visit', visit)
self._origin_visits[origin_url][visit_id-1] = visit
if origin_url not in self._origin_visits or \
visit_id > len(self._origin_visits[origin_url]):
return
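# Illustrative usage sketch (not part of this change): the typical loader
# sequence around a visit; the origin URL and `snapshot_id` are placeholders.
def _example_visit_lifecycle(storage, snapshot_id):
    import datetime
    origin = {'url': 'https://example.org/repo.git', 'type': 'git'}
    storage.origin_add_one(origin)
    visit = storage.origin_visit_add(
        origin['url'],
        date=datetime.datetime.now(tz=datetime.timezone.utc),
        type='git')
    # ... load the origin's contents, then close the visit:
    storage.origin_visit_update(
        origin['url'], visit['visit'], status='full', snapshot=snapshot_id)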
def origin_visit_upsert(self, visits):
"""Add a origin_visits with a specific id and with all its data.
If there is already an origin_visit with the same
`(origin_url, visit_id)`, updates it instead of inserting a new one.
Args:
visits: iterable of dicts with keys:
origin: dict with keys either `id` or `url`
visit: origin visit id
type: type of loader used for the visit
date: timestamp of such visit
status: Visit's new status
metadata: Data associated to the visit
snapshot (sha1_git): identifier of the snapshot to add to
the visit
"""
visits = [OriginVisit.from_dict(d) for d in visits]
if self.journal_writer:
for visit in visits:
(_, visit.origin) = self._origins[visit.origin.url]
self.journal_writer.write_addition('origin_visit', visit)
for visit in visits:
visit_id = visit.visit
origin_url = visit.origin.url
self._objects[(origin_url, visit_id)].append(
('origin_visit', None))
while len(self._origin_visits[origin_url]) <= visit_id:
self._origin_visits[origin_url].append(None)
self._origin_visits[origin_url][visit_id-1] = visit
def _convert_visit(self, visit):
if visit is None:
return
(origin_id, origin) = self._origins[visit.origin.url]
visit = visit.to_dict()
if ENABLE_ORIGIN_IDS:
visit['origin'] = origin_id
else:
visit['origin'] = origin.url
return visit
def origin_visit_get(self, origin, last_visit=None, limit=None):
"""Retrieve all the origin's visit's information.
Args:
origin (int): the origin's identifier
last_visit (int): visit's id from which listing the next ones,
default to None
limit (int): maximum number of results to return,
default to None
Yields:
List of visits.
"""
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits:
visits = self._origin_visits[origin_url]
if last_visit is not None:
visits = visits[last_visit:]
if limit is not None:
visits = visits[:limit]
for visit in visits:
if not visit:
continue
visit_id = visit.visit
yield self._convert_visit(
self._origin_visits[origin_url][visit_id-1])
def origin_visit_find_by_date(self, origin, visit_date):
"""Retrieves the origin visit whose date is closest to the provided
timestamp.
In case of a tie, the visit with the largest id is selected.
Args:
origin (str): The occurrence's origin (URL).
visit_date (datetime): target timestamp
Returns:
A visit.
"""
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits:
visits = self._origin_visits[origin_url]
visit = min(
visits,
key=lambda v: (abs(v.date - visit_date), -v.visit))
return self._convert_visit(visit)
def origin_visit_get_by(self, origin, visit):
"""Retrieve origin visit's information.
Args:
origin (int): the origin's identifier
visit (int): the visit's identifier
Returns:
The information on that particular (origin, visit) or None if
it does not exist
"""
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits and \
visit <= len(self._origin_visits[origin_url]):
return self._convert_visit(
self._origin_visits[origin_url][visit-1])
def origin_visit_get_latest(
self, origin, allowed_statuses=None, require_snapshot=False):
"""Get the latest origin visit for the given origin, optionally
looking only for those with one of the given allowed_statuses
or for those with a known snapshot.
Args:
origin (str): the origin's URL
allowed_statuses (list of str): list of visit statuses considered
to find the latest visit. For instance,
``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
require_snapshot (bool): If True, only a visit with a snapshot
will be returned.
Returns:
dict: a dict with the following keys:
origin: the URL of the origin
visit: origin visit id
type: type of loader used for the visit
date: timestamp of such visit
status: Visit's new status
metadata: Data associated to the visit
snapshot (Optional[sha1_git]): identifier of the snapshot
associated to the visit
"""
res = self._origins.get(origin)
if not res:
return
(_, origin) = res
visits = self._origin_visits[origin.url]
if allowed_statuses is not None:
visits = [visit for visit in visits
if visit.status in allowed_statuses]
if require_snapshot:
visits = [visit for visit in visits
if visit.snapshot]
visit = max(
visits, key=lambda v: (v.date, v.visit), default=None)
return self._convert_visit(visit)
def stat_counters(self):
"""compute statistics about the number of tuples in various tables
Returns:
dict: a dictionary mapping textual labels (e.g., content) to
integer values (e.g., the number of tuples in table content)
"""
keys = (
'content',
'directory',
'origin',
'origin_visit',
'person',
'release',
'revision',
'skipped_content',
'snapshot'
)
stats = {key: 0 for key in keys}
stats.update(collections.Counter(
obj_type
for (obj_type, obj_id)
in itertools.chain(*self._objects.values())))
return stats
def refresh_stat_counters(self):
"""Recomputes the statistics for `stat_counters`."""
pass
def origin_metadata_add(self, origin_id, ts, provider, tool, metadata,
db=None, cur=None):
""" Add an origin_metadata for the origin at ts with provenance and
metadata.
Args:
origin_id (int): the origin's id for which the metadata is added
ts (datetime): timestamp of the found metadata
provider: id of the provider of metadata (ex:'hal')
tool: id of the tool used to extract metadata
metadata (jsonb): the metadata retrieved at the time and location
"""
if isinstance(origin_id, str):
origin = self.origin_get({'url': origin_id})
if not origin:
return
origin_id = origin['id']
if isinstance(ts, str):
ts = dateutil.parser.parse(ts)
origin_metadata = {
'origin_id': origin_id,
'discovery_date': ts,
'tool_id': tool,
'metadata': metadata,
'provider_id': provider,
}
self._origin_metadata[origin_id].append(origin_metadata)
return None
def origin_metadata_get_by(self, origin_id, provider_type=None, db=None,
cur=None):
"""Retrieve list of all origin_metadata entries for the origin_id
Args:
origin_id (int): the unique origin's identifier
provider_type (str): (optional) type of provider
Returns:
list of dicts: the origin_metadata dictionary with the keys:
- origin_id (int): origin's identifier
- discovery_date (datetime): timestamp of discovery
- tool_id (int): metadata's extracting tool
- metadata (jsonb)
- provider_id (int): metadata's provider
- provider_name (str)
- provider_type (str)
- provider_url (str)
"""
if isinstance(origin_id, str):
origin = self.origin_get({'url': origin_id})
if not origin:
return
origin_id = origin['id']
metadata = []
for item in self._origin_metadata[origin_id]:
item = copy.deepcopy(item)
provider = self.metadata_provider_get(item['provider_id'])
for attr_name in ('name', 'type', 'url'):
item['provider_' + attr_name] = \
provider['provider_' + attr_name]
metadata.append(item)
return metadata
def tool_add(self, tools):
"""Add new tools to the storage.
Args:
tools (iterable of :class:`dict`): Tool information to add to
storage. Each tool is a :class:`dict` with the following keys:
- name (:class:`str`): name of the tool
- version (:class:`str`): version of the tool
- configuration (:class:`dict`): configuration of the tool,
must be json-encodable
Returns:
:class:`list` of :class:`dict`: All the tools inserted in storage
(including the internal ``id``). The order of the list is not
guaranteed to match the order of the initial list.
"""
inserted = []
for tool in tools:
key = self._tool_key(tool)
assert 'id' not in tool
record = copy.deepcopy(tool)
record['id'] = key # TODO: remove this
if key not in self._tools:
self._tools[key] = record
inserted.append(copy.deepcopy(self._tools[key]))
return inserted
def tool_get(self, tool):
"""Retrieve tool information.
Args:
tool (dict): Tool information we want to retrieve from storage.
The dicts have the same keys as those used in :func:`tool_add`.
Returns:
dict: The full tool information if it exists (``id`` included),
None otherwise.
"""
return self._tools.get(self._tool_key(tool))
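# Illustrative usage sketch (not part of this change): registering a
# (made-up) tool and looking it up again with the same description.
def _example_tool_roundtrip(storage):
    tool = {'name': 'example-detector', 'version': '0.0.1',
            'configuration': {'type': 'local'}}
    storage.tool_add([tool])
    return storage.tool_get(tool)  # includes the internal 'id'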
def metadata_provider_add(self, provider_name, provider_type, provider_url,
metadata):
"""Add a metadata provider.
Args:
provider_name (str): Its name
provider_type (str): Its type
provider_url (str): Its URL
metadata: JSON-encodable object
Returns:
an identifier of the provider
"""
provider = {
'provider_name': provider_name,
'provider_type': provider_type,
'provider_url': provider_url,
'metadata': metadata,
}
key = self._metadata_provider_key(provider)
provider['id'] = key
self._metadata_providers[key] = provider
return key
def metadata_provider_get(self, provider_id, db=None, cur=None):
"""Get a metadata provider
Args:
provider_id: Its identifier, as given by `metadata_provider_add`.
Returns:
dict: same as `metadata_provider_add`;
or None if it does not exist.
"""
return self._metadata_providers.get(provider_id)
def metadata_provider_get_by(self, provider, db=None, cur=None):
"""Get a metadata provider
Args:
provider_name: Its name
provider_url: Its URL
Returns:
dict: same as `metadata_provider_add`;
or None if it does not exist.
"""
key = self._metadata_provider_key(provider)
return self._metadata_providers.get(key)
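# Illustrative usage sketch (not part of this change): attaching provider
# metadata to an origin; the provider, tool and metadata payloads are
# placeholders, and `origin_id` is assumed to identify a known origin.
def _example_origin_metadata(storage, origin_id, discovery_date):
    provider_id = storage.metadata_provider_add(
        'example-provider', 'registry', 'https://metadata.example.org/', {})
    tool_id = storage.tool_add([{'name': 'example-translator', 'version': '1',
                                 'configuration': {}}])[0]['id']
    storage.origin_metadata_add(origin_id, discovery_date, provider_id,
                                tool_id, {'title': 'example'})
    return storage.origin_metadata_get_by(origin_id)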
def _get_origin_url(self, origin):
if isinstance(origin, str):
return origin
elif isinstance(origin, int):
if origin <= len(self._origins_by_id):
return self._origins_by_id[origin-1]
else:
return None
else:
raise TypeError('origin must be a string or an integer.')
def _person_add(self, person):
"""Add a person in storage.
Note: Private method, do not use outside of this class.
Args:
person: dictionary with keys fullname, name and email.
"""
key = ('person', person.fullname)
if key not in self._objects:
person_id = len(self._persons) + 1
self._persons.append(person)
self._objects[key].append(('person', person_id))
else:
person_id = self._objects[key][0][1]
person = self._persons[person_id-1]
return person
@staticmethod
def _content_key(content):
"""A stable key for a content"""
return tuple(getattr(content, key)
for key in sorted(DEFAULT_ALGORITHMS))
@staticmethod
def _content_key_algorithm(content):
""" A stable key and the algorithm for a content"""
if isinstance(content, Content):
content = content.to_dict()
return tuple((content.get(key), key)
for key in sorted(DEFAULT_ALGORITHMS))
@staticmethod
def _tool_key(tool):
return '%r %r %r' % (tool['name'], tool['version'],
tuple(sorted(tool['configuration'].items())))
@staticmethod
def _metadata_provider_key(provider):
return '%r %r' % (provider['provider_name'], provider['provider_url'])
diff --git a/swh/storage/sql/30-swh-schema.sql b/swh/storage/sql/30-swh-schema.sql
index b8b0260c..c6b95b18 100644
--- a/swh/storage/sql/30-swh-schema.sql
+++ b/swh/storage/sql/30-swh-schema.sql
@@ -1,496 +1,471 @@
---
--- SQL implementation of the Software Heritage data model
---
-- schema versions
create table dbversion
(
version int primary key,
release timestamptz,
description text
);
comment on table dbversion is 'Details of current db version';
comment on column dbversion.version is 'SQL schema version';
comment on column dbversion.release is 'Version deployment timestamp';
comment on column dbversion.description is 'Release description';
-- latest schema version
insert into dbversion(version, release, description)
- values(140, now(), 'Work In Progress');
+ values(141, now(), 'Work In Progress');
-- a SHA1 checksum
create domain sha1 as bytea check (length(value) = 20);
-- a Git object ID, i.e., a Git-style salted SHA1 checksum
create domain sha1_git as bytea check (length(value) = 20);
-- a SHA256 checksum
create domain sha256 as bytea check (length(value) = 32);
-- a blake2 checksum
create domain blake2s256 as bytea check (length(value) = 32);
-- UNIX path (absolute, relative, individual path component, etc.)
create domain unix_path as bytea;
-- a set of UNIX-like access permissions, as manipulated by, e.g., chmod
create domain file_perms as int;
-- Checksums about actual file content. Note that the content itself is not
-- stored in the DB, but on external (key-value) storage. A single checksum is
-- used as key there, but the other can be used to verify that we do not inject
-- content collisions not knowingly.
create table content
(
sha1 sha1 not null,
sha1_git sha1_git not null,
sha256 sha256 not null,
blake2s256 blake2s256,
length bigint not null,
ctime timestamptz not null default now(),
-- creation time, i.e. time of (first) injection into the storage
status content_status not null default 'visible',
object_id bigserial
);
comment on table content is 'Checksums of file content which is actually stored externally';
comment on column content.sha1 is 'Content sha1 hash';
comment on column content.sha1_git is 'Git object sha1 hash';
comment on column content.sha256 is 'Content Sha256 hash';
comment on column content.blake2s256 is 'Content blake2s hash';
comment on column content.length is 'Content length';
comment on column content.ctime is 'First seen time';
comment on column content.status is 'Content status (absent, visible, hidden)';
comment on column content.object_id is 'Content identifier';
-- An origin is a place, identified by an URL, where software source code
-- artifacts can be found. We support different kinds of origins, e.g., git and
-- other VCS repositories, web pages that list tarballs URLs (e.g.,
-- http://www.kernel.org), indirect tarball URLs (e.g.,
-- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is
-- that it can be *fetched* from (wget, git clone, svn checkout, etc.) to
-- retrieve all the contained software.
create table origin
(
id bigserial not null,
type text, -- TODO use an enum here (?)
url text not null
);
comment on column origin.id is 'Artifact origin id';
comment on column origin.type is 'Type of origin';
comment on column origin.url is 'URL of origin';
-- Content blobs observed somewhere, but not ingested into the archive for
-- whatever reason. This table is separate from the content table as we might
-- not have the sha1 checksum of skipped contents (for instance when we inject
-- git repositories, objects that are too big will be skipped here, and we will
-- only know their sha1_git). 'reason' contains the reason the content was
-- skipped. origin is a nullable column allowing to find out which origin
-- contains that skipped content.
create table skipped_content
(
sha1 sha1,
sha1_git sha1_git,
sha256 sha256,
blake2s256 blake2s256,
length bigint not null,
ctime timestamptz not null default now(),
status content_status not null default 'absent',
reason text not null,
origin bigint,
object_id bigserial
);
comment on table skipped_content is 'Content blobs observed, but not ingested in the archive';
comment on column skipped_content.sha1 is 'Skipped content sha1 hash';
comment on column skipped_content.sha1_git is 'Git object sha1 hash';
comment on column skipped_content.sha256 is 'Skipped content sha256 hash';
comment on column skipped_content.blake2s256 is 'Skipped content blake2s hash';
comment on column skipped_content.length is 'Skipped content length';
comment on column skipped_content.ctime is 'First seen time';
comment on column skipped_content.status is 'Skipped content status (absent, visible, hidden)';
comment on column skipped_content.reason is 'Reason for skipping';
comment on column skipped_content.origin is 'Origin table identifier';
comment on column skipped_content.object_id is 'Skipped content identifier';
--- Log of all origin fetches (i.e., origin crawling) that have been done in the
--- past, or are still ongoing. Similar to list_history, but for origins.
-create table fetch_history
-(
- id bigserial,
- origin bigint,
- date timestamptz not null,
- status boolean, -- true if and only if the fetch has been successful
- result jsonb, -- more detailed returned values, times, etc...
- stdout text,
- stderr text, -- null when status is true, filled otherwise
- duration interval -- fetch duration of NULL if still ongoing
-);
-
-comment on table fetch_history is 'Log of all origin fetches';
-comment on column fetch_history.id is 'Identifier for fetch history';
-comment on column fetch_history.origin is 'Origin table identifier';
-comment on column fetch_history.date is 'Fetch start time';
-comment on column fetch_history.status is 'True indicates successful fetch';
-comment on column fetch_history.result is 'Detailed return values, times etc';
-comment on column fetch_history.stdout is 'Standard output of fetch operation';
-comment on column fetch_history.stderr is 'Standard error of fetch operation';
-comment on column fetch_history.duration is 'Time taken to complete fetch, NULL if ongoing';
-
-
-- A file-system directory. A directory is a list of directory entries (see
-- tables: directory_entry_{dir,file}).
--
-- To list the contents of a directory:
-- 1. list the contained directory_entry_dir using array dir_entries
-- 2. list the contained directory_entry_file using array file_entries
-- 3. list the contained directory_entry_rev using array rev_entries
-- 4. UNION
--
-- Synonyms/mappings:
-- * git: tree
create table directory
(
id sha1_git not null,
dir_entries bigint[], -- sub-directories, reference directory_entry_dir
file_entries bigint[], -- contained files, reference directory_entry_file
rev_entries bigint[], -- mounted revisions, reference directory_entry_rev
object_id bigserial -- short object identifier
);
comment on table directory is 'Contents of a directory, synonymous to tree (git)';
comment on column directory.id is 'Git object sha1 hash';
comment on column directory.dir_entries is 'Sub-directories, reference directory_entry_dir';
comment on column directory.file_entries is 'Contained files, reference directory_entry_file';
comment on column directory.rev_entries is 'Mounted revisions, reference directory_entry_rev';
comment on column directory.object_id is 'Short object identifier';
-- A directory entry pointing to a (sub-)directory.
create table directory_entry_dir
(
id bigserial,
target sha1_git not null, -- id of target directory
name unix_path not null, -- path name, relative to containing dir
perms file_perms not null -- unix-like permissions
);
comment on table directory_entry_dir is 'Directory entry for directory';
comment on column directory_entry_dir.id is 'Directory identifier';
comment on column directory_entry_dir.target is 'Target directory identifier';
comment on column directory_entry_dir.name is 'Path name, relative to containing directory';
comment on column directory_entry_dir.perms is 'Unix-like permissions';
-- A directory entry pointing to a file content.
create table directory_entry_file
(
id bigserial,
target sha1_git not null, -- id of target file
name unix_path not null, -- path name, relative to containing dir
perms file_perms not null -- unix-like permissions
);
comment on table directory_entry_file is 'Directory entry for file';
comment on column directory_entry_file.id is 'File identifier';
comment on column directory_entry_file.target is 'Target file identifier';
comment on column directory_entry_file.name is 'Path name, relative to containing directory';
comment on column directory_entry_file.perms is 'Unix-like permissions';
-- A directory entry pointing to a revision.
create table directory_entry_rev
(
id bigserial,
target sha1_git not null, -- id of target revision
name unix_path not null, -- path name, relative to containing dir
perms file_perms not null -- unix-like permissions
);
comment on table directory_entry_rev is 'Directory entry for revision';
comment on column directory_entry_rev.id is 'Revision identifier';
comment on column directory_entry_rev.target is 'Target revision identifier';
comment on column directory_entry_rev.name is 'Path name, relative to containing directory';
comment on column directory_entry_rev.perms is 'Unix-like permissions';
-- A person referenced by some source code artifacts, e.g., a VCS revision or
-- release metadata.
create table person
(
id bigserial,
name bytea, -- advisory: not null if we managed to parse a name
email bytea, -- advisory: not null if we managed to parse an email
fullname bytea not null -- freeform specification; what is actually used in the checksums
-- will usually be of the form 'name '
);
comment on table person is 'Person referenced in code artifact release metadata';
comment on column person.id is 'Person identifier';
comment on column person.name is 'Name';
comment on column person.email is 'Email';
comment on column person.fullname is 'Full name (raw name)';
-- The state of a source code tree at a specific point in time.
--
-- Synonyms/mappings:
-- * git / subversion / etc: commit
-- * tarball: a specific tarball
--
-- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in
-- case of merges) parent revisions. Each revision points to a directory, i.e.,
-- a file-system tree containing files and directories.
create table revision
(
id sha1_git not null,
date timestamptz,
date_offset smallint,
committer_date timestamptz,
committer_date_offset smallint,
type revision_type not null,
directory sha1_git, -- source code 'root' directory
message bytea,
author bigint,
committer bigint,
synthetic boolean not null default false, -- true iff revision has been created by Software Heritage
metadata jsonb, -- extra metadata (tarball checksums, extra commit information, etc...)
object_id bigserial,
date_neg_utc_offset boolean,
committer_date_neg_utc_offset boolean
);
comment on table revision is 'Revision represents the state of a source code tree at a
specific point in time';
comment on column revision.id is 'Git id of sha1 checksum';
comment on column revision.date is 'Timestamp when revision was authored';
comment on column revision.date_offset is 'Authored timestamp offset from UTC';
comment on column revision.committer_date is 'Timestamp when revision was committed';
comment on column revision.committer_date_offset is 'Committed timestamp offset from UTC';
comment on column revision.type is 'Possible revision types (''git'', ''tar'', ''dsc'', ''svn'', ''hg'')';
comment on column revision.directory is 'Directory identifier';
comment on column revision.message is 'Revision message';
comment on column revision.author is 'Author identifier';
comment on column revision.committer is 'Committer identifier';
comment on column revision.synthetic is 'true iff revision has been created by Software Heritage';
comment on column revision.metadata is 'extra metadata (tarball checksums, extra commit information, etc...)';
comment on column revision.object_id is 'Object identifier';
comment on column revision.date_neg_utc_offset is 'True indicates -0 UTC offset for author timestamp';
comment on column revision.committer_date_neg_utc_offset is 'True indicates -0 UTC offset for committer timestamp';
-- either this table or the sha1_git[] column on the revision table
create table revision_history
(
id sha1_git not null,
parent_id sha1_git not null,
parent_rank int not null default 0
-- parent position in merge commits, 0-based
);
comment on table revision_history is 'Sequence of revision history with parent and position in history';
comment on column revision_history.id is 'Revision history git object sha1 checksum';
comment on column revision_history.parent_id is 'Parent revision git object identifier';
comment on column revision_history.parent_rank is 'Parent position in merge commits, 0-based';
-- Crawling history of software origins visited by Software Heritage. Each
-- visit is a 3-way mapping between a software origin, a timestamp, and a
-- snapshot object capturing the full-state of the origin at visit time.
create table origin_visit
(
origin bigint not null,
visit bigint not null,
date timestamptz not null,
type text not null,
status origin_visit_status not null,
metadata jsonb,
snapshot sha1_git
);
comment on column origin_visit.origin is 'Visited origin';
comment on column origin_visit.visit is 'Sequential visit number for the origin';
comment on column origin_visit.date is 'Visit timestamp';
comment on column origin_visit.type is 'Type of loader that did the visit (hg, git, ...)';
comment on column origin_visit.status is 'Visit result';
comment on column origin_visit.metadata is 'Origin metadata at visit time';
comment on column origin_visit.snapshot is 'Origin snapshot at visit time';
-- A snapshot represents the entire state of a software origin as crawled by
-- Software Heritage. This table is a simple mapping between (public) intrinsic
-- snapshot identifiers and (private) numeric sequential identifiers.
create table snapshot
(
object_id bigserial not null, -- PK internal object identifier
id sha1_git not null -- snapshot intrinsic identifier
);
comment on table snapshot is 'State of a software origin as crawled by Software Heritage';
comment on column snapshot.object_id is 'Internal object identifier';
comment on column snapshot.id is 'Intrinsic snapshot identifier';
-- Each snapshot associate "branch" names to other objects in the Software
-- Heritage Merkle DAG. This table describes branches as mappings between names
-- and target typed objects.
create table snapshot_branch
(
object_id bigserial not null, -- PK internal object identifier
name bytea not null, -- branch name, e.g., "master" or "feature/drag-n-drop"
target bytea, -- target object identifier, e.g., a revision identifier
target_type snapshot_target -- target object type, e.g., "revision"
);
comment on table snapshot_branch is 'Associates branches with objects in Heritage Merkle DAG';
comment on column snapshot_branch.object_id is 'Internal object identifier';
comment on column snapshot_branch.name is 'Branch name';
comment on column snapshot_branch.target is 'Target object identifier';
comment on column snapshot_branch.target_type is 'Target object type';
-- Mapping between snapshots and their branches.
create table snapshot_branches
(
snapshot_id bigint not null, -- snapshot identifier, ref. snapshot.object_id
branch_id bigint not null -- branch identifier, ref. snapshot_branch.object_id
);
comment on table snapshot_branches is 'Mapping between snapshot and their branches';
comment on column snapshot_branches.snapshot_id is 'Snapshot identifier';
comment on column snapshot_branches.branch_id is 'Branch identifier';
-- A "memorable" point in time in the development history of a software
-- project.
--
-- Synonyms/mappings:
-- * git: tag (of the annotated kind, otherwise they are just references)
-- * tarball: the release version number
create table release
(
id sha1_git not null,
target sha1_git,
date timestamptz,
date_offset smallint,
name bytea,
comment bytea,
author bigint,
synthetic boolean not null default false, -- true iff release has been created by Software Heritage
object_id bigserial,
target_type object_type not null,
date_neg_utc_offset boolean
);
comment on table release is 'Details of a software release, synonymous with
a tag (git) or version number (tarball)';
comment on column release.id is 'Release git identifier';
comment on column release.target is 'Target git identifier';
comment on column release.date is 'Release timestamp';
comment on column release.date_offset is 'Timestamp offset from UTC';
comment on column release.name is 'Name';
comment on column release.comment is 'Comment';
comment on column release.author is 'Author';
comment on column release.synthetic is 'Indicates if created by Software Heritage';
comment on column release.object_id is 'Object identifier';
comment on column release.target_type is 'Object type (''content'', ''directory'', ''revision'',
''release'', ''snapshot'')';
comment on column release.date_neg_utc_offset is 'True indicates -0 UTC offset for release timestamp';
-- Tools
create table tool
(
id serial not null,
name text not null,
version text not null,
configuration jsonb
);
comment on table tool is 'Tool information';
comment on column tool.id is 'Tool identifier';
comment on column tool.name is 'Tool name';
comment on column tool.version is 'Tool version';
comment on column tool.configuration is 'Tool configuration: command line, flags, etc...';
create table metadata_provider
(
id serial not null,
provider_name text not null,
provider_type text not null,
provider_url text,
metadata jsonb
);
comment on table metadata_provider is 'Metadata provider information';
comment on column metadata_provider.id is 'Provider''s identifier';
comment on column metadata_provider.provider_name is 'Provider''s name';
comment on column metadata_provider.provider_url is 'Provider''s url';
comment on column metadata_provider.metadata is 'Other metadata about provider';
-- Discovery of metadata during a listing, loading, deposit or external_catalog of an origin
-- also provides a translation to a defined json schema using a translation tool (tool_id)
create table origin_metadata
(
id bigserial not null, -- PK internal object identifier
origin_id bigint not null, -- references origin(id)
discovery_date timestamptz not null, -- when it was extracted
provider_id bigint not null, -- ex: 'hal', 'lister-github', 'loader-github'
tool_id bigint not null,
metadata jsonb not null
);
comment on table origin_metadata is 'keeps all metadata found concerning an origin';
comment on column origin_metadata.id is 'the origin_metadata object''s id';
comment on column origin_metadata.origin_id is 'the origin id for which the metadata was found';
comment on column origin_metadata.discovery_date is 'the date of retrieval';
comment on column origin_metadata.provider_id is 'the metadata provider: github, openhub, deposit, etc.';
comment on column origin_metadata.tool_id is 'the tool used for extracting metadata: lister-github, etc.';
comment on column origin_metadata.metadata is 'metadata in json format but with original terms';
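-- Illustrative only, not part of the schema: a sketch of reading back one
-- origin's metadata together with its provider and tool.
--
--   select om.discovery_date, mp.provider_name, t.name as tool_name, om.metadata
--   from origin_metadata om
--   join metadata_provider mp on mp.id = om.provider_id
--   join tool t on t.id = om.tool_id
--   where om.origin_id = 42
--   order by om.discovery_date desc;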
-- Keep a cache of object counts
create table object_counts
(
object_type text, -- table for which we're counting objects (PK)
value bigint, -- count of objects in the table
last_update timestamptz, -- last update for the object count in this table
single_update boolean -- whether we update this table standalone (true) or through bucketed counts (false)
);
comment on table object_counts is 'Cache of object counts';
comment on column object_counts.object_type is 'Object type (''content'', ''directory'', ''revision'',
''release'', ''snapshot'')';
comment on column object_counts.value is 'Count of objects in the table';
comment on column object_counts.last_update is 'Last update for object count';
comment on column object_counts.single_update is 'standalone (true) or bucketed counts (false)';
create table object_counts_bucketed
(
line serial not null, -- PK
object_type text not null, -- table for which we're counting objects
identifier text not null, -- identifier across which we're bucketing objects
bucket_start bytea, -- lower bound (inclusive) for the bucket
bucket_end bytea, -- upper bound (exclusive) for the bucket
value bigint, -- count of objects in the bucket
last_update timestamptz -- last update for the object count in this bucket
);
comment on table object_counts_bucketed is 'Bucketed count for objects ordered by type';
comment on column object_counts_bucketed.line is 'Auto-incremented identifier value';
comment on column object_counts_bucketed.object_type is 'Object type (''content'', ''directory'', ''revision'',
''release'', ''snapshot'')';
comment on column object_counts_bucketed.identifier is 'Common identifier for bucketed objects';
comment on column object_counts_bucketed.bucket_start is 'Lower bound (inclusive) for the bucket';
comment on column object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket';
comment on column object_counts_bucketed.value is 'Count of objects in the bucket';
comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket';
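-- Illustrative only, not part of the schema: a sketch deriving per-type totals
-- from the bucketed counts (actual refreshes go through dedicated functions,
-- e.g. swh_update_counter as used by Storage.refresh_stat_counters).
--
--   select object_type, sum(value) as value, max(last_update) as last_update
--   from object_counts_bucketed
--   group by object_type;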
diff --git a/swh/storage/sql/60-swh-indexes.sql b/swh/storage/sql/60-swh-indexes.sql
index 7ae186ce..97681635 100644
--- a/swh/storage/sql/60-swh-indexes.sql
+++ b/swh/storage/sql/60-swh-indexes.sql
@@ -1,186 +1,178 @@
-- content
create unique index concurrently content_pkey on content(sha1);
create unique index concurrently on content(sha1_git);
create index concurrently on content(sha256);
create index concurrently on content(blake2s256);
create index concurrently on content(ctime); -- TODO use a BRIN index here (postgres >= 9.5)
create unique index concurrently on content(object_id);
alter table content add primary key using index content_pkey;
-- origin
create unique index concurrently origin_pkey on origin(id);
alter table origin add primary key using index origin_pkey;
create index concurrently on origin using gin (url gin_trgm_ops);
create index concurrently on origin using hash (url);
-- skipped_content
alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256);
create index concurrently on skipped_content(sha1);
create index concurrently on skipped_content(sha1_git);
create index concurrently on skipped_content(sha256);
create index concurrently on skipped_content(blake2s256);
create unique index concurrently on skipped_content(object_id);
alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid;
alter table skipped_content validate constraint skipped_content_origin_fkey;
--- fetch_history
-
-create unique index concurrently fetch_history_pkey on fetch_history(id);
-alter table fetch_history add primary key using index fetch_history_pkey;
-
-alter table fetch_history add constraint fetch_history_origin_fkey foreign key (origin) references origin(id) not valid;
-alter table fetch_history validate constraint fetch_history_origin_fkey;
-
-- directory
create unique index concurrently directory_pkey on directory(id);
alter table directory add primary key using index directory_pkey;
create index concurrently on directory using gin (dir_entries);
create index concurrently on directory using gin (file_entries);
create index concurrently on directory using gin (rev_entries);
create unique index concurrently on directory(object_id);
-- directory_entry_dir
create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id);
alter table directory_entry_dir add primary key using index directory_entry_dir_pkey;
create unique index concurrently on directory_entry_dir(target, name, perms);
-- directory_entry_file
create unique index concurrently directory_entry_file_pkey on directory_entry_file(id);
alter table directory_entry_file add primary key using index directory_entry_file_pkey;
create unique index concurrently on directory_entry_file(target, name, perms);
-- directory_entry_rev
create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id);
alter table directory_entry_rev add primary key using index directory_entry_rev_pkey;
create unique index concurrently on directory_entry_rev(target, name, perms);
-- person
create unique index concurrently person_pkey on person(id);
alter table person add primary key using index person_pkey;
create unique index concurrently on person(fullname);
create index concurrently on person(name);
create index concurrently on person(email);
-- revision
create unique index concurrently revision_pkey on revision(id);
alter table revision add primary key using index revision_pkey;
alter table revision add constraint revision_author_fkey foreign key (author) references person(id) not valid;
alter table revision validate constraint revision_author_fkey;
alter table revision add constraint revision_committer_fkey foreign key (committer) references person(id) not valid;
alter table revision validate constraint revision_committer_fkey;
create index concurrently on revision(directory);
create unique index concurrently on revision(object_id);
-- revision_history
create unique index concurrently revision_history_pkey on revision_history(id, parent_rank);
alter table revision_history add primary key using index revision_history_pkey;
create index concurrently on revision_history(parent_id);
alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid;
alter table revision_history validate constraint revision_history_id_fkey;
-- snapshot
create unique index concurrently snapshot_pkey on snapshot(object_id);
alter table snapshot add primary key using index snapshot_pkey;
create unique index concurrently on snapshot(id);
-- snapshot_branch
create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id);
alter table snapshot_branch add primary key using index snapshot_branch_pkey;
create unique index concurrently on snapshot_branch (target_type, target, name);
alter table snapshot_branch add constraint snapshot_branch_target_check check ((target_type is null) = (target is null)) not valid;
alter table snapshot_branch validate constraint snapshot_branch_target_check;
alter table snapshot_branch add constraint snapshot_target_check check (target_type not in ('content', 'directory', 'revision', 'release', 'snapshot') or length(target) = 20) not valid;
alter table snapshot_branch validate constraint snapshot_target_check;
create unique index concurrently on snapshot_branch (name) where target_type is null and target is null;
-- snapshot_branches
create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id);
alter table snapshot_branches add primary key using index snapshot_branches_pkey;
alter table snapshot_branches add constraint snapshot_branches_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid;
alter table snapshot_branches validate constraint snapshot_branches_snapshot_id_fkey;
alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid;
alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey;
-- origin_visit
create unique index concurrently origin_visit_pkey on origin_visit(origin, visit);
alter table origin_visit add primary key using index origin_visit_pkey;
create index concurrently on origin_visit(date);
alter table origin_visit add constraint origin_visit_origin_fkey foreign key (origin) references origin(id) not valid;
alter table origin_visit validate constraint origin_visit_origin_fkey;
-- release
create unique index concurrently release_pkey on release(id);
alter table release add primary key using index release_pkey;
create index concurrently on release(target, target_type);
create unique index concurrently on release(object_id);
alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid;
alter table release validate constraint release_author_fkey;
-- if the author is null, then the date must be null
alter table release add constraint release_author_date_check check ((date is null) or (author is not null)) not valid;
alter table release validate constraint release_author_date_check;
-- tool
create unique index tool_pkey on tool(id);
alter table tool add primary key using index tool_pkey;
create unique index on tool(name, version, configuration);
-- metadata_provider
create unique index concurrently metadata_provider_pkey on metadata_provider(id);
alter table metadata_provider add primary key using index metadata_provider_pkey;
create index concurrently on metadata_provider(provider_name, provider_url);
-- origin_metadata
create unique index concurrently origin_metadata_pkey on origin_metadata(id);
alter table origin_metadata add primary key using index origin_metadata_pkey;
create index concurrently on origin_metadata(origin_id, provider_id, tool_id);
alter table origin_metadata add constraint origin_metadata_origin_fkey foreign key (origin_id) references origin(id) not valid;
alter table origin_metadata validate constraint origin_metadata_origin_fkey;
alter table origin_metadata add constraint origin_metadata_provider_fkey foreign key (provider_id) references metadata_provider(id) not valid;
alter table origin_metadata validate constraint origin_metadata_provider_fkey;
alter table origin_metadata add constraint origin_metadata_tool_fkey foreign key (tool_id) references tool(id) not valid;
alter table origin_metadata validate constraint origin_metadata_tool_fkey;
-- object_counts
create unique index concurrently object_counts_pkey on object_counts(object_type);
alter table object_counts add primary key using index object_counts_pkey;
-- object_counts_bucketed
create unique index concurrently object_counts_bucketed_pkey on object_counts_bucketed(line);
alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey;
diff --git a/swh/storage/storage.py b/swh/storage/storage.py
index efc15de2..daa17a7f 100644
--- a/swh/storage/storage.py
+++ b/swh/storage/storage.py
@@ -1,1948 +1,1907 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import defaultdict
import copy
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
import datetime
import itertools
import json
import dateutil.parser
import psycopg2
import psycopg2.pool
from . import converters
from .common import db_transaction_generator, db_transaction
from .db import Db
from .exc import StorageDBError
from .algos import diff
from swh.model.hashutil import ALGORITHMS, hash_to_bytes
from swh.objstorage import get_objstorage
from swh.objstorage.exc import ObjNotFoundError
try:
from swh.journal.writer import get_journal_writer
except ImportError:
get_journal_writer = None # type: ignore
# mypy limitation, see https://github.com/python/mypy/issues/1153
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
EMPTY_SNAPSHOT_ID = hash_to_bytes('1a8893e6a86f444e8be8e7bda6cb34fb1735a00e')
"""Identifier for the empty snapshot"""
class Storage():
"""SWH storage proxy, encompassing DB and object storage
"""
def __init__(self, db, objstorage, min_pool_conns=1, max_pool_conns=10,
journal_writer=None):
"""
Args:
db: either a libpq connection string, or a psycopg2 connection
objstorage: configuration dict passed to swh.objstorage.get_objstorage
"""
try:
if isinstance(db, psycopg2.extensions.connection):
self._pool = None
self._db = Db(db)
else:
self._pool = psycopg2.pool.ThreadedConnectionPool(
min_pool_conns, max_pool_conns, db
)
self._db = None
except psycopg2.OperationalError as e:
raise StorageDBError(e)
self.objstorage = get_objstorage(**objstorage)
if journal_writer:
if get_journal_writer is None:
raise EnvironmentError(
'You need the swh.journal package to use the '
'journal_writer feature')
self.journal_writer = get_journal_writer(**journal_writer)
else:
self.journal_writer = None
def get_db(self):
if self._db:
return self._db
else:
return Db.from_pool(self._pool)
def put_db(self, db):
if db is not self._db:
db.put_conn()
@contextmanager
def db(self):
db = None
try:
db = self.get_db()
yield db
finally:
if db:
self.put_db(db)
@db_transaction()
def check_config(self, *, check_write, db, cur):
"""Check that the storage is configured and ready to go."""
if not self.objstorage.check_config(check_write=check_write):
return False
# Check permissions on one of the tables
if check_write:
check = 'INSERT'
else:
check = 'SELECT'
cur.execute(
"select has_table_privilege(current_user, 'content', %s)",
(check,)
)
return cur.fetchone()[0]
def _content_unique_key(self, hash, db):
"""Given a hash (tuple or dict), return a unique key from the
aggregation of keys.
"""
keys = db.content_hash_keys
if isinstance(hash, tuple):
return hash
return tuple([hash[k] for k in keys])
@staticmethod
def _normalize_content(d):
d = d.copy()
if 'status' not in d:
d['status'] = 'visible'
if 'length' not in d:
d['length'] = -1
return d
@staticmethod
def _validate_content(d):
"""Sanity checks on status / reason / length, that postgresql
doesn't enforce."""
if d['status'] not in ('visible', 'absent', 'hidden'):
raise ValueError('Invalid content status: {}'.format(d['status']))
if d['status'] != 'absent' and d.get('reason') is not None:
raise ValueError(
'Must not provide a reason if content is not absent.')
if d['length'] < -1:
raise ValueError('Content length must be positive or -1.')
def _filter_new_content(self, content, db, cur):
"""Sort contents into buckets 'with data' and 'without data',
and filter out those already in the database."""
content_by_status = defaultdict(list)
for d in content:
content_by_status[d['status']].append(d)
content_with_data = content_by_status['visible'] \
+ content_by_status['hidden']
content_without_data = content_by_status['absent']
missing_content = set(self.content_missing(content_with_data,
db=db, cur=cur))
missing_skipped = set(self._content_unique_key(hashes, db)
for hashes in self.skipped_content_missing(
content_without_data, db=db, cur=cur))
content_with_data = [
cont for cont in content_with_data
if cont['sha1'] in missing_content]
content_without_data = [
cont for cont in content_without_data
if self._content_unique_key(cont, db) in missing_skipped]
summary = {
'content:add': len(missing_content),
'skipped_content:add': len(missing_skipped),
}
return (content_with_data, content_without_data, summary)
def _content_add_metadata(self, db, cur,
content_with_data, content_without_data):
"""Add content to the postgresql database but not the object storage.
"""
if content_with_data:
# create temporary table for metadata injection
db.mktemp('content', cur)
db.copy_to(content_with_data, 'tmp_content',
db.content_add_keys, cur)
# move metadata in place
try:
db.content_add_from_temp(cur)
except psycopg2.IntegrityError as e:
from . import HashCollision
if e.diag.sqlstate == '23505' and \
e.diag.table_name == 'content':
constraint_to_hash_name = {
'content_pkey': 'sha1',
'content_sha1_git_idx': 'sha1_git',
'content_sha256_idx': 'sha256',
}
colliding_hash_name = constraint_to_hash_name \
.get(e.diag.constraint_name)
raise HashCollision(colliding_hash_name) from None
else:
raise
if content_without_data:
content_without_data = \
[cont.copy() for cont in content_without_data]
origins = db.origin_get_by_url(
[cont.get('origin') for cont in content_without_data],
cur=cur)
for (cont, origin) in zip(content_without_data, origins):
origin = dict(zip(db.origin_cols, origin))
if 'origin' in cont:
cont['origin'] = origin['id']
db.mktemp('skipped_content', cur)
db.copy_to(content_without_data, 'tmp_skipped_content',
db.skipped_content_keys, cur)
# move metadata in place
db.skipped_content_add_from_temp(cur)
@db_transaction()
def content_add(self, content, db, cur):
"""Add content blobs to the storage
Note: in case of DB errors, objects might have already been added to
the object storage and will not be removed. Since addition to the
object storage is idempotent, that should not be a problem.
Args:
contents (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- data (bytes): the actual content
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
- reason (str): if status = absent, the reason why
- origin (int): if status = absent, the origin we saw the
content in
Raises:
In case of errors, nothing is stored in the db (though data may
already have been written to the objstorage). The following
exceptions can occur:
- HashCollision in case of collision
- Any other exceptions raised by the db
Returns:
Summary dict with the following key and associated values:
content:add: New contents added
content:add:bytes: Sum of the contents' length data
skipped_content:add: New skipped contents (no data) added
"""
content = [dict(c.items()) for c in content] # semi-shallow copy
now = datetime.datetime.now(tz=datetime.timezone.utc)
for item in content:
item['ctime'] = now
content = [self._normalize_content(c) for c in content]
for c in content:
self._validate_content(c)
(content_with_data, content_without_data, summary) = \
self._filter_new_content(content, db, cur)
if self.journal_writer:
for item in content_with_data:
if 'data' in item:
item = item.copy()
del item['data']
self.journal_writer.write_addition('content', item)
for item in content_without_data:
self.journal_writer.write_addition('content', item)
def add_to_objstorage():
"""Add to objstorage the new missing_content
Returns:
Sum of the lengths of all contents pushed to the
objstorage. A content present twice is only sent once.
"""
content_bytes_added = 0
data = {}
for cont in content_with_data:
if cont['sha1'] not in data:
data[cont['sha1']] = cont['data']
content_bytes_added += max(0, cont['length'])
# FIXME: Since we do the filtering anyway now, we might as
# well make the objstorage's add_batch call return what we
# want here (real bytes added)... that'd simplify this...
self.objstorage.add_batch(data)
return content_bytes_added
with ThreadPoolExecutor(max_workers=1) as executor:
added_to_objstorage = executor.submit(add_to_objstorage)
self._content_add_metadata(
db, cur, content_with_data, content_without_data)
# Wait for objstorage addition before returning from the
# transaction, bubbling up any exception
content_bytes_added = added_to_objstorage.result()
summary['content:add:bytes'] = content_bytes_added
return summary
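# Illustrative usage only (a hedged sketch, not part of the class): building a
# single content dict by hand with hashlib and adding it through content_add.
#
#     import hashlib
#     blob = b'hello world\n'
#     storage.content_add([{
#         'sha1': hashlib.sha1(blob).digest(),
#         'sha1_git': hashlib.sha1(b'blob %d\x00' % len(blob) + blob).digest(),
#         'sha256': hashlib.sha256(blob).digest(),
#         'blake2s256': hashlib.blake2s(blob, digest_size=32).digest(),
#         'data': blob, 'length': len(blob), 'status': 'visible',
#     }])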
@db_transaction()
def content_update(self, content, keys=[], db=None, cur=None):
"""Update content blobs to the storage. Does nothing for unknown
contents or skipped ones.
Args:
content (iterable): iterable of dictionaries representing
individual pieces of content to update. Each dictionary has the
following keys:
- data (bytes): the actual content
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
keys (list): List of keys (str) whose values need an update, e.g.,
a new hash column
"""
# TODO: Add a check on input keys. How to properly implement
# this? We don't know yet the new columns.
if self.journal_writer:
raise NotImplementedError(
'content_update is not yet supported with a journal_writer.')
db.mktemp('content', cur)
select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
db.copy_to(content, 'tmp_content', select_keys, cur)
db.content_update_from_temp(keys_to_update=keys,
cur=cur)
@db_transaction()
def content_add_metadata(self, content, db, cur):
"""Add content metadata to the storage (like `content_add`, but
without inserting to the objstorage).
Args:
content (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
- reason (str): if status = absent, the reason why
- origin (int): if status = absent, the origin we saw the
content in
- ctime (datetime): time of insertion in the archive
Returns:
Summary dict with the following key and associated values:
content:add: New contents added
skipped_content:add: New skipped contents (no data) added
"""
content = [self._normalize_content(c) for c in content]
for c in content:
self._validate_content(c)
(content_with_data, content_without_data, summary) = \
self._filter_new_content(content, db, cur)
if self.journal_writer:
for item in itertools.chain(content_with_data,
content_without_data):
assert 'data' not in content
self.journal_writer.write_addition('content', item)
self._content_add_metadata(
db, cur, content_with_data, content_without_data)
return summary
def content_get(self, content):
"""Retrieve in bulk contents and their data.
This generator yields exactly as many items as provided sha1
identifiers, but callers should not assume this will always be true.
It may also yield `None` values in case an object was not found.
Args:
content: iterables of sha1
Yields:
Dict[str, bytes]: Generates streams of contents as dict with their
raw data:
- sha1 (bytes): content id
- data (bytes): content's raw data
Raises:
ValueError: in case too many contents are requested.
cf. BULK_BLOCK_CONTENT_LEN_MAX
"""
# FIXME: Make this method support slicing the `data`.
if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
raise ValueError(
"Send at maximum %s contents." % BULK_BLOCK_CONTENT_LEN_MAX)
for obj_id in content:
try:
data = self.objstorage.get(obj_id)
except ObjNotFoundError:
yield None
continue
yield {'sha1': obj_id, 'data': data}
@db_transaction()
def content_get_range(self, start, end, limit=1000, db=None, cur=None):
"""Retrieve contents within range [start, end] bound by limit.
Note that this function may return more than one blob per hash. The
limit is enforced with multiplicity (ie. two blobs with the same hash
will count twice toward the limit).
Args:
**start** (bytes): Starting identifier range (expected smaller
than end)
**end** (bytes): Ending identifier range (expected larger
than start)
**limit** (int): Limit result (default to 1000)
Returns:
a dict with keys:
- contents [dict]: iterable of contents within the range.
- next (bytes): if not None, contents remain in the range;
the next page starts at this sha1
"""
if limit is None:
raise ValueError('Development error: limit should not be None')
contents = []
next_content = None
for counter, content_row in enumerate(
db.content_get_range(start, end, limit+1, cur)):
content = dict(zip(db.content_get_metadata_keys, content_row))
if counter >= limit:
# the (limit+1)-th content marks where the next page starts
next_content = content['sha1']
break
contents.append(content)
return {
'contents': contents,
'next': next_content,
}
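# Illustrative pagination loop only (a hedged sketch, not part of the class),
# following the contents/next contract documented above:
#
#     start, end = b'\x00' * 20, b'\xff' * 20
#     while start is not None:
#         page = storage.content_get_range(start, end, limit=1000)
#         for content in page['contents']:
#             pass  # process the content metadata dict
#         start = page['next']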
@db_transaction_generator(statement_timeout=500)
def content_get_metadata(self, content, db=None, cur=None):
"""Retrieve content metadata in bulk
Args:
content: iterable of content identifiers (sha1)
Returns:
an iterable with content metadata corresponding to the given ids
"""
for metadata in db.content_get_metadata_from_sha1s(content, cur):
yield dict(zip(db.content_get_metadata_keys, metadata))
@db_transaction_generator()
def content_missing(self, content, key_hash='sha1', db=None, cur=None):
"""List content missing from storage
Args:
content ([dict]): iterable of dictionaries whose keys are
either 'length' or an item of
:data:`swh.model.hashutil.ALGORITHMS`;
mapped to the corresponding checksum
(or length).
key_hash (str): name of the column to use as hash id
result (default: 'sha1')
Returns:
iterable ([bytes]): missing content ids (as per the
key_hash column)
Raises:
TODO: an exception when we get a hash collision.
"""
keys = db.content_hash_keys
if key_hash not in keys:
raise ValueError("key_hash should be one of %s" % keys)
key_hash_idx = keys.index(key_hash)
if not content:
return
for obj in db.content_missing_from_list(content, cur):
yield obj[key_hash_idx]
@db_transaction_generator()
def content_missing_per_sha1(self, contents, db=None, cur=None):
"""List content missing from storage based only on sha1.
Args:
contents: Iterable of sha1 to check for absence.
Returns:
iterable: missing ids
Raises:
TODO: an exception when we get a hash collision.
"""
for obj in db.content_missing_per_sha1(contents, cur):
yield obj[0]
@db_transaction_generator()
def skipped_content_missing(self, contents, db=None, cur=None):
"""List skipped_content missing from storage
Args:
contents: iterable of dictionaries mapping each checksum algorithm
name to the corresponding checksum.
Returns:
iterable: missing signatures
"""
for content in db.skipped_content_missing(contents, cur):
yield dict(zip(db.content_hash_keys, content))
@db_transaction()
def content_find(self, content, db=None, cur=None):
"""Find a content hash in db.
Args:
content: a dictionary representing one content hash, mapping
checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to
checksum values
Returns:
a list of dicts describing the matching contents (empty if no
content matches).
Raises:
ValueError: in case none of the keys sha1, sha1_git, sha256,
blake2s256 is present in the dictionary.
"""
if not set(content).intersection(ALGORITHMS):
raise ValueError('content keys must contain at least one of: '
'sha1, sha1_git, sha256, blake2s256')
contents = db.content_find(sha1=content.get('sha1'),
sha1_git=content.get('sha1_git'),
sha256=content.get('sha256'),
blake2s256=content.get('blake2s256'),
cur=cur)
return [dict(zip(db.content_find_cols, content))
for content in contents]
@db_transaction()
def directory_add(self, directories, db, cur):
"""Add directories to the storage
Args:
directories (iterable): iterable of dictionaries representing the
individual directories to add. Each dict has the following
keys:
- id (sha1_git): the id of the directory to add
- entries (list): list of dicts for each entry in the
directory. Each dict has the following keys:
- name (bytes)
- type (one of 'file', 'dir', 'rev'): type of the
directory entry (file, directory, revision)
- target (sha1_git): id of the object pointed at by the
directory entry
- perms (int): entry permissions
Returns:
Summary dict of keys with associated count as values:
directory:add: Number of directories actually added
"""
summary = {'directory:add': 0}
dirs = set()
dir_entries = {
'file': defaultdict(list),
'dir': defaultdict(list),
'rev': defaultdict(list),
}
for cur_dir in directories:
dir_id = cur_dir['id']
dirs.add(dir_id)
for src_entry in cur_dir['entries']:
entry = src_entry.copy()
entry['dir_id'] = dir_id
if entry['type'] not in ('file', 'dir', 'rev'):
raise ValueError(
'Entry type must be file, dir, or rev; not %s'
% entry['type'])
dir_entries[entry['type']][dir_id].append(entry)
dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur))
if not dirs_missing:
return summary
if self.journal_writer:
self.journal_writer.write_additions(
'directory',
(dir_ for dir_ in directories
if dir_['id'] in dirs_missing))
# Copy directory ids
dirs_missing_dict = ({'id': dir} for dir in dirs_missing)
db.mktemp('directory', cur)
db.copy_to(dirs_missing_dict, 'tmp_directory', ['id'], cur)
# Copy entries
for entry_type, entry_list in dir_entries.items():
entries = itertools.chain.from_iterable(
entries_for_dir
for dir_id, entries_for_dir
in entry_list.items()
if dir_id in dirs_missing)
db.mktemp_dir_entry(entry_type)
db.copy_to(
entries,
'tmp_directory_entry_%s' % entry_type,
['target', 'name', 'perms', 'dir_id'],
cur,
)
# Do the final copy
db.directory_add_from_temp(cur)
summary['directory:add'] = len(dirs_missing)
return summary
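# Illustrative usage only (a hedged sketch, not part of the class): one
# directory with a single file entry; identifiers are placeholders.
#
#     storage.directory_add([{
#         'id': directory_id,
#         'entries': [{
#             'name': b'README',
#             'type': 'file',
#             'target': content_sha1_git,
#             'perms': 0o100644,
#         }],
#     }])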
@db_transaction_generator()
def directory_missing(self, directories, db=None, cur=None):
"""List directories missing from storage
Args:
directories (iterable): an iterable of directory ids
Yields:
missing directory ids
"""
for obj in db.directory_missing_from_list(directories, cur):
yield obj[0]
@db_transaction_generator(statement_timeout=20000)
def directory_ls(self, directory, recursive=False, db=None, cur=None):
"""Get entries for one directory.
Args:
- directory: the directory to list entries from.
- recursive: if True, list entries recursively from this directory.
Returns:
List of entries in the directory.
If `recursive=True`, names in the path of a dir/file not at the
root are concatenated with a slash (`/`).
"""
if recursive:
res_gen = db.directory_walk(directory, cur=cur)
else:
res_gen = db.directory_walk_one(directory, cur=cur)
for line in res_gen:
yield dict(zip(db.directory_ls_cols, line))
@db_transaction(statement_timeout=2000)
def directory_entry_get_by_path(self, directory, paths, db=None, cur=None):
"""Get the directory entry (either file or dir) from directory with path.
Args:
- directory: sha1 of the top level directory
- paths: path to lookup from the top level directory. From left
(top) to right (bottom).
Returns:
The corresponding directory entry if found, None otherwise.
"""
res = db.directory_entry_get_by_path(directory, paths, cur)
if res:
return dict(zip(db.directory_ls_cols, res))
@db_transaction()
def revision_add(self, revisions, db, cur):
"""Add revisions to the storage
Args:
revisions (Iterable[dict]): iterable of dictionaries representing
the individual revisions to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the revision to add
- **date** (:class:`dict`): date the revision was written
- **committer_date** (:class:`dict`): date the revision got
added to the origin
- **type** (one of 'git', 'tar'): type of the
revision added
- **directory** (:class:`sha1_git`): the directory the
revision points at
- **message** (:class:`bytes`): the message associated with
the revision
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **committer** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **metadata** (:class:`jsonb`): extra information as
dictionary
- **synthetic** (:class:`bool`): whether the revision is synthetic,
i.e., created by Software Heritage (e.g., from a tarball or a
plain directory) rather than found as-is at the origin
- **parents** (:class:`list[sha1_git]`): the parents of
this revision
date dictionaries have the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
revision:add: New objects actually stored in db
"""
summary = {'revision:add': 0}
revisions_missing = set(self.revision_missing(
set(revision['id'] for revision in revisions),
db=db, cur=cur))
if not revisions_missing:
return summary
db.mktemp_revision(cur)
revisions_filtered = [
revision for revision in revisions
if revision['id'] in revisions_missing]
if self.journal_writer:
self.journal_writer.write_additions('revision', revisions_filtered)
revisions_filtered = map(converters.revision_to_db, revisions_filtered)
parents_filtered = []
db.copy_to(
revisions_filtered, 'tmp_revision', db.revision_add_cols,
cur,
lambda rev: parents_filtered.extend(rev['parents']))
db.revision_add_from_temp(cur)
db.copy_to(parents_filtered, 'revision_history',
['id', 'parent_id', 'parent_rank'], cur)
return {'revision:add': len(revisions_missing)}
@db_transaction_generator()
def revision_missing(self, revisions, db=None, cur=None):
"""List revisions missing from storage
Args:
revisions (iterable): revision ids
Yields:
missing revision ids
"""
if not revisions:
return
for obj in db.revision_missing_from_list(revisions, cur):
yield obj[0]
@db_transaction_generator(statement_timeout=1000)
def revision_get(self, revisions, db=None, cur=None):
"""Get all revisions from storage
Args:
revisions: an iterable of revision ids
Returns:
iterable: an iterable of revisions as dictionaries (or None if the
revision doesn't exist)
"""
for line in db.revision_get_from_list(revisions, cur):
data = converters.db_to_revision(
dict(zip(db.revision_get_cols, line))
)
if not data['type']:
yield None
continue
yield data
@db_transaction_generator(statement_timeout=2000)
def revision_log(self, revisions, limit=None, db=None, cur=None):
"""Fetch revision entry from the given root revisions.
Args:
revisions: array of root revisions to look up
limit: maximum number of revisions to return. Defaults to None (no limit).
Yields:
Revision log entries reachable from the given root revisions.
"""
for line in db.revision_log(revisions, limit, cur):
data = converters.db_to_revision(
dict(zip(db.revision_get_cols, line))
)
if not data['type']:
yield None
continue
yield data
@db_transaction_generator(statement_timeout=2000)
def revision_shortlog(self, revisions, limit=None, db=None, cur=None):
"""Fetch the shortlog for the given revisions
Args:
revisions: list of root revisions to lookup
limit: depth limitation for the output
Yields:
a list of (id, parents) tuples.
"""
yield from db.revision_shortlog(revisions, limit, cur)
@db_transaction()
def release_add(self, releases, db, cur):
"""Add releases to the storage
Args:
releases (Iterable[dict]): iterable of dictionaries representing
the individual releases to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the release to add
- **revision** (:class:`sha1_git`): id of the revision the
release points to
- **date** (:class:`dict`): the date the release was made
- **name** (:class:`bytes`): the name of the release
- **comment** (:class:`bytes`): the comment associated with
the release
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
the date dictionary has the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
release:add: New objects actually stored in db
"""
summary = {'release:add': 0}
release_ids = set(release['id'] for release in releases)
releases_missing = set(self.release_missing(release_ids,
db=db, cur=cur))
if not releases_missing:
return summary
db.mktemp_release(cur)
releases_missing = list(releases_missing)
releases_filtered = [
release for release in releases
if release['id'] in releases_missing
]
if self.journal_writer:
self.journal_writer.write_additions('release', releases_filtered)
releases_filtered = map(converters.release_to_db, releases_filtered)
db.copy_to(releases_filtered, 'tmp_release', db.release_add_cols,
cur)
db.release_add_from_temp(cur)
return {'release:add': len(releases_missing)}
@db_transaction_generator()
def release_missing(self, releases, db=None, cur=None):
"""List releases missing from storage
Args:
releases: an iterable of release ids
Returns:
a list of missing release ids
"""
if not releases:
return
for obj in db.release_missing_from_list(releases, cur):
yield obj[0]
@db_transaction_generator(statement_timeout=500)
def release_get(self, releases, db=None, cur=None):
"""Given a list of sha1, return the releases's information
Args:
releases: list of sha1s
Yields:
dicts with the same keys as those given to `release_add`
(or ``None`` if a release does not exist)
"""
for release in db.release_get_from_list(releases, cur):
data = converters.db_to_release(
dict(zip(db.release_get_cols, release))
)
yield data if data['target_type'] else None
@db_transaction()
def snapshot_add(self, snapshots, db=None, cur=None):
"""Add snapshots to the storage.
Args:
snapshots ([dict]): the snapshots to add, containing the
following keys:
- **id** (:class:`bytes`): id of the snapshot
- **branches** (:class:`dict`): branches the snapshot contains,
mapping the branch name (:class:`bytes`) to the branch target,
itself a :class:`dict` (or ``None`` if the branch points to an
unknown object)
- **target_type** (:class:`str`): one of ``content``,
``directory``, ``revision``, ``release``,
``snapshot``, ``alias``
- **target** (:class:`bytes`): identifier of the target
(currently a ``sha1_git`` for all object kinds, or the name
of the target branch for aliases)
Raises:
ValueError: if the origin or visit id does not exist.
Returns:
Summary dict of keys with associated count as values
snapshot:add: Count of objects actually stored in db
"""
created_temp_table = False
count = 0
for snapshot in snapshots:
if not db.snapshot_exists(snapshot['id'], cur):
if not created_temp_table:
db.mktemp_snapshot_branch(cur)
created_temp_table = True
db.copy_to(
(
{
'name': name,
'target': info['target'] if info else None,
'target_type': (info['target_type']
if info else None),
}
for name, info in snapshot['branches'].items()
),
'tmp_snapshot_branch',
['name', 'target', 'target_type'],
cur,
)
if self.journal_writer:
self.journal_writer.write_addition('snapshot', snapshot)
db.snapshot_add(snapshot['id'], cur)
count += 1
return {'snapshot:add': count}
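# Illustrative usage only (a hedged sketch, not part of the class): a snapshot
# with one revision branch and one alias, using placeholder identifiers.
#
#     storage.snapshot_add([{
#         'id': snapshot_id,
#         'branches': {
#             b'refs/heads/master': {'target': revision_id,
#                                    'target_type': 'revision'},
#             b'HEAD': {'target': b'refs/heads/master',
#                       'target_type': 'alias'},
#         },
#     }])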
@db_transaction(statement_timeout=2000)
def snapshot_get(self, snapshot_id, db=None, cur=None):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: a dict with three keys:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
return self.snapshot_get_branches(snapshot_id, db=db, cur=cur)
@db_transaction(statement_timeout=2000)
def snapshot_get_by_origin_visit(self, origin, visit, db=None, cur=None):
"""Get the content, possibly partial, of a snapshot for the given origin visit
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
origin (int): the origin identifier
visit (int): the visit identifier
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
snapshot_id = db.snapshot_get_by_origin_visit(origin, visit, cur)
if snapshot_id:
return self.snapshot_get(snapshot_id, db=db, cur=cur)
return None
@db_transaction(statement_timeout=4000)
def snapshot_get_latest(self, origin, allowed_statuses=None, db=None,
cur=None):
"""Get the content, possibly partial, of the latest snapshot for the
given origin, optionally only from visits that have one of the given
allowed_statuses
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
origin (Union[str,int]): the origin's URL or identifier
allowed_statuses (list of str): list of visit statuses considered
to find the latest snapshot for the visit. For instance,
``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
Returns:
dict: a dict with three keys:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
if isinstance(origin, int):
origin = self.origin_get({'id': origin}, db=db, cur=cur)
if not origin:
return
origin = origin['url']
origin_visit = self.origin_visit_get_latest(
origin, allowed_statuses=allowed_statuses, require_snapshot=True,
db=db, cur=cur)
if origin_visit and origin_visit['snapshot']:
snapshot = self.snapshot_get(
origin_visit['snapshot'], db=db, cur=cur)
if not snapshot:
raise ValueError(
'last origin visit references an unknown snapshot')
return snapshot
@db_transaction(statement_timeout=2000)
def snapshot_count_branches(self, snapshot_id, db=None, cur=None):
"""Count the number of branches in the snapshot with the given id
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: A dict whose keys are the target types of branches and
values their corresponding amount
"""
return dict([bc for bc in
db.snapshot_count_branches(snapshot_id, cur)])
@db_transaction(statement_timeout=2000)
def snapshot_get_branches(self, snapshot_id, branches_from=b'',
branches_count=1000, target_types=None,
db=None, cur=None):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
Args:
snapshot_id (bytes): identifier of the snapshot
branches_from (bytes): optional parameter used to skip branches
whose name is lexicographically smaller than it before returning them
branches_count (int): optional parameter used to restrain
the amount of returned branches
target_types (list): optional parameter used to filter the
target types of branch to return (possible values that can be
contained in that list are `'content', 'directory',
'revision', 'release', 'snapshot', 'alias'`)
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has no more than
`branches_count` branches left, starting from `branches_from` included.
"""
if snapshot_id == EMPTY_SNAPSHOT_ID:
return {
'id': snapshot_id,
'branches': {},
'next_branch': None,
}
branches = {}
next_branch = None
fetched_branches = list(db.snapshot_get_by_id(
snapshot_id, branches_from=branches_from,
branches_count=branches_count+1, target_types=target_types,
cur=cur,
))
for branch in fetched_branches[:branches_count]:
branch = dict(zip(db.snapshot_get_cols, branch))
del branch['snapshot_id']
name = branch.pop('name')
if branch == {'target': None, 'target_type': None}:
branch = None
branches[name] = branch
if len(fetched_branches) > branches_count:
branch = dict(zip(db.snapshot_get_cols, fetched_branches[-1]))
next_branch = branch['name']
if branches:
return {
'id': snapshot_id,
'branches': branches,
'next_branch': next_branch,
}
return None
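# Illustrative only (a hedged sketch, not part of the class): paging through
# all branches of an existing snapshot using the next_branch contract above.
#
#     name = b''
#     while name is not None:
#         part = storage.snapshot_get_branches(snapshot_id, branches_from=name)
#         branches = part['branches'] if part else {}
#         # ... use branches ...
#         name = part['next_branch'] if part else None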
@db_transaction()
def origin_visit_add(self, origin, date, type=None,
db=None, cur=None):
"""Add an origin_visit for the origin at ts with status 'ongoing'.
For backward compatibility, `type` is optional and defaults to
the origin's type.
Args:
origin (Union[int,str]): visited origin's identifier or URL
date (Union[str,datetime]): timestamp of such visit
type (str): the type of loader used for the visit (hg, git, ...)
Returns:
dict: dictionary with keys origin and visit where:
- origin: origin identifier
- visit: the visit identifier for the new visit occurrence
"""
if isinstance(origin, str):
origin = self.origin_get({'url': origin}, db=db, cur=cur)
origin_id = origin['id']
else:
origin = self.origin_get({'id': origin}, db=db, cur=cur)
origin_id = origin['id']
if isinstance(date, str):
# FIXME: Converge on iso8601 at some point
date = dateutil.parser.parse(date)
if type is None:
type = origin['type']
visit_id = db.origin_visit_add(origin_id, date, type, cur)
if self.journal_writer:
# We can write to the journal only after inserting to the
# DB, because we want the id of the visit
del origin['id']
self.journal_writer.write_addition('origin_visit', {
'origin': origin, 'date': date, 'type': type,
'visit': visit_id,
'status': 'ongoing', 'metadata': None, 'snapshot': None})
return {
'origin': origin_id,
'visit': visit_id,
}
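# Illustrative usage only (a hedged sketch, not part of the class): recording a
# visit for an already-added origin, then closing it with a placeholder
# snapshot identifier.
#
#     now = datetime.datetime.now(tz=datetime.timezone.utc)
#     visit = storage.origin_visit_add('https://example.org/repo.git',
#                                      date=now, type='git')
#     storage.origin_visit_update(visit['origin'], visit['visit'],
#                                 status='full', snapshot=snapshot_id)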
@db_transaction()
def origin_visit_update(self, origin, visit_id, status=None,
metadata=None, snapshot=None,
db=None, cur=None):
"""Update an origin_visit's status.
Args:
origin (Union[int,str]): visited origin's identifier or URL
visit_id: Visit's id
status: Visit's new status
metadata: Data associated to the visit
snapshot (sha1_git): identifier of the snapshot to add to
the visit
Returns:
None
"""
if isinstance(origin, str):
origin_id = self.origin_get({'url': origin}, db=db, cur=cur)['id']
else:
origin_id = origin
visit = db.origin_visit_get(origin_id, visit_id, cur=cur)
if not visit:
raise ValueError('Invalid visit_id for this origin.')
visit = dict(zip(db.origin_visit_get_cols, visit))
updates = {}
if status and status != visit['status']:
updates['status'] = status
if metadata and metadata != visit['metadata']:
updates['metadata'] = metadata
if snapshot and snapshot != visit['snapshot']:
updates['snapshot'] = snapshot
if updates:
if self.journal_writer:
origin = self.origin_get(
[{'id': origin_id}], db=db, cur=cur)[0]
del origin['id']
self.journal_writer.write_update('origin_visit', {
**visit, **updates, 'origin': origin})
db.origin_visit_update(origin_id, visit_id, updates, cur)
@db_transaction()
def origin_visit_upsert(self, visits, db=None, cur=None):
"""Add a origin_visits with a specific id and with all its data.
If there is already an origin_visit with the same
`(origin_id, visit_id)`, overwrites it.
Args:
visits: iterable of dicts with keys:
origin: dict with keys either `id` or `url`
visit: origin visit id
date: timestamp of such visit
status: Visit's new status
metadata: Data associated to the visit
snapshot (sha1_git): identifier of the snapshot to add to
the visit
"""
visits = copy.deepcopy(visits)
for visit in visits:
if isinstance(visit['date'], str):
visit['date'] = dateutil.parser.parse(visit['date'])
visit['origin'] = \
self.origin_get([visit['origin']], db=db, cur=cur)[0]
if self.journal_writer:
for visit in visits:
visit = copy.deepcopy(visit)
if visit.get('type') is None:
visit['type'] = visit['origin']['type']
del visit['origin']['id']
self.journal_writer.write_addition('origin_visit', visit)
for visit in visits:
visit['origin'] = visit['origin']['id']
# TODO: upsert them all in a single query
db.origin_visit_upsert(**visit, cur=cur)
@db_transaction_generator(statement_timeout=500)
def origin_visit_get(self, origin, last_visit=None, limit=None, db=None,
cur=None):
"""Retrieve all the origin's visit's information.
Args:
origin (Union[int,str]): The occurrence's origin (identifier/URL).
last_visit: visit id from which to start listing the next visits.
Defaults to None
limit (int): maximum number of visits to return after last_visit.
Defaults to None
Yields:
List of visits.
"""
if isinstance(origin, str):
origin = self.origin_get([{'url': origin}], db=db, cur=cur)[0]
if not origin:
return
origin = origin['id']
for line in db.origin_visit_get_all(
origin, last_visit=last_visit, limit=limit, cur=cur):
data = dict(zip(db.origin_visit_get_cols, line))
yield data
@db_transaction(statement_timeout=500)
def origin_visit_find_by_date(self, origin, visit_date, db=None, cur=None):
"""Retrieves the origin visit whose date is closest to the provided
timestamp.
In case of a tie, the visit with largest id is selected.
Args:
origin (str): The occurrence's origin (URL).
visit_date (datetime): target timestamp
Returns:
A visit.
"""
origin = self.origin_get([{'url': origin}], db=db, cur=cur)[0]
if not origin:
return
origin = origin['id']
line = db.origin_visit_find_by_date(origin, visit_date, cur=cur)
if line:
return dict(zip(db.origin_visit_get_cols, line))
@db_transaction(statement_timeout=500)
def origin_visit_get_by(self, origin, visit, db=None, cur=None):
"""Retrieve origin visit's information.
Args:
origin: The occurrence's origin (identifier or URL).
visit: The visit identifier.
Returns:
The information on that particular (origin, visit) or None if
it does not exist
"""
if isinstance(origin, str):
origin = self.origin_get({'url': origin}, db=db, cur=cur)
if not origin:
return
origin = origin['id']
ori_visit = db.origin_visit_get(origin, visit, cur)
if not ori_visit:
return None
return dict(zip(db.origin_visit_get_cols, ori_visit))
@db_transaction(statement_timeout=4000)
def origin_visit_get_latest(
self, origin, allowed_statuses=None, require_snapshot=False,
db=None, cur=None):
"""Get the latest origin visit for the given origin, optionally
looking only for those with one of the given allowed_statuses
or for those with a known snapshot.
Args:
origin (str): the origin's URL
allowed_statuses (list of str): list of visit statuses considered
to find the latest visit. For instance,
``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
require_snapshot (bool): If True, only a visit with a snapshot
will be returned.
Returns:
dict: a dict with the following keys:
origin: the URL of the origin
visit: origin visit id
type: type of loader used for the visit
date: timestamp of such visit
status: Visit's new status
metadata: Data associated to the visit
snapshot (Optional[sha1_git]): identifier of the snapshot
associated to the visit
"""
origin = self.origin_get({'url': origin}, db=db, cur=cur)
if not origin:
return
origin = origin['id']
origin_visit = db.origin_visit_get_latest(
origin, allowed_statuses=allowed_statuses,
require_snapshot=require_snapshot, cur=cur)
if origin_visit:
return dict(zip(db.origin_visit_get_cols, origin_visit))
@db_transaction(statement_timeout=2000)
def object_find_by_sha1_git(self, ids, db=None, cur=None):
"""Return the objects found with the given ids.
Args:
ids: a generator of sha1_gits
Returns:
dict: a mapping from id to the list of objects found. Each object
found is itself a dict with keys:
- sha1_git: the input id
- type: the type of object found
- id: the id of the object found
- object_id: the numeric id of the object found.
"""
ret = {id: [] for id in ids}
for retval in db.object_find_by_sha1_git(ids, cur=cur):
if retval[1]:
ret[retval[0]].append(dict(zip(db.object_find_by_sha1_git_cols,
retval)))
return ret
origin_keys = ['id', 'type', 'url']
@db_transaction(statement_timeout=500)
def origin_get(self, origins, db=None, cur=None):
"""Return origins, either all identified by their ids or all
identified by tuples (type, url).
If the url is given and the type is omitted, one of the origins with
that url is returned.
Args:
origin: a list of dictionaries representing the individual
origins to find.
These dicts have either the key url (and optionally type):
- type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
- url (bytes): the url the origin points to
or the id:
- id: the origin id
Returns:
dict: the origin dictionary with the keys:
- id: origin's id
- type: origin's type
- url: origin's url
Raises:
ValueError: if the keys match neither url (with optional type) nor id.
"""
if isinstance(origins, dict):
# Old API
return_single = True
origins = [origins]
elif len(origins) == 0:
return []
else:
return_single = False
origin_ids = [origin.get('id') for origin in origins]
origin_urls = [origin.get('url') for origin in origins]
if any(origin_ids):
# Lookup per ID
if all(origin_ids):
results = db.origin_get_by_id(origin_ids, cur)
else:
raise ValueError(
'Either all origins or none at all should have an "id".')
elif any(origin_urls):
# Lookup per type + URL
if all(origin_urls):
results = db.origin_get_by_url(origin_urls, cur)
else:
raise ValueError(
'Either all origins or none at all should have '
'an "url" key.')
else: # unsupported lookup
raise ValueError('Origin must have either id or url.')
results = [dict(zip(self.origin_keys, result))
for result in results]
if return_single:
assert len(results) == 1
if results[0]['id'] is not None:
return results[0]
else:
return None
else:
return [None if res['id'] is None else res for res in results]
@db_transaction_generator()
def origin_get_range(self, origin_from=1, origin_count=100,
db=None, cur=None):
"""Retrieve ``origin_count`` origins whose ids are greater
or equal than ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from (int): the minimum id of origins to retrieve
origin_count (int): the maximum number of origins to retrieve
Yields:
dicts containing origin information as returned
by :meth:`swh.storage.storage.Storage.origin_get`.
"""
for origin in db.origin_get_range(origin_from, origin_count, cur):
yield dict(zip(self.origin_keys, origin))
@db_transaction_generator()
def origin_search(self, url_pattern, offset=0, limit=50,
regexp=False, with_visit=False, db=None, cur=None):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern (str): the string pattern to search for in origin urls
offset (int): number of found origins to skip before returning
results
limit (int): the maximum number of found origins to return
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Yields:
dicts containing origin information as returned
by :meth:`swh.storage.storage.Storage.origin_get`.
"""
for origin in db.origin_search(url_pattern, offset, limit,
regexp, with_visit, cur):
yield dict(zip(self.origin_keys, origin))
@db_transaction()
def origin_count(self, url_pattern, regexp=False,
with_visit=False, db=None, cur=None):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Returns:
int: The number of origins matching the search criterion.
"""
return db.origin_count(url_pattern, regexp, with_visit, cur)
@db_transaction()
def origin_add(self, origins, db=None, cur=None):
"""Add origins to the storage
Args:
origins: list of dictionaries representing the individual origins,
with the following keys:
- type: the origin type ('git', 'svn', 'deb', ...)
- url (bytes): the url the origin points to
Returns:
list: given origins as dict updated with their id
"""
origins = copy.deepcopy(origins)
for origin in origins:
origin['id'] = self.origin_add_one(origin, db=db, cur=cur)
return origins
@db_transaction()
def origin_add_one(self, origin, db=None, cur=None):
"""Add origin to the storage
Args:
origin: dictionary representing the individual origin to add. This
dict has the following keys:
- type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
- url (bytes): the url the origin points to
Returns:
the id of the added origin, or of the identical one that already
exists.
"""
origin_id = list(db.origin_get_by_url(
[origin['url']], cur))[0][0]
if origin_id:
return origin_id
if self.journal_writer:
self.journal_writer.write_addition('origin', origin)
return db.origin_add(origin['type'], origin['url'], cur)
- @db_transaction()
- def fetch_history_start(self, origin_id, db=None, cur=None):
- """Add an entry for origin origin_id in fetch_history. Returns the id
- of the added fetch_history entry
- """
- if isinstance(origin_id, str):
- origin = \
- self.origin_get([{'url': origin_id}], db=db, cur=cur)
- if not origin:
- return
- origin_id = origin[0]['id']
- fetch_history = {
- 'origin': origin_id,
- 'date': datetime.datetime.now(tz=datetime.timezone.utc),
- }
-
- return db.create_fetch_history(fetch_history, cur)
-
- @db_transaction()
- def fetch_history_end(self, fetch_history_id, data, db=None, cur=None):
- """Close the fetch_history entry with id `fetch_history_id`, replacing
- its data with `data`.
- """
- now = datetime.datetime.now(tz=datetime.timezone.utc)
- fetch_history = db.get_fetch_history(fetch_history_id, cur)
-
- if not fetch_history:
- raise ValueError('No fetch_history with id %d' % fetch_history_id)
-
- fetch_history['duration'] = now - fetch_history['date']
-
- fetch_history.update(data)
-
- db.update_fetch_history(fetch_history, cur)
-
- @db_transaction()
- def fetch_history_get(self, fetch_history_id, db=None, cur=None):
- """Get the fetch_history entry with id `fetch_history_id`.
- """
- return db.get_fetch_history(fetch_history_id, cur)
-
@db_transaction(statement_timeout=500)
def stat_counters(self, db=None, cur=None):
"""compute statistics about the number of tuples in various tables
Returns:
dict: a dictionary mapping textual labels (e.g., content) to
integer values (e.g., the number of tuples in table content)
"""
return {k: v for (k, v) in db.stat_counters()}
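# Usage sketch, assuming a configured `storage` instance: the returned
# mapping can be read like any dict, e.g. to report archive growth.
counters = storage.stat_counters()
print(counters.get('content'), counters.get('revision'), counters.get('origin'))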
@db_transaction()
def refresh_stat_counters(self, db=None, cur=None):
"""Recomputes the statistics for `stat_counters`."""
keys = [
'content',
'directory',
'directory_entry_dir',
'directory_entry_file',
'directory_entry_rev',
'origin',
'origin_visit',
'person',
'release',
'revision',
'revision_history',
'skipped_content',
'snapshot']
for key in keys:
cur.execute('select * from swh_update_counter(%s)', (key,))
@db_transaction()
def origin_metadata_add(self, origin_id, ts, provider, tool, metadata,
db=None, cur=None):
""" Add an origin_metadata for the origin at ts with provenance and
metadata.
Args:
origin_id (int): the origin's id for which the metadata is added
ts (datetime): timestamp of the found metadata
provider (int): the provider of metadata (e.g. 'hal')
tool (int): tool used to extract metadata
metadata (jsonb): the metadata retrieved at the time and location
Returns:
id (int): the origin_metadata unique id
"""
if isinstance(origin_id, str):
origin = self.origin_get({'url': origin_id}, db=db, cur=cur)
if not origin:
return
origin_id = origin['id']
if isinstance(ts, str):
ts = dateutil.parser.parse(ts)
return db.origin_metadata_add(origin_id, ts, provider, tool,
metadata, cur)
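# Usage sketch, assuming a configured `storage` instance where the origin,
# provider and tool below were registered beforehand (`provider_id` and
# `tool_id` are placeholders). Per the code above, the origin may be given
# by url and the timestamp as an ISO 8601 string.
metadata_id = storage.origin_metadata_add(
    'https://example.org/project.git',
    '2019-01-01T00:00:00+00:00',
    provider_id, tool_id,
    {'name': 'example-project', 'version': '0.0.1'})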
@db_transaction_generator(statement_timeout=500)
def origin_metadata_get_by(self, origin_id, provider_type=None, db=None,
cur=None):
"""Retrieve list of all origin_metadata entries for the origin_id
Args:
origin_id (int): the unique origin identifier
provider_type (str): (optional) type of provider
Returns:
list of dicts: the origin_metadata dictionaries, each with the keys:
- origin_id (int): origin's id
- discovery_date (datetime): timestamp of discovery
- tool_id (int): metadata's extracting tool
- metadata (jsonb)
- provider_id (int): metadata's provider
- provider_name (str)
- provider_type (str)
- provider_url (str)
"""
if isinstance(origin_id, str):
origin = self.origin_get({'url': origin_id}, db=db, cur=cur)
if not origin:
return
origin_id = origin['id']
for line in db.origin_metadata_get_by(origin_id, provider_type, cur):
yield dict(zip(db.origin_metadata_get_cols, line))
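# Usage sketch, assuming the same hypothetical `storage` instance: list the
# deposit metadata recorded for one origin, identified here by its url.
for entry in storage.origin_metadata_get_by(
        'https://example.org/project.git', provider_type='deposit-client'):
    print(entry['discovery_date'], entry['provider_name'], entry['metadata'])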
@db_transaction()
def tool_add(self, tools, db=None, cur=None):
"""Add new tools to the storage.
Args:
tools (iterable of :class:`dict`): Tool information to add to
storage. Each tool is a :class:`dict` with the following keys:
- name (:class:`str`): name of the tool
- version (:class:`str`): version of the tool
- configuration (:class:`dict`): configuration of the tool,
must be json-encodable
Returns:
list of :class:`dict`: All the tools inserted in storage
(including the internal ``id``). The order of the list is not
guaranteed to match the order of the initial list.
"""
db.mktemp_tool(cur)
db.copy_to(tools, 'tmp_tool',
['name', 'version', 'configuration'],
cur)
tools = db.tool_add_from_temp(cur)
return [dict(zip(db.tool_cols, line)) for line in tools]
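# Usage sketch, assuming a configured `storage` instance: register a tool
# and keep its internal id for later references (e.g. origin_metadata_add).
tools = storage.tool_add([{
    'name': 'swh-deposit',
    'version': '0.0.1',
    'configuration': {'sword_version': '2'},
}])
tool_id = tools[0]['id']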
@db_transaction(statement_timeout=500)
def tool_get(self, tool, db=None, cur=None):
"""Retrieve tool information.
Args:
tool (dict): Tool information we want to retrieve from storage.
The dicts have the same keys as those used in :func:`tool_add`.
Returns:
dict: The full tool information if it exists (``id`` included),
None otherwise.
"""
tool_conf = tool['configuration']
if isinstance(tool_conf, dict):
tool_conf = json.dumps(tool_conf)
idx = db.tool_get(tool['name'],
tool['version'],
tool_conf)
if not idx:
return None
return dict(zip(db.tool_cols, idx))
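# Usage sketch, assuming the tool above was previously added: look it up
# again; tool_get returns None when the (name, version, configuration)
# triple is unknown.
tool = storage.tool_get({
    'name': 'swh-deposit',
    'version': '0.0.1',
    'configuration': {'sword_version': '2'},
})
tool_id = tool['id'] if tool is not None else None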
@db_transaction()
def metadata_provider_add(self, provider_name, provider_type, provider_url,
metadata, db=None, cur=None):
"""Add a metadata provider.
Args:
provider_name (str): Its name
provider_type (str): Its type (e.g. `'deposit-client'`)
provider_url (str): Its URL
metadata: JSON-encodable object
Returns:
int: an identifier of the provider
"""
return db.metadata_provider_add(provider_name, provider_type,
provider_url, metadata, cur)
@db_transaction()
def metadata_provider_get(self, provider_id, db=None, cur=None):
"""Get a metadata provider
Args:
provider_id: Its identifier, as given by `metadata_provider_add`.
Returns:
dict: the stored provider information (see `metadata_provider_add`
for the fields); or None if it does not exist.
"""
result = db.metadata_provider_get(provider_id)
if not result:
return None
return dict(zip(db.metadata_provider_cols, result))
@db_transaction()
def metadata_provider_get_by(self, provider, db=None, cur=None):
"""Get a metadata provider
Args:
provider (dict): A dictionary with keys:
* provider_name: Its name
* provider_url: Its URL
Returns:
dict: the stored provider information (see `metadata_provider_add`
for the fields); or None if it does not exist.
"""
result = db.metadata_provider_get_by(provider['provider_name'],
provider['provider_url'])
if not result:
return None
return dict(zip(db.metadata_provider_cols, result))
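# Usage sketch, assuming a configured `storage` instance; the provider
# values are illustrative. Register a provider, then resolve it again from
# its name and url.
provider_id = storage.metadata_provider_add(
    'hal', 'deposit-client', 'https://hal.example.org/',
    {'location': 'France'})
provider = storage.metadata_provider_get_by({
    'provider_name': 'hal',
    'provider_url': 'https://hal.example.org/',
})
# `provider` is None if unknown, a dict describing the provider otherwise.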
def diff_directories(self, from_dir, to_dir, track_renaming=False):
"""Compute the list of file changes introduced between two arbitrary
directories (insertion / deletion / modification / renaming of files).
Args:
from_dir (bytes): identifier of the directory to compare from
to_dir (bytes): identifier of the directory to compare to
track_renaming (bool): whether or not to track file renaming
Returns:
A list of dict describing the introduced file changes
(see :func:`swh.storage.algos.diff.diff_directories`
for more details).
"""
return diff.diff_directories(self, from_dir, to_dir, track_renaming)
def diff_revisions(self, from_rev, to_rev, track_renaming=False):
"""Compute the list of file changes introduced between two arbitrary
revisions (insertion / deletion / modification / renaming of files).
Args:
from_rev (bytes): identifier of the revision to compare from
to_rev (bytes): identifier of the revision to compare to
track_renaming (bool): whether or not to track file renaming
Returns:
A list of dict describing the introduced file changes
(see :func:`swh.storage.algos.diff.diff_directories`
for more details).
"""
return diff.diff_revisions(self, from_rev, to_rev, track_renaming)
def diff_revision(self, revision, track_renaming=False):
"""Compute the list of file changes introduced by a specific revision
(insertion / deletion / modification / renaming of files) by comparing
it against its first parent.
Args:
revision (bytes): identifier of the revision from which to
compute the list of files changes
track_renaming (bool): whether or not to track file renaming
Returns:
A list of dict describing the introduced file changes
(see :func:`swh.storage.algos.diff.diff_directories`
for more details).
"""
return diff.diff_revision(self, revision, track_renaming)
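# Usage sketch, assuming a configured `storage` instance and `revision_id`
# holding the sha1_git (bytes) of an archived revision: list the changes it
# introduced against its first parent, following renamings.
changes = storage.diff_revision(revision_id, track_renaming=True)
for change in changes:
    print(change)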
diff --git a/swh/storage/tests/storage_data.py b/swh/storage/tests/storage_data.py
index 56836330..1fb9bda2 100644
--- a/swh/storage/tests/storage_data.py
+++ b/swh/storage/tests/storage_data.py
@@ -1,532 +1,515 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
from swh.model.hashutil import hash_to_bytes
from swh.model import from_disk
class StorageData:
def __getattr__(self, key):
v = globals()[key]
if hasattr(v, 'copy'):
return v.copy()
return v
data = StorageData()
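# Usage sketch: attribute access goes through __getattr__ above, so mutable
# fixtures (such as `cont`, defined just below) are handed out as shallow
# copies; a test can alter its copy without corrupting the module-level dict.
cont_copy = data.cont
cont_copy['status'] = 'hidden'
assert data.cont['status'] == 'visible'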
cont = {
'data': b'42\n',
'length': 3,
'sha1': hash_to_bytes(
'34973274ccef6ab4dfaaf86599792fa9c3fe4689'),
'sha1_git': hash_to_bytes(
'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'),
'sha256': hash_to_bytes(
'673650f936cb3b0a2f93ce09d81be10748b1b203c19e8176b4eefc1964a0cf3a'),
'blake2s256': hash_to_bytes(
'd5fe1939576527e42cfd76a9455a2432fe7f56669564577dd93c4280e76d661d'),
'status': 'visible',
}
cont2 = {
'data': b'4242\n',
'length': 5,
'sha1': hash_to_bytes(
'61c2b3a30496d329e21af70dd2d7e097046d07b7'),
'sha1_git': hash_to_bytes(
'36fade77193cb6d2bd826161a0979d64c28ab4fa'),
'sha256': hash_to_bytes(
'859f0b154fdb2d630f45e1ecae4a862915435e663248bb8461d914696fc047cd'),
'blake2s256': hash_to_bytes(
'849c20fad132b7c2d62c15de310adfe87be94a379941bed295e8141c6219810d'),
'status': 'visible',
}
cont3 = {
'data': b'424242\n',
'length': 7,
'sha1': hash_to_bytes(
'3e21cc4942a4234c9e5edd8a9cacd1670fe59f13'),
'sha1_git': hash_to_bytes(
'c932c7649c6dfa4b82327d121215116909eb3bea'),
'sha256': hash_to_bytes(
'92fb72daf8c6818288a35137b72155f507e5de8d892712ab96277aaed8cf8a36'),
'blake2s256': hash_to_bytes(
'76d0346f44e5a27f6bafdd9c2befd304aff83780f93121d801ab6a1d4769db11'),
'status': 'visible',
}
contents = (cont, cont2, cont3)
missing_cont = {
'data': b'missing\n',
'length': 8,
'sha1': hash_to_bytes(
'f9c24e2abb82063a3ba2c44efd2d3c797f28ac90'),
'sha1_git': hash_to_bytes(
'33e45d56f88993aae6a0198013efa80716fd8919'),
'sha256': hash_to_bytes(
'6bbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a'),
'blake2s256': hash_to_bytes(
'306856b8fd879edb7b6f1aeaaf8db9bbecc993cd7f776c333ac3a782fa5c6eba'),
'status': 'absent',
}
skipped_cont = {
'length': 1024 * 1024 * 200,
'sha1_git': hash_to_bytes(
'33e45d56f88993aae6a0198013efa80716fd8920'),
'sha1': hash_to_bytes(
'43e45d56f88993aae6a0198013efa80716fd8920'),
'sha256': hash_to_bytes(
'7bbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a'),
'blake2s256': hash_to_bytes(
'ade18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b'),
'reason': 'Content too long',
'status': 'absent',
'origin': 'file:///dev/zero',
}
skipped_cont2 = {
'length': 1024 * 1024 * 300,
'sha1_git': hash_to_bytes(
'44e45d56f88993aae6a0198013efa80716fd8921'),
'sha1': hash_to_bytes(
'54e45d56f88993aae6a0198013efa80716fd8920'),
'sha256': hash_to_bytes(
'8cbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a'),
'blake2s256': hash_to_bytes(
'9ce18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b'),
'reason': 'Content too long',
'status': 'absent',
}
dir = {
'id': hash_to_bytes(
'340133423253310030f531e632a733ff37c3a930'),
'entries': [
{
'name': b'foo',
'type': 'file',
'target': hash_to_bytes( # cont
'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'),
'perms': from_disk.DentryPerms.content,
},
{
'name': b'bar\xc3',
'type': 'dir',
'target': b'12345678901234567890',
'perms': from_disk.DentryPerms.directory,
},
],
}
dir2 = {
'id': hash_to_bytes(
'340133423253310030f531e632a733ff37c3a935'),
'entries': [
{
'name': b'oof',
'type': 'file',
'target': hash_to_bytes( # cont2
'36fade77193cb6d2bd826161a0979d64c28ab4fa'),
'perms': from_disk.DentryPerms.content,
}
],
}
dir3 = {
'id': hash_to_bytes('33e45d56f88993aae6a0198013efa80716fd8921'),
'entries': [
{
'name': b'foo',
'type': 'file',
'target': hash_to_bytes( # cont
'd81cc0710eb6cf9efd5b920a8453e1e07157b6cd'),
'perms': from_disk.DentryPerms.content,
},
{
'name': b'subdir',
'type': 'dir',
'target': hash_to_bytes( # dir
'340133423253310030f531e632a733ff37c3a930'),
'perms': from_disk.DentryPerms.directory,
},
{
'name': b'hello',
'type': 'file',
'target': b'12345678901234567890',
'perms': from_disk.DentryPerms.content,
},
],
}
dir4 = {
'id': hash_to_bytes('33e45d56f88993aae6a0198013efa80716fd8922'),
'entries': [
{
'name': b'subdir1',
'type': 'dir',
'target': hash_to_bytes(
'33e45d56f88993aae6a0198013efa80716fd8921'), # dir3
'perms': from_disk.DentryPerms.directory,
},
]
}
directories = (dir, dir2, dir3, dir4)
minus_offset = datetime.timezone(datetime.timedelta(minutes=-120))
plus_offset = datetime.timezone(datetime.timedelta(minutes=120))
revision = {
'id': b'56789012345678901234',
'message': b'hello',
'author': {
'name': b'Nicolas Dandrimont',
'email': b'nicolas@example.com',
'fullname': b'Nicolas Dandrimont <nicolas@example.com>',
},
'date': {
'timestamp': 1234567890,
'offset': 120,
'negative_utc': None,
},
'committer': {
'name': b'St\xc3fano Zacchiroli',
'email': b'stefano@example.com',
'fullname': b'St\xc3fano Zacchiroli <stefano@example.com>'
},
'committer_date': {
'timestamp': 1123456789,
'offset': 0,
'negative_utc': True,
},
'parents': [b'01234567890123456789', b'23434512345123456789'],
'type': 'git',
'directory': hash_to_bytes( # dir
'340133423253310030f531e632a733ff37c3a930'),
'metadata': {
'checksums': {
'sha1': 'tarball-sha1',
'sha256': 'tarball-sha256',
},
'signed-off-by': 'some-dude',
'extra_headers': [
['gpgsig', b'test123'],
['mergetags', [b'foo\\bar', b'\x22\xaf\x89\x80\x01\x00']],
],
},
'synthetic': True
}
revision2 = {
'id': b'87659012345678904321',
'message': b'hello again',
'author': {
'name': b'Roberto Dicosmo',
'email': b'roberto@example.com',
'fullname': b'Roberto Dicosmo <roberto@example.com>',
},
'date': {
'timestamp': {
'seconds': 1234567843,
'microseconds': 220000,
},
'offset': -720,
'negative_utc': None,
},
'committer': {
'name': b'tony',
'email': b'ar@dumont.fr',
'fullname': b'tony <ar@dumont.fr>',
},
'committer_date': {
'timestamp': 1123456789,
'offset': 0,
'negative_utc': False,
},
'parents': [b'01234567890123456789'],
'type': 'git',
'directory': hash_to_bytes( # dir2
'340133423253310030f531e632a733ff37c3a935'),
'metadata': None,
'synthetic': False
}
revision3 = {
'id': hash_to_bytes('7026b7c1a2af56521e951c01ed20f255fa054238'),
'message': b'a simple revision with no parents this time',
'author': {
'name': b'Roberto Dicosmo',
'email': b'roberto@example.com',
'fullname': b'Roberto Dicosmo <roberto@example.com>',
},
'date': {
'timestamp': {
'seconds': 1234567843,
'microseconds': 220000,
},
'offset': -720,
'negative_utc': None,
},
'committer': {
'name': b'tony',
'email': b'ar@dumont.fr',
'fullname': b'tony <ar@dumont.fr>',
},
'committer_date': {
'timestamp': 1127351742,
'offset': 0,
'negative_utc': False,
},
'parents': [],
'type': 'git',
'directory': hash_to_bytes( # dir2
'340133423253310030f531e632a733ff37c3a935'),
'metadata': None,
'synthetic': True
}
revision4 = {
'id': hash_to_bytes('368a48fe15b7db2383775f97c6b247011b3f14f4'),
'message': b'parent of self.revision2',
'author': {
'name': b'me',
'email': b'me@soft.heri',
'fullname': b'me <me@soft.heri>',
},
'date': {
'timestamp': {
'seconds': 1244567843,
'microseconds': 220000,
},
'offset': -720,
'negative_utc': None,
},
'committer': {
'name': b'committer-dude',
'email': b'committer@dude.com',
'fullname': b'committer-dude <committer@dude.com>',
},
'committer_date': {
'timestamp': {
'seconds': 1244567843,
'microseconds': 220000,
},
'offset': -720,
'negative_utc': None,
},
'parents': [hash_to_bytes( # revision3
'7026b7c1a2af56521e951c01ed20f255fa054238')],
'type': 'git',
'directory': hash_to_bytes( # dir
'340133423253310030f531e632a733ff37c3a930'),
'metadata': None,
'synthetic': False
}
revisions = (revision, revision2, revision3, revision4)
origin = {
'url': 'file:///dev/null',
'type': 'git',
}
origin2 = {
'url': 'file:///dev/zero',
'type': 'hg',
}
origins = (origin, origin2)
provider = {
'name': 'hal',
'type': 'deposit-client',
'url': 'http:///hal/inria',
'metadata': {
'location': 'France'
}
}
metadata_tool = {
'name': 'swh-deposit',
'version': '0.0.1',
'configuration': {
'sword_version': '2'
}
}
date_visit1 = datetime.datetime(2015, 1, 1, 23, 0, 0,
tzinfo=datetime.timezone.utc)
date_visit2 = datetime.datetime(2017, 1, 1, 23, 0, 0,
tzinfo=datetime.timezone.utc)
date_visit3 = datetime.datetime(2018, 1, 1, 23, 0, 0,
tzinfo=datetime.timezone.utc)
release = {
'id': b'87659012345678901234',
'name': b'v0.0.1',
'author': {
'name': b'olasd',
'email': b'nic@olasd.fr',
'fullname': b'olasd <nic@olasd.fr>',
},
'date': {
'timestamp': 1234567890,
'offset': 42,
'negative_utc': None,
},
'target': b'43210987654321098765',
'target_type': 'revision',
'message': b'synthetic release',
'synthetic': True,
}
release2 = {
'id': b'56789012348765901234',
'name': b'v0.0.2',
'author': {
'name': b'tony',
'email': b'ar@dumont.fr',
'fullname': b'tony <ar@dumont.fr>',
},
'date': {
'timestamp': 1634366813,
'offset': -120,
'negative_utc': None,
},
'target': b'432109\xa9765432\xc309\x00765',
'target_type': 'revision',
'message': b'v0.0.2\nMisc performance improvements + bug fixes',
'synthetic': False
}
release3 = {
'id': b'87659012345678904321',
'name': b'v0.0.2',
'author': {
'name': b'tony',
'email': b'tony@ardumont.fr',
'fullname': b'tony <tony@ardumont.fr>',
},
'date': {
'timestamp': 1634336813,
'offset': 0,
'negative_utc': False,
},
'target': b'87659012345678904321', # revision2
'target_type': 'revision',
'message': b'yet another synthetic release',
'synthetic': True,
}
releases = (release, release2, release3)
-fetch_history_date = datetime.datetime(
- 2015, 1, 2, 21, 0, 0,
- tzinfo=datetime.timezone.utc)
-
-fetch_history_end = datetime.datetime(
- 2015, 1, 2, 23, 0, 0,
- tzinfo=datetime.timezone.utc)
-
-fetch_history_data = {
- 'status': True,
- 'result': {'foo': 'bar'},
- 'stdout': 'blabla',
- 'stderr': 'blablabla',
-}
-
snapshot = {
'id': hash_to_bytes('2498dbf535f882bc7f9a18fb16c9ad27fda7bab7'),
'branches': {
b'master': {
'target': b'56789012345678901234', # revision
'target_type': 'revision',
},
},
}
empty_snapshot = {
'id': hash_to_bytes('1a8893e6a86f444e8be8e7bda6cb34fb1735a00e'),
'branches': {},
}
complete_snapshot = {
'id': hash_to_bytes('6e65b86363953b780d92b0a928f3e8fcdd10db36'),
'branches': {
b'directory': {
'target': hash_to_bytes(
'1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8'),
'target_type': 'directory',
},
b'directory2': {
'target': hash_to_bytes(
'1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8'),
'target_type': 'directory',
},
b'content': {
'target': hash_to_bytes(
'fe95a46679d128ff167b7c55df5d02356c5a1ae1'),
'target_type': 'content',
},
b'alias': {
'target': b'revision',
'target_type': 'alias',
},
b'revision': {
'target': hash_to_bytes(
'aafb16d69fd30ff58afdd69036a26047f3aebdc6'),
'target_type': 'revision',
},
b'release': {
'target': hash_to_bytes(
'7045404f3d1c54e6473c71bbb716529fbad4be24'),
'target_type': 'release',
},
b'snapshot': {
'target': hash_to_bytes(
'1a8893e6a86f444e8be8e7bda6cb34fb1735a00e'),
'target_type': 'snapshot',
},
b'dangling': None,
}
}
origin_metadata = {
'origin': origin,
'discovery_date': datetime.datetime(2015, 1, 1, 23, 0, 0,
tzinfo=datetime.timezone.utc),
'provider': provider,
'tool': 'swh-deposit',
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
}
}
origin_metadata2 = {
'origin': origin,
'discovery_date': datetime.datetime(2017, 1, 1, 23, 0, 0,
tzinfo=datetime.timezone.utc),
'provider': provider,
'tool': 'swh-deposit',
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
}
}
-
-fetch_history_duration = (fetch_history_end - fetch_history_date)
diff --git a/swh/storage/tests/test_storage.py b/swh/storage/tests/test_storage.py
index b059f08e..2db9725d 100644
--- a/swh/storage/tests/test_storage.py
+++ b/swh/storage/tests/test_storage.py
@@ -1,3499 +1,3469 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
from contextlib import contextmanager
import datetime
import itertools
import queue
import threading
from collections import defaultdict
-from unittest.mock import Mock, patch
+from unittest.mock import Mock
import psycopg2
import pytest
from hypothesis import given, strategies, settings, HealthCheck
from typing import ClassVar, Optional
from swh.model import from_disk, identifiers
from swh.model.hashutil import hash_to_bytes
from swh.model.hypothesis_strategies import objects
from swh.storage import HashCollision
from .storage_data import data
@contextmanager
def db_transaction(storage):
with storage.db() as db:
with db.transaction() as cur:
yield db, cur
def normalize_entity(entity):
entity = copy.deepcopy(entity)
for key in ('date', 'committer_date'):
if key in entity:
entity[key] = identifiers.normalize_timestamp(entity[key])
return entity
def transform_entries(dir_, *, prefix=b''):
for ent in dir_['entries']:
yield {
'dir_id': dir_['id'],
'type': ent['type'],
'target': ent['target'],
'name': prefix + ent['name'],
'perms': ent['perms'],
'status': None,
'sha1': None,
'sha1_git': None,
'sha256': None,
'length': None,
}
def cmpdir(directory):
return (directory['type'], directory['dir_id'])
def short_revision(revision):
return [revision['id'], revision['parents']]
class TestStorage:
"""Main class for Storage testing.
This class is used as-is to test local storage (see TestLocalStorage
below) and remote storage (see TestRemoteStorage in
test_remote_storage.py).
We need to have the two classes inherit from this base class
separately to avoid nosetests running the tests from the base
class twice.
"""
maxDiff = None # type: ClassVar[Optional[int]]
_test_origin_ids = True
def test_check_config(self, swh_storage):
assert swh_storage.check_config(check_write=True)
assert swh_storage.check_config(check_write=False)
def test_content_add(self, swh_storage):
cont = data.cont
insertion_start_time = datetime.datetime.now(tz=datetime.timezone.utc)
actual_result = swh_storage.content_add([cont])
insertion_end_time = datetime.datetime.now(tz=datetime.timezone.utc)
assert actual_result == {
'content:add': 1,
'content:add:bytes': cont['length'],
'skipped_content:add': 0
}
assert list(swh_storage.content_get([cont['sha1']])) == \
[{'sha1': cont['sha1'], 'data': cont['data']}]
expected_cont = data.cont
del expected_cont['data']
journal_objects = list(swh_storage.journal_writer.objects)
for (obj_type, obj) in journal_objects:
assert insertion_start_time <= obj['ctime']
assert obj['ctime'] <= insertion_end_time
del obj['ctime']
assert journal_objects == [('content', expected_cont)]
def test_content_add_validation(self, swh_storage):
cont = data.cont
with pytest.raises(ValueError, match='status'):
swh_storage.content_add([{**cont, 'status': 'foobar'}])
with pytest.raises(ValueError, match="(?i)length"):
swh_storage.content_add([{**cont, 'length': -2}])
with pytest.raises((ValueError, psycopg2.IntegrityError),
match='reason') as cm:
swh_storage.content_add([{**cont, 'status': 'absent'}])
if type(cm.value) == psycopg2.IntegrityError:
assert cm.value.pgcode == \
psycopg2.errorcodes.NOT_NULL_VIOLATION
with pytest.raises(
ValueError,
match="^Must not provide a reason if content is not absent.$"):
swh_storage.content_add([{**cont, 'reason': 'foobar'}])
def test_content_get_missing(self, swh_storage):
cont = data.cont
swh_storage.content_add([cont])
# Query a single missing content
results = list(swh_storage.content_get(
[data.cont2['sha1']]))
assert results == [None]
# Check content_get does not abort after finding a missing content
results = list(swh_storage.content_get(
[data.cont['sha1'], data.cont2['sha1']]))
assert results == [{'sha1': cont['sha1'], 'data': cont['data']}, None]
# Check content_get does not discard found content when it finds
# a missing content.
results = list(swh_storage.content_get(
[data.cont2['sha1'], data.cont['sha1']]))
assert results == [None, {'sha1': cont['sha1'], 'data': cont['data']}]
def test_content_add_same_input(self, swh_storage):
cont = data.cont
actual_result = swh_storage.content_add([cont, cont])
assert actual_result == {
'content:add': 1,
'content:add:bytes': cont['length'],
'skipped_content:add': 0
}
def test_content_add_different_input(self, swh_storage):
cont = data.cont
cont2 = data.cont2
actual_result = swh_storage.content_add([cont, cont2])
assert actual_result == {
'content:add': 2,
'content:add:bytes': cont['length'] + cont2['length'],
'skipped_content:add': 0
}
def test_content_add_twice(self, swh_storage):
actual_result = swh_storage.content_add([data.cont])
assert actual_result == {
'content:add': 1,
'content:add:bytes': data.cont['length'],
'skipped_content:add': 0
}
assert len(swh_storage.journal_writer.objects) == 1
actual_result = swh_storage.content_add([data.cont, data.cont2])
assert actual_result == {
'content:add': 1,
'content:add:bytes': data.cont2['length'],
'skipped_content:add': 0
}
assert len(swh_storage.journal_writer.objects) == 2
assert len(swh_storage.content_find(data.cont)) == 1
assert len(swh_storage.content_find(data.cont2)) == 1
def test_content_add_collision(self, swh_storage):
cont1 = data.cont
# create (corrupted) content with same sha1{,_git} but != sha256
cont1b = cont1.copy()
sha256_array = bytearray(cont1b['sha256'])
sha256_array[0] += 1
cont1b['sha256'] = bytes(sha256_array)
with pytest.raises(HashCollision) as cm:
swh_storage.content_add([cont1, cont1b])
assert cm.value.args[0] in ['sha1', 'sha1_git', 'blake2s256']
def test_content_add_metadata(self, swh_storage):
cont = data.cont
del cont['data']
cont['ctime'] = datetime.datetime.now()
actual_result = swh_storage.content_add_metadata([cont])
assert actual_result == {
'content:add': 1,
'skipped_content:add': 0
}
expected_cont = cont.copy()
del expected_cont['ctime']
assert list(swh_storage.content_get_metadata([cont['sha1']])) == \
[expected_cont]
assert list(swh_storage.journal_writer.objects) == [('content', cont)]
def test_content_add_metadata_same_input(self, swh_storage):
cont = data.cont
del cont['data']
cont['ctime'] = datetime.datetime.now()
actual_result = swh_storage.content_add_metadata([cont, cont])
assert actual_result == {
'content:add': 1,
'skipped_content:add': 0
}
def test_content_add_metadata_different_input(self, swh_storage):
cont = data.cont
del cont['data']
cont['ctime'] = datetime.datetime.now()
cont2 = data.cont2
del cont2['data']
cont2['ctime'] = datetime.datetime.now()
actual_result = swh_storage.content_add_metadata([cont, cont2])
assert actual_result == {
'content:add': 2,
'skipped_content:add': 0
}
def test_content_add_metadata_collision(self, swh_storage):
cont1 = data.cont
del cont1['data']
cont1['ctime'] = datetime.datetime.now()
# create (corrupted) content with same sha1{,_git} but != sha256
cont1b = cont1.copy()
sha256_array = bytearray(cont1b['sha256'])
sha256_array[0] += 1
cont1b['sha256'] = bytes(sha256_array)
with pytest.raises(HashCollision) as cm:
swh_storage.content_add_metadata([cont1, cont1b])
assert cm.value.args[0] in ['sha1', 'sha1_git', 'blake2s256']
def test_skipped_content_add(self, swh_storage):
cont = data.skipped_cont
cont2 = data.skipped_cont2
cont2['blake2s256'] = None
missing = list(swh_storage.skipped_content_missing([cont, cont2]))
assert len(missing) == 2
actual_result = swh_storage.content_add([cont, cont, cont2])
assert actual_result == {
'content:add': 0,
'content:add:bytes': 0,
'skipped_content:add': 2,
}
missing = list(swh_storage.skipped_content_missing([cont, cont2]))
assert missing == []
@pytest.mark.property_based
@settings(deadline=None) # this test is very slow
@given(strategies.sets(
elements=strategies.sampled_from(
['sha256', 'sha1_git', 'blake2s256']),
min_size=0))
def test_content_missing(self, swh_storage, algos):
algos |= {'sha1'}
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont2])
test_contents = [cont2]
missing_per_hash = defaultdict(list)
for i in range(256):
test_content = missing_cont.copy()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_contents.append(test_content)
assert set(swh_storage.content_missing(test_contents)) == \
set(missing_per_hash['sha1'])
for hash in algos:
assert set(swh_storage.content_missing(
test_contents, key_hash=hash)) == set(missing_per_hash[hash])
@pytest.mark.property_based
@given(strategies.sets(
elements=strategies.sampled_from(
['sha256', 'sha1_git', 'blake2s256']),
min_size=0))
def test_content_missing_unknown_algo(self, swh_storage, algos):
algos |= {'sha1'}
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont2])
test_contents = [cont2]
missing_per_hash = defaultdict(list)
for i in range(16):
test_content = missing_cont.copy()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_content['nonexisting_algo'] = b'\x00'
test_contents.append(test_content)
assert set(
swh_storage.content_missing(test_contents)) == set(
missing_per_hash['sha1'])
for hash in algos:
assert set(swh_storage.content_missing(
test_contents, key_hash=hash)) == set(
missing_per_hash[hash])
def test_content_missing_per_sha1(self, swh_storage):
# given
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont2])
# when
gen = swh_storage.content_missing_per_sha1([cont2['sha1'],
missing_cont['sha1']])
# then
assert list(gen) == [missing_cont['sha1']]
def test_content_get_metadata(self, swh_storage):
cont1 = data.cont
cont2 = data.cont2
swh_storage.content_add([cont1, cont2])
actual_md = list(swh_storage.content_get_metadata(
[cont1['sha1'], cont2['sha1']]))
# we only retrieve the metadata
cont1.pop('data')
cont2.pop('data')
assert actual_md in ([cont1, cont2], [cont2, cont1])
def test_content_get_metadata_missing_sha1(self, swh_storage):
cont1 = data.cont
cont2 = data.cont2
missing_cont = data.missing_cont
swh_storage.content_add([cont1, cont2])
gen = swh_storage.content_get_metadata([missing_cont['sha1']])
# All the metadata keys are None
missing_cont.pop('data')
for key in missing_cont:
if key != 'sha1':
missing_cont[key] = None
assert list(gen) == [missing_cont]
def test_directory_add(self, swh_storage):
init_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert [data.dir['id']] == init_missing
actual_result = swh_storage.directory_add([data.dir])
assert actual_result == {'directory:add': 1}
assert list(swh_storage.journal_writer.objects) == \
[('directory', data.dir)]
actual_data = list(swh_storage.directory_ls(data.dir['id']))
expected_data = list(transform_entries(data.dir))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
after_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert after_missing == []
def test_directory_add_validation(self, swh_storage):
dir_ = copy.deepcopy(data.dir)
dir_['entries'][0]['type'] = 'foobar'
with pytest.raises(ValueError, match='type.*foobar'):
swh_storage.directory_add([dir_])
dir_ = copy.deepcopy(data.dir)
del dir_['entries'][0]['target']
with pytest.raises((TypeError, psycopg2.IntegrityError),
match='target') as cm:
swh_storage.directory_add([dir_])
if type(cm.value) == psycopg2.IntegrityError:
assert cm.value.pgcode == psycopg2.errorcodes.NOT_NULL_VIOLATION
def test_directory_add_twice(self, swh_storage):
actual_result = swh_storage.directory_add([data.dir])
assert actual_result == {'directory:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('directory', data.dir)]
actual_result = swh_storage.directory_add([data.dir])
assert actual_result == {'directory:add': 0}
assert list(swh_storage.journal_writer.objects) \
== [('directory', data.dir)]
def test_directory_get_recursive(self, swh_storage):
init_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert init_missing == [data.dir['id']]
actual_result = swh_storage.directory_add(
[data.dir, data.dir2, data.dir3])
assert actual_result == {'directory:add': 3}
assert list(swh_storage.journal_writer.objects) == [
('directory', data.dir),
('directory', data.dir2),
('directory', data.dir3)]
# List directory containing a file and an unknown subdirectory
actual_data = list(swh_storage.directory_ls(
data.dir['id'], recursive=True))
expected_data = list(transform_entries(data.dir))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a single file
actual_data = list(swh_storage.directory_ls(
data.dir2['id'], recursive=True))
expected_data = list(transform_entries(data.dir2))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a known subdirectory, entries should
# be both those of the directory and of the subdir
actual_data = list(swh_storage.directory_ls(
data.dir3['id'], recursive=True))
expected_data = list(itertools.chain(
transform_entries(data.dir3),
transform_entries(data.dir, prefix=b'subdir/')))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
def test_directory_get_non_recursive(self, swh_storage):
init_missing = list(swh_storage.directory_missing([data.dir['id']]))
assert init_missing == [data.dir['id']]
actual_result = swh_storage.directory_add(
[data.dir, data.dir2, data.dir3])
assert actual_result == {'directory:add': 3}
assert list(swh_storage.journal_writer.objects) == [
('directory', data.dir),
('directory', data.dir2),
('directory', data.dir3)]
# List directory containing a file and an unknown subdirectory
actual_data = list(swh_storage.directory_ls(data.dir['id']))
expected_data = list(transform_entries(data.dir))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a single file
actual_data = list(swh_storage.directory_ls(data.dir2['id']))
expected_data = list(transform_entries(data.dir2))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
# List directory containing a known subdirectory, entries should
# only be those of the parent directory, not of the subdir
actual_data = list(swh_storage.directory_ls(data.dir3['id']))
expected_data = list(transform_entries(data.dir3))
assert sorted(expected_data, key=cmpdir) \
== sorted(actual_data, key=cmpdir)
def test_directory_entry_get_by_path(self, swh_storage):
# given
init_missing = list(swh_storage.directory_missing([data.dir3['id']]))
assert [data.dir3['id']] == init_missing
actual_result = swh_storage.directory_add([data.dir3, data.dir4])
assert actual_result == {'directory:add': 2}
expected_entries = [
{
'dir_id': data.dir3['id'],
'name': b'foo',
'type': 'file',
'target': data.cont['sha1_git'],
'sha1': None,
'sha1_git': None,
'sha256': None,
'status': None,
'perms': from_disk.DentryPerms.content,
'length': None,
},
{
'dir_id': data.dir3['id'],
'name': b'subdir',
'type': 'dir',
'target': data.dir['id'],
'sha1': None,
'sha1_git': None,
'sha256': None,
'status': None,
'perms': from_disk.DentryPerms.directory,
'length': None,
},
{
'dir_id': data.dir3['id'],
'name': b'hello',
'type': 'file',
'target': b'12345678901234567890',
'sha1': None,
'sha1_git': None,
'sha256': None,
'status': None,
'perms': from_disk.DentryPerms.content,
'length': None,
},
]
# when (all must be found here)
for entry, expected_entry in zip(
data.dir3['entries'], expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
data.dir3['id'],
[entry['name']])
assert actual_entry == expected_entry
# same, but deeper
for entry, expected_entry in zip(
data.dir3['entries'], expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
data.dir4['id'],
[b'subdir1', entry['name']])
expected_entry = expected_entry.copy()
expected_entry['name'] = b'subdir1/' + expected_entry['name']
assert actual_entry == expected_entry
# when (nothing should be found here since data.dir is not persisted.)
for entry in data.dir['entries']:
actual_entry = swh_storage.directory_entry_get_by_path(
data.dir['id'],
[entry['name']])
assert actual_entry is None
def test_revision_add(self, swh_storage):
init_missing = swh_storage.revision_missing([data.revision['id']])
assert list(init_missing) == [data.revision['id']]
actual_result = swh_storage.revision_add([data.revision])
assert actual_result == {'revision:add': 1}
end_missing = swh_storage.revision_missing([data.revision['id']])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.objects) \
== [('revision', data.revision)]
# already there so nothing added
actual_result = swh_storage.revision_add([data.revision])
assert actual_result == {'revision:add': 0}
def test_revision_add_validation(self, swh_storage):
rev = copy.deepcopy(data.revision)
rev['date']['offset'] = 2**16
with pytest.raises((ValueError, psycopg2.DataError),
match='offset') as cm:
swh_storage.revision_add([rev])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode \
== psycopg2.errorcodes.NUMERIC_VALUE_OUT_OF_RANGE
rev = copy.deepcopy(data.revision)
rev['committer_date']['offset'] = 2**16
with pytest.raises((ValueError, psycopg2.DataError),
match='offset') as cm:
swh_storage.revision_add([rev])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode \
== psycopg2.errorcodes.NUMERIC_VALUE_OUT_OF_RANGE
rev = copy.deepcopy(data.revision)
rev['type'] = 'foobar'
with pytest.raises((ValueError, psycopg2.DataError),
match='(?i)type') as cm:
swh_storage.revision_add([rev])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode == \
psycopg2.errorcodes.INVALID_TEXT_REPRESENTATION
def test_revision_add_twice(self, swh_storage):
actual_result = swh_storage.revision_add([data.revision])
assert actual_result == {'revision:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('revision', data.revision)]
actual_result = swh_storage.revision_add(
[data.revision, data.revision2])
assert actual_result == {'revision:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('revision', data.revision),
('revision', data.revision2)]
def test_revision_add_name_clash(self, swh_storage):
revision1 = data.revision
revision2 = data.revision2
revision1['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe',
'email': b'john.doe@example.com'
}
revision2['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe ',
'email': b'john.doe@example.com '
}
actual_result = swh_storage.revision_add([revision1, revision2])
assert actual_result == {'revision:add': 2}
def test_revision_log(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
# when
actual_results = list(swh_storage.revision_log(
[data.revision4['id']]))
# hack: ids generated
for actual_result in actual_results:
if 'id' in actual_result['author']:
del actual_result['author']['id']
if 'id' in actual_result['committer']:
del actual_result['committer']['id']
assert len(actual_results) == 2 # rev4 -child-> rev3
assert actual_results[0] == normalize_entity(data.revision4)
assert actual_results[1] == normalize_entity(data.revision3)
assert list(swh_storage.journal_writer.objects) == [
('revision', data.revision3),
('revision', data.revision4)]
def test_revision_log_with_limit(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
actual_results = list(swh_storage.revision_log(
[data.revision4['id']], 1))
# hack: ids generated
for actual_result in actual_results:
if 'id' in actual_result['author']:
del actual_result['author']['id']
if 'id' in actual_result['committer']:
del actual_result['committer']['id']
assert len(actual_results) == 1
assert actual_results[0] == data.revision4
def test_revision_log_unknown_revision(self, swh_storage):
rev_log = list(swh_storage.revision_log([data.revision['id']]))
assert rev_log == []
def test_revision_shortlog(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
# when
actual_results = list(swh_storage.revision_shortlog(
[data.revision4['id']]))
assert len(actual_results) == 2 # rev4 -child-> rev3
assert list(actual_results[0]) == short_revision(data.revision4)
assert list(actual_results[1]) == short_revision(data.revision3)
def test_revision_shortlog_with_limit(self, swh_storage):
# given
# data.revision4 -is-child-of-> data.revision3
swh_storage.revision_add([data.revision3,
data.revision4])
actual_results = list(swh_storage.revision_shortlog(
[data.revision4['id']], 1))
assert len(actual_results) == 1
assert list(actual_results[0]) == short_revision(data.revision4)
def test_revision_get(self, swh_storage):
swh_storage.revision_add([data.revision])
actual_revisions = list(swh_storage.revision_get(
[data.revision['id'], data.revision2['id']]))
# when
if 'id' in actual_revisions[0]['author']:
del actual_revisions[0]['author']['id'] # hack: ids are generated
if 'id' in actual_revisions[0]['committer']:
del actual_revisions[0]['committer']['id']
assert len(actual_revisions) == 2
assert actual_revisions[0] == normalize_entity(data.revision)
assert actual_revisions[1] is None
def test_revision_get_no_parents(self, swh_storage):
swh_storage.revision_add([data.revision3])
get = list(swh_storage.revision_get([data.revision3['id']]))
assert len(get) == 1
assert get[0]['parents'] == [] # no parents on this one
def test_release_add(self, swh_storage):
init_missing = swh_storage.release_missing([data.release['id'],
data.release2['id']])
assert [data.release['id'], data.release2['id']] == list(init_missing)
actual_result = swh_storage.release_add([data.release, data.release2])
assert actual_result == {'release:add': 2}
end_missing = swh_storage.release_missing([data.release['id'],
data.release2['id']])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.objects) == [
('release', data.release),
('release', data.release2)]
# already present so nothing added
actual_result = swh_storage.release_add([data.release, data.release2])
assert actual_result == {'release:add': 0}
def test_release_add_no_author_date(self, swh_storage):
release = data.release
release['author'] = None
release['date'] = None
actual_result = swh_storage.release_add([release])
assert actual_result == {'release:add': 1}
end_missing = swh_storage.release_missing([data.release['id']])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.objects) \
== [('release', release)]
def test_release_add_validation(self, swh_storage):
rel = copy.deepcopy(data.release)
rel['date']['offset'] = 2**16
with pytest.raises((ValueError, psycopg2.DataError),
match='offset') as cm:
swh_storage.release_add([rel])
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode \
== psycopg2.errorcodes.NUMERIC_VALUE_OUT_OF_RANGE
rel = copy.deepcopy(data.release)
rel['author'] = None
with pytest.raises((ValueError, psycopg2.IntegrityError),
match='date') as cm:
swh_storage.release_add([rel])
if type(cm.value) == psycopg2.IntegrityError:
assert cm.value.pgcode == psycopg2.errorcodes.CHECK_VIOLATION
def test_release_add_twice(self, swh_storage):
actual_result = swh_storage.release_add([data.release])
assert actual_result == {'release:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('release', data.release)]
actual_result = swh_storage.release_add([data.release, data.release2])
assert actual_result == {'release:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('release', data.release),
('release', data.release2)]
def test_release_add_name_clash(self, swh_storage):
release1 = data.release.copy()
release2 = data.release2.copy()
release1['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe',
'email': b'john.doe@example.com'
}
release2['author'] = {
'fullname': b'John Doe <john.doe@example.com>',
'name': b'John Doe ',
'email': b'john.doe@example.com '
}
actual_result = swh_storage.release_add([release1, release2])
assert actual_result == {'release:add': 2}
def test_release_get(self, swh_storage):
# given
swh_storage.release_add([data.release, data.release2])
# when
actual_releases = list(swh_storage.release_get([data.release['id'],
data.release2['id']]))
# then
for actual_release in actual_releases:
if 'id' in actual_release['author']:
del actual_release['author']['id'] # hack: ids are generated
assert [
normalize_entity(data.release), normalize_entity(data.release2)] \
== [actual_releases[0], actual_releases[1]]
unknown_releases = \
list(swh_storage.release_get([data.release3['id']]))
assert unknown_releases[0] is None
def test_origin_add_one(self, swh_storage):
origin0 = swh_storage.origin_get(data.origin)
assert origin0 is None
id = swh_storage.origin_add_one(data.origin)
actual_origin = swh_storage.origin_get({'url': data.origin['url']})
if self._test_origin_ids:
assert actual_origin['id'] == id
assert actual_origin['url'] == data.origin['url']
id2 = swh_storage.origin_add_one(data.origin)
assert id == id2
def test_origin_add(self, swh_storage):
origin0 = swh_storage.origin_get([data.origin])[0]
assert origin0 is None
origin1, origin2 = swh_storage.origin_add([data.origin, data.origin2])
actual_origin = swh_storage.origin_get([{
'url': data.origin['url'],
}])[0]
if self._test_origin_ids:
assert actual_origin['id'] == origin1['id']
assert actual_origin['url'] == origin1['url']
actual_origin2 = swh_storage.origin_get([{
'url': data.origin2['url'],
}])[0]
if self._test_origin_ids:
assert actual_origin2['id'] == origin2['id']
assert actual_origin2['url'] == origin2['url']
if 'id' in actual_origin:
del actual_origin['id']
del actual_origin2['id']
assert list(swh_storage.journal_writer.objects) \
== [('origin', actual_origin),
('origin', actual_origin2)]
def test_origin_add_twice(self, swh_storage):
add1 = swh_storage.origin_add([data.origin, data.origin2])
assert list(swh_storage.journal_writer.objects) \
== [('origin', data.origin),
('origin', data.origin2)]
add2 = swh_storage.origin_add([data.origin, data.origin2])
assert list(swh_storage.journal_writer.objects) \
== [('origin', data.origin),
('origin', data.origin2)]
assert add1 == add2
def test_origin_add_validation(self, swh_storage):
with pytest.raises((TypeError, KeyError), match='url'):
swh_storage.origin_add([{'type': 'git'}])
def test_origin_get_legacy(self, swh_storage):
assert swh_storage.origin_get(data.origin) is None
id = swh_storage.origin_add_one(data.origin)
# lookup per url (returns id)
actual_origin0 = swh_storage.origin_get(
{'url': data.origin['url']})
if self._test_origin_ids:
assert actual_origin0['id'] == id
assert actual_origin0['url'] == data.origin['url']
# lookup per id (returns dict)
if self._test_origin_ids:
actual_origin1 = swh_storage.origin_get({'id': id})
assert actual_origin1 == {'id': id,
'type': data.origin['type'],
'url': data.origin['url']}
def test_origin_get(self, swh_storage):
assert swh_storage.origin_get(data.origin) is None
origin_id = swh_storage.origin_add_one(data.origin)
# lookup per url (returns id)
actual_origin0 = swh_storage.origin_get(
[{'url': data.origin['url']}])
assert len(actual_origin0) == 1
assert actual_origin0[0]['url'] == data.origin['url']
if self._test_origin_ids:
# lookup per id (returns dict)
actual_origin1 = swh_storage.origin_get([{'id': origin_id}])
assert len(actual_origin1) == 1
assert actual_origin1[0] == {'id': origin_id,
'type': data.origin['type'],
'url': data.origin['url']}
def test_origin_get_consistency(self, swh_storage):
assert swh_storage.origin_get(data.origin) is None
id = swh_storage.origin_add_one(data.origin)
with pytest.raises(ValueError):
swh_storage.origin_get([
{'url': data.origin['url']},
{'id': id}])
def test_origin_search_single_result(self, swh_storage):
found_origins = list(swh_storage.origin_search(data.origin['url']))
assert len(found_origins) == 0
found_origins = list(swh_storage.origin_search(data.origin['url'],
regexp=True))
assert len(found_origins) == 0
swh_storage.origin_add_one(data.origin)
origin_data = {
'type': data.origin['type'],
'url': data.origin['url']}
found_origins = list(swh_storage.origin_search(data.origin['url']))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin_data
found_origins = list(swh_storage.origin_search(
'.' + data.origin['url'][1:-1] + '.', regexp=True))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin_data
swh_storage.origin_add_one(data.origin2)
origin2_data = {
'type': data.origin2['type'],
'url': data.origin2['url']}
found_origins = list(swh_storage.origin_search(data.origin2['url']))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin2_data
found_origins = list(swh_storage.origin_search(
'.' + data.origin2['url'][1:-1] + '.', regexp=True))
assert len(found_origins) == 1
if 'id' in found_origins[0]:
del found_origins[0]['id']
assert found_origins[0] == origin2_data
def test_origin_search_no_regexp(self, swh_storage):
swh_storage.origin_add_one(data.origin)
swh_storage.origin_add_one(data.origin2)
origin = swh_storage.origin_get({'url': data.origin['url']})
origin2 = swh_storage.origin_get({'url': data.origin2['url']})
# no pagination
found_origins = list(swh_storage.origin_search('/'))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(swh_storage.origin_search('/', offset=0, limit=1)) # noqa
assert len(found_origins0) == 1
assert found_origins0[0] in [origin, origin2]
# offset=1
found_origins1 = list(swh_storage.origin_search('/', offset=1, limit=1)) # noqa
assert len(found_origins1) == 1
assert found_origins1[0] in [origin, origin2]
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_search_regexp_substring(self, swh_storage):
swh_storage.origin_add_one(data.origin)
swh_storage.origin_add_one(data.origin2)
origin = swh_storage.origin_get({'url': data.origin['url']})
origin2 = swh_storage.origin_get({'url': data.origin2['url']})
# no pagination
found_origins = list(swh_storage.origin_search('/', regexp=True))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(swh_storage.origin_search('/', offset=0, limit=1, regexp=True)) # noqa
assert len(found_origins0) == 1
assert found_origins0[0] in [origin, origin2]
# offset=1
found_origins1 = list(swh_storage.origin_search('/', offset=1, limit=1, regexp=True)) # noqa
assert len(found_origins1) == 1
assert found_origins1[0] in [origin, origin2]
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_search_regexp_fullstring(self, swh_storage):
swh_storage.origin_add_one(data.origin)
swh_storage.origin_add_one(data.origin2)
origin = swh_storage.origin_get({'url': data.origin['url']})
origin2 = swh_storage.origin_get({'url': data.origin2['url']})
# no pagination
found_origins = list(swh_storage.origin_search('.*/.*', regexp=True))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(swh_storage.origin_search('.*/.*', offset=0, limit=1, regexp=True)) # noqa
assert len(found_origins0) == 1
assert found_origins0[0] in [origin, origin2]
# offset=1
found_origins1 = list(swh_storage.origin_search('.*/.*', offset=1, limit=1, regexp=True)) # noqa
assert len(found_origins1) == 1
assert found_origins1[0] in [origin, origin2]
# check both origins were returned
assert found_origins0 != found_origins1
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_add(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
# given
origin_id = swh_storage.origin_add_one(data.origin2)
assert origin_id is not None
origin_id_or_url = data.origin2['url'] if use_url else origin_id
# when
date_visit = datetime.datetime.now(datetime.timezone.utc)
origin_visit1 = swh_storage.origin_visit_add(
origin_id_or_url,
type='git',
date=date_visit)
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_id_or_url))
assert {
'origin': origin_id,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': 'git',
'status': 'ongoing',
'metadata': None,
'snapshot': None,
} in actual_origin_visits
expected_origin = data.origin2
origin_visit = {
'origin': expected_origin,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': 'git',
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
objects = list(swh_storage.journal_writer.objects)
assert ('origin', expected_origin) in objects
assert ('origin_visit', origin_visit) in objects
def test_origin_visit_get__unknown_origin(self, swh_storage):
assert [] == list(swh_storage.origin_visit_get('foo'))
if self._test_origin_ids:
assert list(swh_storage.origin_visit_get(10)) == []
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_add_default_type(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
# given
origin_id = swh_storage.origin_add_one(data.origin2)
origin_id_or_url = data.origin2['url'] if use_url else origin_id
assert origin_id is not None
# when
date_visit = datetime.datetime.now(datetime.timezone.utc)
date_visit2 = date_visit + datetime.timedelta(minutes=1)
origin_visit1 = swh_storage.origin_visit_add(
origin_id_or_url,
date=date_visit)
origin_visit2 = swh_storage.origin_visit_add(
origin_id_or_url,
date=date_visit2)
# then
assert origin_visit1['origin'] == origin_id
assert origin_visit1['visit'] is not None
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_id_or_url))
expected_visits = [
{
'origin': origin_id,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': 'hg',
'status': 'ongoing',
'metadata': None,
'snapshot': None,
},
{
'origin': origin_id,
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': 'hg',
'status': 'ongoing',
'metadata': None,
'snapshot': None,
},
]
for visit in expected_visits:
assert visit in actual_origin_visits
objects = list(swh_storage.journal_writer.objects)
assert ('origin', data.origin2) in objects
for visit in expected_visits:
visit['origin'] = data.origin2
assert ('origin_visit', visit) in objects
def test_origin_visit_add_validation(self, swh_storage):
origin_id_or_url = swh_storage.origin_add_one(data.origin2)
with pytest.raises((TypeError, psycopg2.ProgrammingError)) as cm:
swh_storage.origin_visit_add(origin_id_or_url, date=[b'foo'])
if type(cm.value) == psycopg2.ProgrammingError:
assert cm.value.pgcode \
== psycopg2.errorcodes.UNDEFINED_FUNCTION
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_update(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
# given
swh_storage.origin_add_one(data.origin)
origin_url = data.origin['url']
date_visit = datetime.datetime.now(datetime.timezone.utc)
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
date=date_visit)
date_visit2 = date_visit + datetime.timedelta(minutes=1)
origin_visit2 = swh_storage.origin_visit_add(
origin_url,
date=date_visit2)
swh_storage.origin_add_one(data.origin2)
origin_url2 = data.origin2['url']
origin_visit3 = swh_storage.origin_visit_add(
origin_url2,
date=date_visit2)
# when
visit1_metadata = {
'contents': 42,
'directories': 22,
}
swh_storage.origin_visit_update(
origin_url,
origin_visit1['visit'], status='full',
metadata=visit1_metadata)
swh_storage.origin_visit_update(
origin_url2,
origin_visit3['visit'], status='partial')
# then
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
expected_visits = [{
'origin': origin_visit2['origin'],
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'status': 'full',
'metadata': visit1_metadata,
'snapshot': None,
}, {
'origin': origin_visit2['origin'],
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}]
for visit in expected_visits:
assert visit in actual_origin_visits
actual_origin_visits_bis = list(swh_storage.origin_visit_get(
origin_url,
limit=1))
assert actual_origin_visits_bis == [
{
'origin': origin_visit2['origin'],
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'status': 'full',
'metadata': visit1_metadata,
'snapshot': None,
}]
actual_origin_visits_ter = list(swh_storage.origin_visit_get(
origin_url,
last_visit=origin_visit1['visit']))
assert actual_origin_visits_ter == [
{
'origin': origin_visit2['origin'],
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}]
actual_origin_visits2 = list(swh_storage.origin_visit_get(
origin_url2))
assert actual_origin_visits2 == [
{
'origin': origin_visit3['origin'],
'date': date_visit2,
'visit': origin_visit3['visit'],
'type': data.origin2['type'],
'status': 'partial',
'metadata': None,
'snapshot': None,
}]
expected_origin = data.origin.copy()
expected_origin2 = data.origin2.copy()
data1 = {
'origin': expected_origin,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': expected_origin,
'date': date_visit2,
'visit': origin_visit2['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data3 = {
'origin': expected_origin2,
'date': date_visit2,
'visit': origin_visit3['visit'],
'type': data.origin2['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data4 = {
'origin': expected_origin,
'date': date_visit,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'metadata': visit1_metadata,
'status': 'full',
'snapshot': None,
}
data5 = {
'origin': expected_origin2,
'date': date_visit2,
'visit': origin_visit3['visit'],
'type': data.origin2['type'],
'status': 'partial',
'metadata': None,
'snapshot': None,
}
objects = list(swh_storage.journal_writer.objects)
assert ('origin', expected_origin) in objects
assert ('origin', expected_origin2) in objects
assert ('origin_visit', data1) in objects
assert ('origin_visit', data2) in objects
assert ('origin_visit', data3) in objects
assert ('origin_visit', data4) in objects
assert ('origin_visit', data5) in objects
def test_origin_visit_update_validation(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
visit = swh_storage.origin_visit_add(
origin_id,
date=data.date_visit2)
with pytest.raises((ValueError, psycopg2.DataError),
match='status') as cm:
swh_storage.origin_visit_update(
origin_id, visit['visit'], status='foobar')
if type(cm.value) == psycopg2.DataError:
assert cm.value.pgcode == \
psycopg2.errorcodes.INVALID_TEXT_REPRESENTATION
def test_origin_visit_find_by_date(self, swh_storage):
# given
swh_storage.origin_add_one(data.origin)
swh_storage.origin_visit_add(
data.origin['url'],
date=data.date_visit2)
origin_visit2 = swh_storage.origin_visit_add(
data.origin['url'],
date=data.date_visit3)
origin_visit3 = swh_storage.origin_visit_add(
data.origin['url'],
date=data.date_visit2)
# Simple case
visit = swh_storage.origin_visit_find_by_date(
data.origin['url'], data.date_visit3)
assert visit['visit'] == origin_visit2['visit']
# There are two visits at the same date, the latest must be returned
visit = swh_storage.origin_visit_find_by_date(
data.origin['url'], data.date_visit2)
assert visit['visit'] == origin_visit3['visit']
def test_origin_visit_find_by_date__unknown_origin(self, swh_storage):
swh_storage.origin_visit_find_by_date('foo', data.date_visit2)
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_update_missing_snapshot(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
# given
origin_id = swh_storage.origin_add_one(data.origin)
origin_id_or_url = data.origin['url'] if use_url else origin_id
origin_visit = swh_storage.origin_visit_add(
origin_id_or_url,
date=data.date_visit1)
# when
swh_storage.origin_visit_update(
origin_id_or_url,
origin_visit['visit'],
snapshot=data.snapshot['id'])
# then
actual_origin_visit = swh_storage.origin_visit_get_by(
origin_id_or_url,
origin_visit['visit'])
assert actual_origin_visit['snapshot'] == data.snapshot['id']
# when
swh_storage.snapshot_add([data.snapshot])
assert actual_origin_visit['snapshot'] == data.snapshot['id']
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_get_by(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
origin_id = swh_storage.origin_add_one(data.origin)
origin_id2 = swh_storage.origin_add_one(data.origin2)
origin_id_or_url = data.origin['url'] if use_url else origin_id
origin2_id_or_url = data.origin2['url'] if use_url else origin_id2
origin_visit1 = swh_storage.origin_visit_add(
origin_id_or_url,
date=data.date_visit2)
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_id_or_url,
origin_visit1['visit'],
snapshot=data.snapshot['id'])
# Add some other {origin, visit} entries
swh_storage.origin_visit_add(
origin_id_or_url,
date=data.date_visit3)
swh_storage.origin_visit_add(
origin2_id_or_url,
date=data.date_visit3)
# when
visit1_metadata = {
'contents': 42,
'directories': 22,
}
swh_storage.origin_visit_update(
origin_id_or_url,
origin_visit1['visit'], status='full',
metadata=visit1_metadata)
expected_origin_visit = origin_visit1.copy()
expected_origin_visit.update({
'origin': origin_id,
'visit': origin_visit1['visit'],
'date': data.date_visit2,
'type': data.origin['type'],
'metadata': visit1_metadata,
'status': 'full',
'snapshot': data.snapshot['id'],
})
# when
actual_origin_visit1 = swh_storage.origin_visit_get_by(
origin_id_or_url,
origin_visit1['visit'])
# then
assert actual_origin_visit1 == expected_origin_visit
def test_origin_visit_get_by__unknown_origin(self, swh_storage):
if self._test_origin_ids:
assert swh_storage.origin_visit_get_by(2, 10) is None
assert swh_storage.origin_visit_get_by('foo', 10) is None
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_upsert_new(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
# given
origin_id = swh_storage.origin_add_one(data.origin2)
origin_url = data.origin2['url']
assert origin_id is not None
# when
swh_storage.origin_visit_upsert([
{
'origin': data.origin2,
'date': data.date_visit2,
'visit': 123,
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
},
{
'origin': data.origin2,
'date': '2018-01-01 23:00:00+00',
'visit': 1234,
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
},
])
# then
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
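# the second visit was upserted with a string date which, per the fixture
# data, denotes the same instant as data.date_visit3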
assert actual_origin_visits == [
{
'origin': origin_id,
'date': data.date_visit2,
'visit': 123,
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
},
{
'origin': origin_id,
'date': data.date_visit3,
'visit': 1234,
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
},
]
expected_origin = data.origin2
data1 = {
'origin': expected_origin,
'date': data.date_visit2,
'visit': 123,
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': expected_origin,
'date': data.date_visit3,
'visit': 1234,
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
}
assert list(swh_storage.journal_writer.objects) == [
('origin', expected_origin),
('origin_visit', data1),
('origin_visit', data2)]
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_upsert_existing(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
# given
origin_id = swh_storage.origin_add_one(data.origin2)
origin_url = data.origin2['url']
assert origin_id is not None
# when
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
date=data.date_visit2)
swh_storage.origin_visit_upsert([{
'origin': data.origin2,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
}])
# then
assert origin_visit1['origin'] == origin_id
assert origin_visit1['visit'] is not None
actual_origin_visits = list(swh_storage.origin_visit_get(
origin_url))
assert actual_origin_visits == [
{
'origin': origin_id,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
}]
expected_origin = data.origin2
data1 = {
'origin': expected_origin,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.origin2['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': expected_origin,
'date': data.date_visit2,
'visit': origin_visit1['visit'],
'type': data.origin2['type'],
'status': 'full',
'metadata': None,
'snapshot': None,
}
assert list(swh_storage.journal_writer.objects) == [
('origin', expected_origin),
('origin_visit', data1),
('origin_visit', data2)]
def test_origin_visit_get_by_no_result(self, swh_storage):
if self._test_origin_ids:
actual_origin_visit = swh_storage.origin_visit_get_by(
10, 999)
assert actual_origin_visit is None
swh_storage.origin_add([data.origin])
actual_origin_visit = swh_storage.origin_visit_get_by(
data.origin['url'], 999)
assert actual_origin_visit is None
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_visit_get_latest(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
swh_storage.origin_add_one(data.origin)
origin_url = data.origin['url']
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
data.date_visit1)
visit1_id = origin_visit1['visit']
origin_visit2 = swh_storage.origin_visit_add(
origin_url,
data.date_visit2)
visit2_id = origin_visit2['visit']
# Add a visit with the same date as the previous one
origin_visit3 = swh_storage.origin_visit_add(
origin_url,
data.date_visit2)
visit3_id = origin_visit3['visit']
origin_visit1 = swh_storage.origin_visit_get_by(origin_url, visit1_id)
origin_visit2 = swh_storage.origin_visit_get_by(origin_url, visit2_id)
origin_visit3 = swh_storage.origin_visit_get_by(origin_url, visit3_id)
# Two visits, both with no snapshot
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
assert swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True) is None
# Add snapshot to visit1; require_snapshot=True makes it return
# visit1 and require_snapshot=False still returns visit3
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit1_id,
snapshot=data.complete_snapshot['id'])
assert {**origin_visit1, 'snapshot': data.complete_snapshot['id']} \
== swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True)
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
# Status filter: all three visits are status=ongoing, so no visit
# returned
assert swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full']) is None
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_update(
origin_url,
visit1_id, status='full')
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'])
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
# Add snapshot to visit2 and check that the new snapshot is returned
swh_storage.snapshot_add([data.empty_snapshot])
swh_storage.origin_visit_update(
origin_url, visit2_id,
snapshot=data.empty_snapshot['id'])
assert {**origin_visit2, 'snapshot': data.empty_snapshot['id']} == \
swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True)
assert origin_visit3 == swh_storage.origin_visit_get_latest(origin_url)
# Check that the status filter is still working
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'])
# Add snapshot to visit3 (same date as visit2)
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_url, visit3_id, snapshot=data.complete_snapshot['id'])
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'])
assert {
**origin_visit1,
'snapshot': data.complete_snapshot['id'],
'status': 'full'} == swh_storage.origin_visit_get_latest(
origin_url, allowed_statuses=['full'], require_snapshot=True)
assert {
**origin_visit3,
'snapshot': data.complete_snapshot['id']
} == swh_storage.origin_visit_get_latest(origin_url)
assert {
**origin_visit3,
'snapshot': data.complete_snapshot['id']
} == swh_storage.origin_visit_get_latest(
origin_url, require_snapshot=True)
def test_person_fullname_unicity(self, swh_storage):
# given (person injection through revisions for example)
revision = data.revision
# create a revision with same committer fullname but wo name and email
revision2 = copy.deepcopy(data.revision2)
revision2['committer'] = dict(revision['committer'])
revision2['committer']['email'] = None
revision2['committer']['name'] = None
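# both committers keep the same fullname, so the storage is expected to
# deduplicate them on that field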
swh_storage.revision_add([revision])
swh_storage.revision_add([revision2])
# when getting added revisions
revisions = list(
swh_storage.revision_get([revision['id'], revision2['id']]))
# then
# check committers are the same
assert revisions[0]['committer'] == revisions[1]['committer']
def test_snapshot_add_get_empty(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit_id = origin_visit1['visit']
actual_result = swh_storage.snapshot_add([data.empty_snapshot])
assert actual_result == {'snapshot:add': 1}
swh_storage.origin_visit_update(
origin_id, visit_id, snapshot=data.empty_snapshot['id'])
by_id = swh_storage.snapshot_get(data.empty_snapshot['id'])
assert by_id == {**data.empty_snapshot, 'next_branch': None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin_id, visit_id)
assert by_ov == {**data.empty_snapshot, 'next_branch': None}
expected_origin = data.origin.copy()
data1 = {
'origin': expected_origin,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': expected_origin,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': data.empty_snapshot['id'],
}
assert list(swh_storage.journal_writer.objects) == \
[('origin', expected_origin),
('origin_visit', data1),
('snapshot', data.empty_snapshot),
('origin_visit', data2)]
def test_snapshot_add_get_complete(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit_id = origin_visit1['visit']
actual_result = swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_id, visit_id, snapshot=data.complete_snapshot['id'])
assert actual_result == {'snapshot:add': 1}
by_id = swh_storage.snapshot_get(data.complete_snapshot['id'])
assert by_id == {**data.complete_snapshot, 'next_branch': None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin_id, visit_id)
assert by_ov == {**data.complete_snapshot, 'next_branch': None}
def test_snapshot_add_many(self, swh_storage):
actual_result = swh_storage.snapshot_add(
[data.snapshot, data.complete_snapshot])
assert actual_result == {'snapshot:add': 2}
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.complete_snapshot['id'])
assert {**data.snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.snapshot['id'])
def test_snapshot_add_many_incremental(self, swh_storage):
actual_result = swh_storage.snapshot_add([data.complete_snapshot])
assert actual_result == {'snapshot:add': 1}
actual_result2 = swh_storage.snapshot_add(
[data.snapshot, data.complete_snapshot])
assert actual_result2 == {'snapshot:add': 1}
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.complete_snapshot['id'])
assert {**data.snapshot, 'next_branch': None} \
== swh_storage.snapshot_get(data.snapshot['id'])
def test_snapshot_add_twice(self, swh_storage):
actual_result = swh_storage.snapshot_add([data.empty_snapshot])
assert actual_result == {'snapshot:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('snapshot', data.empty_snapshot)]
actual_result = swh_storage.snapshot_add([data.snapshot])
assert actual_result == {'snapshot:add': 1}
assert list(swh_storage.journal_writer.objects) \
== [('snapshot', data.empty_snapshot),
('snapshot', data.snapshot)]
def test_snapshot_add_validation(self, swh_storage):
snap = copy.deepcopy(data.snapshot)
snap['branches'][b'foo'] = {'target_type': 'revision'}
with pytest.raises(KeyError, match='target'):
swh_storage.snapshot_add([snap])
snap = copy.deepcopy(data.snapshot)
snap['branches'][b'foo'] = {'target': b'\x42'*20}
with pytest.raises(KeyError, match='target_type'):
swh_storage.snapshot_add([snap])
def test_snapshot_add_count_branches(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit_id = origin_visit1['visit']
actual_result = swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_id, visit_id, snapshot=data.complete_snapshot['id'])
assert actual_result == {'snapshot:add': 1}
snp_id = data.complete_snapshot['id']
snp_size = swh_storage.snapshot_count_branches(snp_id)
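# per-target-type branch counts for data.complete_snapshot; the None key
# presumably counts branches without a target (dangling branches)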
expected_snp_size = {
'alias': 1,
'content': 1,
'directory': 2,
'release': 1,
'revision': 1,
'snapshot': 1,
None: 1
}
assert snp_size == expected_snp_size
def test_snapshot_add_get_paginated(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit_id = origin_visit1['visit']
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_id, visit_id,
snapshot=data.complete_snapshot['id'])
snp_id = data.complete_snapshot['id']
branches = data.complete_snapshot['branches']
branch_names = list(sorted(branches))
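# pagination operates on branch names in lexicographic (bytes) order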
# Test branch_from
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_from=b'release')
rel_idx = branch_names.index(b'release')
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in branch_names[rel_idx:]
},
'next_branch': None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_count=1)
expected_snapshot = {
'id': snp_id,
'branches': {
branch_names[0]: branches[branch_names[0]],
},
'next_branch': b'content',
}
assert snapshot == expected_snapshot
# test branch_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_from=b'directory', branches_count=3)
dir_idx = branch_names.index(b'directory')
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in branch_names[dir_idx:dir_idx + 3]
},
'next_branch': branch_names[dir_idx + 3],
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit_id = origin_visit1['visit']
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_id, visit_id, snapshot=data.complete_snapshot['id'])
snp_id = data.complete_snapshot['id']
branches = data.complete_snapshot['branches']
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['release', 'revision'])
expected_snapshot = {
'id': snp_id,
'branches': {
name: tgt
for name, tgt in branches.items()
if tgt and tgt['target_type'] in ['release', 'revision']
},
'next_branch': None,
}
assert snapshot == expected_snapshot
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['alias'])
expected_snapshot = {
'id': snp_id,
'branches': {
name: tgt
for name, tgt in branches.items()
if tgt and tgt['target_type'] == 'alias'
},
'next_branch': None,
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered_and_paginated(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit_id = origin_visit1['visit']
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_id, visit_id, snapshot=data.complete_snapshot['id'])
snp_id = data.complete_snapshot['id']
branches = data.complete_snapshot['branches']
branch_names = list(sorted(branches))
# Test branch_from
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_from=b'directory2')
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in (b'directory2', b'release')
},
'next_branch': None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_count=1)
expected_snapshot = {
'id': snp_id,
'branches': {
b'directory': branches[b'directory']
},
'next_branch': b'directory2',
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_count=2)
expected_snapshot = {
'id': snp_id,
'branches': {
name: branches[name]
for name in (b'directory', b'directory2')
},
'next_branch': b'release',
}
assert snapshot == expected_snapshot
# test branch_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=['directory', 'release'],
branches_from=b'directory2', branches_count=1)
dir_idx = branch_names.index(b'directory2')
expected_snapshot = {
'id': snp_id,
'branches': {
branch_names[dir_idx]: branches[branch_names[dir_idx]],
},
'next_branch': b'release',
}
assert snapshot == expected_snapshot
def test_snapshot_add_get(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit_id = origin_visit1['visit']
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_id, visit_id, snapshot=data.snapshot['id'])
by_id = swh_storage.snapshot_get(data.snapshot['id'])
assert by_id == {**data.snapshot, 'next_branch': None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin_id, visit_id)
assert by_ov == {**data.snapshot, 'next_branch': None}
origin_visit_info = swh_storage.origin_visit_get_by(
origin_id, visit_id)
assert origin_visit_info['snapshot'] == data.snapshot['id']
def test_snapshot_add_nonexistent_visit(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
visit_id = 54164461156
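# clear the journal so that only the objects written below are checked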
swh_storage.journal_writer.objects[:] = []
swh_storage.snapshot_add([data.snapshot])
with pytest.raises(ValueError):
swh_storage.origin_visit_update(
origin_id, visit_id, snapshot=data.snapshot['id'])
assert list(swh_storage.journal_writer.objects) == [
('snapshot', data.snapshot)]
def test_snapshot_add_twice__by_origin_visit(self, swh_storage):
origin_id = swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit1_id = origin_visit1['visit']
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_id, visit1_id, snapshot=data.snapshot['id'])
by_ov1 = swh_storage.snapshot_get_by_origin_visit(
origin_id, visit1_id)
assert by_ov1 == {**data.snapshot, 'next_branch': None}
origin_visit2 = swh_storage.origin_visit_add(
origin_id, data.date_visit2)
visit2_id = origin_visit2['visit']
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
origin_id, visit2_id, snapshot=data.snapshot['id'])
by_ov2 = swh_storage.snapshot_get_by_origin_visit(
origin_id, visit2_id)
assert by_ov2 == {**data.snapshot, 'next_branch': None}
expected_origin = data.origin.copy()
data1 = {
'origin': expected_origin,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data2 = {
'origin': expected_origin,
'date': data.date_visit1,
'visit': origin_visit1['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': data.snapshot['id'],
}
data3 = {
'origin': expected_origin,
'date': data.date_visit2,
'visit': origin_visit2['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': None,
}
data4 = {
'origin': expected_origin,
'date': data.date_visit2,
'visit': origin_visit2['visit'],
'type': data.origin['type'],
'status': 'ongoing',
'metadata': None,
'snapshot': data.snapshot['id'],
}
assert list(swh_storage.journal_writer.objects) \
== [('origin', expected_origin),
('origin_visit', data1),
('snapshot', data.snapshot),
('origin_visit', data2),
('origin_visit', data3),
('origin_visit', data4)]
@pytest.mark.parametrize('use_url', [True, False])
def test_snapshot_get_latest(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
origin_id = swh_storage.origin_add_one(data.origin)
origin_url = data.origin['url']
origin_visit1 = swh_storage.origin_visit_add(
origin_id, data.date_visit1)
visit1_id = origin_visit1['visit']
origin_visit2 = swh_storage.origin_visit_add(
origin_id, data.date_visit2)
visit2_id = origin_visit2['visit']
# Add a visit with the same date as the previous one
origin_visit3 = swh_storage.origin_visit_add(
origin_id, data.date_visit2)
visit3_id = origin_visit3['visit']
# Two visits, both with no snapshot: latest snapshot is None
assert swh_storage.snapshot_get_latest(origin_url) is None
# Add snapshot to visit1, latest snapshot = visit 1 snapshot
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_id, visit1_id, snapshot=data.complete_snapshot['id'])
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
# Status filter: all three visits are status=ongoing, so no snapshot
# returned
assert swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full']) is None
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_update(origin_id, visit1_id, status='full')
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full'])
# Add snapshot to visit2 and check that the new snapshot is returned
swh_storage.snapshot_add([data.empty_snapshot])
swh_storage.origin_visit_update(
origin_id, visit2_id, snapshot=data.empty_snapshot['id'])
assert {**data.empty_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_id)
# Check that the status filter is still working
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full'])
# Add snapshot to visit3 (same date as visit2) and check that
# the new snapshot is returned
swh_storage.snapshot_add([data.complete_snapshot])
swh_storage.origin_visit_update(
origin_id, visit3_id, snapshot=data.complete_snapshot['id'])
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
@pytest.mark.parametrize('use_url', [True, False])
def test_snapshot_get_latest__missing_snapshot(self, swh_storage, use_url):
if not self._test_origin_ids and not use_url:
return
# Origin does not exist
origin_url = data.origin['url']
assert swh_storage.snapshot_get_latest(origin_url) is None
swh_storage.origin_add_one(data.origin)
origin_visit1 = swh_storage.origin_visit_add(
origin_url,
data.date_visit1)
visit1_id = origin_visit1['visit']
origin_visit2 = swh_storage.origin_visit_add(
origin_url,
data.date_visit2)
visit2_id = origin_visit2['visit']
# Two visits, both with no snapshot: latest snapshot is None
assert swh_storage.snapshot_get_latest(origin_url) is None
# Add unknown snapshot to visit1, check that the inconsistency is
# detected
swh_storage.origin_visit_update(
origin_url,
visit1_id, snapshot=data.complete_snapshot['id'])
with pytest.raises(ValueError):
swh_storage.snapshot_get_latest(
origin_url)
# Status filter: both visits are status=ongoing, so no snapshot
# returned
assert swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full']) is None
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_update(
origin_url,
visit1_id, status='full')
with pytest.raises(ValueError):
swh_storage.snapshot_get_latest(
origin_url,
allowed_statuses=['full'])
# Actually add the snapshot and check status filter again
swh_storage.snapshot_add([data.complete_snapshot])
assert {**data.complete_snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
# Add unknown snapshot to visit2 and check that the inconsistency
# is detected
swh_storage.origin_visit_update(
origin_url,
visit2_id, snapshot=data.snapshot['id'])
with pytest.raises(ValueError):
swh_storage.snapshot_get_latest(
origin_url)
# Actually add that snapshot and check that the new one is returned
swh_storage.snapshot_add([data.snapshot])
assert {**data.snapshot, 'next_branch': None} \
== swh_storage.snapshot_get_latest(origin_url)
def test_stat_counters(self, swh_storage):
expected_keys = ['content', 'directory',
'origin', 'revision']
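# stat_counters reads cached values; refresh them explicitly before each check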
# Initially, all counters are 0
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
assert counters[key] == 0
# Add a content. Only the content counter should increase.
swh_storage.content_add([data.cont])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
if key != 'content':
assert counters[key] == 0
assert counters['content'] == 1
# Add other objects. Check their counter increased as well.
swh_storage.origin_add_one(data.origin2)
origin_visit1 = swh_storage.origin_visit_add(
data.origin2['url'], date=data.date_visit2)
swh_storage.snapshot_add([data.snapshot])
swh_storage.origin_visit_update(
data.origin2['url'], origin_visit1['visit'],
snapshot=data.snapshot['id'])
swh_storage.directory_add([data.dir])
swh_storage.revision_add([data.revision])
swh_storage.release_add([data.release])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert counters['content'] == 1
assert counters['directory'] == 1
assert counters['snapshot'] == 1
assert counters['origin'] == 1
assert counters['origin_visit'] == 1
assert counters['revision'] == 1
assert counters['release'] == 1
assert counters['snapshot'] == 1
if 'person' in counters:
assert counters['person'] == 3
def test_content_find_ctime(self, swh_storage):
cont = data.cont.copy()
del cont['data']
now = datetime.datetime.now(tz=datetime.timezone.utc)
cont['ctime'] = now
swh_storage.content_add_metadata([cont])
actually_present = swh_storage.content_find({'sha1': cont['sha1']})
# check ctime up to one second
dt = actually_present[0]['ctime'] - now
assert abs(dt.total_seconds()) <= 1
del actually_present[0]['ctime']
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
def test_content_find_with_present_content(self, swh_storage):
# 1. with something to find
cont = data.cont
swh_storage.content_add([cont, data.cont2])
actually_present = swh_storage.content_find(
{'sha1': cont['sha1']}
)
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
# 2. with something to find
actually_present = swh_storage.content_find(
{'sha1_git': cont['sha1_git']})
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
# 3. with something to find
actually_present = swh_storage.content_find(
{'sha256': cont['sha256']})
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
# 4. with something to find
actually_present = swh_storage.content_find({
'sha1': cont['sha1'],
'sha1_git': cont['sha1_git'],
'sha256': cont['sha256'],
'blake2s256': cont['blake2s256'],
})
assert 1 == len(actually_present)
actually_present[0].pop('ctime')
assert actually_present[0] == {
'sha1': cont['sha1'],
'sha256': cont['sha256'],
'sha1_git': cont['sha1_git'],
'blake2s256': cont['blake2s256'],
'length': cont['length'],
'status': 'visible'
}
def test_content_find_with_non_present_content(self, swh_storage):
# 1. with something that does not exist
missing_cont = data.missing_cont
actually_present = swh_storage.content_find(
{'sha1': missing_cont['sha1']})
assert actually_present == []
# 2. with something that does not exist
actually_present = swh_storage.content_find(
{'sha1_git': missing_cont['sha1_git']})
assert actually_present == []
# 3. with something that does not exist
actually_present = swh_storage.content_find(
{'sha256': missing_cont['sha256']})
assert actually_present == []
def test_content_find_with_duplicate_input(self, swh_storage):
cont1 = data.cont
duplicate_cont = cont1.copy()
# Create fake data with colliding sha256 and blake2s256
sha1_array = bytearray(duplicate_cont['sha1'])
sha1_array[0] += 1
duplicate_cont['sha1'] = bytes(sha1_array)
sha1git_array = bytearray(duplicate_cont['sha1_git'])
sha1git_array[0] += 1
duplicate_cont['sha1_git'] = bytes(sha1git_array)
# Inject the data
swh_storage.content_add([cont1, duplicate_cont])
finder = {'blake2s256': duplicate_cont['blake2s256'],
'sha256': duplicate_cont['sha256']}
actual_result = list(swh_storage.content_find(finder))
cont1.pop('data')
duplicate_cont.pop('data')
actual_result[0].pop('ctime')
actual_result[1].pop('ctime')
expected_result = [
cont1, duplicate_cont
]
for result in expected_result:
assert result in actual_result
def test_content_find_with_duplicate_sha256(self, swh_storage):
cont1 = data.cont
duplicate_cont = cont1.copy()
# Create fake data with colliding sha256
for hashalgo in ('sha1', 'sha1_git', 'blake2s256'):
value = bytearray(duplicate_cont[hashalgo])
value[0] += 1
duplicate_cont[hashalgo] = bytes(value)
swh_storage.content_add([cont1, duplicate_cont])
finder = {
'sha256': duplicate_cont['sha256']
}
actual_result = list(swh_storage.content_find(finder))
assert len(actual_result) == 2
cont1.pop('data')
duplicate_cont.pop('data')
actual_result[0].pop('ctime')
actual_result[1].pop('ctime')
expected_result = [
cont1, duplicate_cont
]
assert expected_result == sorted(actual_result,
key=lambda x: x['sha1'])
# Find with both sha256 and blake2s256
finder = {
'sha256': duplicate_cont['sha256'],
'blake2s256': duplicate_cont['blake2s256']
}
actual_result = list(swh_storage.content_find(finder))
assert len(actual_result) == 1
actual_result[0].pop('ctime')
expected_result = [duplicate_cont]
assert actual_result == expected_result
def test_content_find_with_duplicate_blake2s256(self, swh_storage):
cont1 = data.cont
duplicate_cont = cont1.copy()
# Create fake data with colliding sha256 and blake2s256
sha1_array = bytearray(duplicate_cont['sha1'])
sha1_array[0] += 1
duplicate_cont['sha1'] = bytes(sha1_array)
sha1git_array = bytearray(duplicate_cont['sha1_git'])
sha1git_array[0] += 1
duplicate_cont['sha1_git'] = bytes(sha1git_array)
sha256_array = bytearray(duplicate_cont['sha256'])
sha256_array[0] += 1
duplicate_cont['sha256'] = bytes(sha256_array)
swh_storage.content_add([cont1, duplicate_cont])
finder = {
'blake2s256': duplicate_cont['blake2s256']
}
actual_result = list(swh_storage.content_find(finder))
cont1.pop('data')
duplicate_cont.pop('data')
actual_result[0].pop('ctime')
actual_result[1].pop('ctime')
expected_result = [
cont1, duplicate_cont
]
for result in expected_result:
assert result in actual_result
# Find with both sha256 and blake2s256
finder = {
'sha256': duplicate_cont['sha256'],
'blake2s256': duplicate_cont['blake2s256']
}
actual_result = list(swh_storage.content_find(finder))
actual_result[0].pop('ctime')
expected_result = [
duplicate_cont
]
assert expected_result == actual_result
def test_content_find_bad_input(self, swh_storage):
# 1. with bad input
with pytest.raises(ValueError):
swh_storage.content_find({}) # empty is bad
# 2. with bad input
with pytest.raises(ValueError):
swh_storage.content_find(
{'unknown-sha1': 'something'}) # not the right key
def test_object_find_by_sha1_git(self, swh_storage):
sha1_gits = [b'00000000000000000000']
expected = {
b'00000000000000000000': [],
}
swh_storage.content_add([data.cont])
sha1_gits.append(data.cont['sha1_git'])
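# contents are matched on their sha1_git, but the returned 'id' is their sha1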
expected[data.cont['sha1_git']] = [{
'sha1_git': data.cont['sha1_git'],
'type': 'content',
'id': data.cont['sha1'],
}]
swh_storage.directory_add([data.dir])
sha1_gits.append(data.dir['id'])
expected[data.dir['id']] = [{
'sha1_git': data.dir['id'],
'type': 'directory',
'id': data.dir['id'],
}]
swh_storage.revision_add([data.revision])
sha1_gits.append(data.revision['id'])
expected[data.revision['id']] = [{
'sha1_git': data.revision['id'],
'type': 'revision',
'id': data.revision['id'],
}]
swh_storage.release_add([data.release])
sha1_gits.append(data.release['id'])
expected[data.release['id']] = [{
'sha1_git': data.release['id'],
'type': 'release',
'id': data.release['id'],
}]
ret = swh_storage.object_find_by_sha1_git(sha1_gits)
for val in ret.values():
for obj in val:
if 'object_id' in obj:
del obj['object_id']
assert expected == ret
def test_tool_add(self, swh_storage):
tool = {
'name': 'some-unknown-tool',
'version': 'some-version',
'configuration': {"debian-package": "some-package"},
}
actual_tool = swh_storage.tool_get(tool)
assert actual_tool is None # does not exist
# add it
actual_tools = swh_storage.tool_add([tool])
assert len(actual_tools) == 1
actual_tool = actual_tools[0]
assert actual_tool is not None # now it exists
new_id = actual_tool.pop('id')
assert actual_tool == tool
actual_tools2 = swh_storage.tool_add([tool])
actual_tool2 = actual_tools2[0]
assert actual_tool2 is not None # now it exists
new_id2 = actual_tool2.pop('id')
assert new_id == new_id2
assert actual_tool == actual_tool2
def test_tool_add_multiple(self, swh_storage):
tool = {
'name': 'some-unknown-tool',
'version': 'some-version',
'configuration': {"debian-package": "some-package"},
}
actual_tools = list(swh_storage.tool_add([tool]))
assert len(actual_tools) == 1
new_tools = [tool, {
'name': 'yet-another-tool',
'version': 'version',
'configuration': {},
}]
actual_tools = swh_storage.tool_add(new_tools)
assert len(actual_tools) == 2
# order not guaranteed, so we iterate over results to check
for tool in actual_tools:
_id = tool.pop('id')
assert _id is not None
assert tool in new_tools
def test_tool_get_missing(self, swh_storage):
tool = {
'name': 'unknown-tool',
'version': '3.1.0rc2-31-ga2cbb8c',
'configuration': {"command_line": "nomossa "},
}
actual_tool = swh_storage.tool_get(tool)
assert actual_tool is None
def test_tool_metadata_get_missing_context(self, swh_storage):
tool = {
'name': 'swh-metadata-translator',
'version': '0.0.1',
'configuration': {"context": "unknown-context"},
}
actual_tool = swh_storage.tool_get(tool)
assert actual_tool is None
def test_tool_metadata_get(self, swh_storage):
tool = {
'name': 'swh-metadata-translator',
'version': '0.0.1',
'configuration': {"type": "local", "context": "npm"},
}
expected_tool = swh_storage.tool_add([tool])[0]
# when
actual_tool = swh_storage.tool_get(tool)
# then
assert expected_tool == actual_tool
def test_metadata_provider_get(self, swh_storage):
# given
no_provider = swh_storage.metadata_provider_get(6459456445615)
assert no_provider is None
# when
provider_id = swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
actual_provider = swh_storage.metadata_provider_get(provider_id)
expected_provider = {
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
}
# then
del actual_provider['id']
for key, value in expected_provider.items():
    assert actual_provider[key] == value
def test_metadata_provider_get_by(self, swh_storage):
# given
no_provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
assert no_provider is None
# when
provider_id = swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
actual_provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
# then
assert provider_id == actual_provider['id']
@pytest.mark.parametrize('use_url', [True, False])
def test_origin_metadata_add(self, swh_storage, use_url):
if not self._test_origin_ids:
pytest.skip('requires origin id')
# given
origin = swh_storage.origin_add([data.origin])[0]
tools = swh_storage.tool_add([data.metadata_tool])
tool = tools[0]
swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
# when adding 2 metadata entries for the same origin
origin = origin['url' if use_url else 'id']
n_om = len(list(swh_storage.origin_metadata_get_by(origin)))
swh_storage.origin_metadata_add(
origin,
data.origin_metadata['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata['metadata'])
swh_storage.origin_metadata_add(
origin,
'2015-01-01 23:00:00+00',
provider['id'],
tool['id'],
data.origin_metadata2['metadata'])
n_actual_om = len(list(swh_storage.origin_metadata_get_by(origin)))
# then
assert n_actual_om == n_om + 2
def test_origin_metadata_get(self, swh_storage):
if not self._test_origin_ids:
pytest.skip('requires origin id')
# given
origin_id = swh_storage.origin_add([data.origin])[0]['id']
origin_id2 = swh_storage.origin_add([data.origin2])[0]['id']
swh_storage.metadata_provider_add(data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
provider = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
tool = swh_storage.tool_add([data.metadata_tool])[0]
# when adding 2 metadata entries for the same origin
swh_storage.origin_metadata_add(
origin_id,
data.origin_metadata['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata['metadata'])
swh_storage.origin_metadata_add(
origin_id2,
data.origin_metadata2['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata2['metadata'])
swh_storage.origin_metadata_add(
origin_id,
data.origin_metadata2['discovery_date'],
provider['id'],
tool['id'],
data.origin_metadata2['metadata'])
all_metadatas = list(sorted(swh_storage.origin_metadata_get_by(
origin_id), key=lambda x: x['discovery_date']))
metadatas_for_origin2 = list(swh_storage.origin_metadata_get_by(
origin_id2))
expected_results = [{
'origin_id': origin_id,
'discovery_date': datetime.datetime(
2015, 1, 1, 23, 0,
tzinfo=datetime.timezone.utc),
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
},
'provider_id': provider['id'],
'provider_name': 'hal',
'provider_type': 'deposit-client',
'provider_url': 'http:///hal/inria',
'tool_id': tool['id']
}, {
'origin_id': origin_id,
'discovery_date': datetime.datetime(
2017, 1, 1, 23, 0,
tzinfo=datetime.timezone.utc),
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
},
'provider_id': provider['id'],
'provider_name': 'hal',
'provider_type': 'deposit-client',
'provider_url': 'http:///hal/inria',
'tool_id': tool['id']
}]
# then
assert len(all_metadatas) == 2
assert len(metadatas_for_origin2) == 1
assert all_metadatas == expected_results
def test_metadata_provider_add(self, swh_storage):
provider = {
'provider_name': 'swMATH',
'provider_type': 'registry',
'provider_url': 'http://www.swmath.org/',
'metadata': {
'email': 'contact@swmath.org',
'license': 'All rights reserved'
}
}
provider['id'] = provider_id = swh_storage.metadata_provider_add(
**provider)
assert provider == swh_storage.metadata_provider_get_by(
{'provider_name': 'swMATH',
'provider_url': 'http://www.swmath.org/'})
assert provider == swh_storage.metadata_provider_get(provider_id)
def test_origin_metadata_get_by_provider_type(self, swh_storage):
# given
if not self._test_origin_ids:
pytest.skip('requires origin id')
origin_id = swh_storage.origin_add([data.origin])[0]['id']
origin_id2 = swh_storage.origin_add([data.origin2])[0]['id']
provider1_id = swh_storage.metadata_provider_add(
data.provider['name'],
data.provider['type'],
data.provider['url'],
data.provider['metadata'])
provider1 = swh_storage.metadata_provider_get_by({
'provider_name': data.provider['name'],
'provider_url': data.provider['url']
})
assert provider1 == swh_storage.metadata_provider_get(provider1_id)
provider2_id = swh_storage.metadata_provider_add(
'swMATH',
'registry',
'http://www.swmath.org/',
{'email': 'contact@swmath.org',
'license': 'All rights reserved'})
provider2 = swh_storage.metadata_provider_get_by({
'provider_name': 'swMATH',
'provider_url': 'http://www.swmath.org/'
})
assert provider2 == swh_storage.metadata_provider_get(provider2_id)
# use the only tool currently inserted in data.sql; for this provider
# it should eventually be a crawler tool (not yet implemented)
tool = swh_storage.tool_add([data.metadata_tool])[0]
# when adding one metadata entry for each origin
swh_storage.origin_metadata_add(
origin_id,
data.origin_metadata['discovery_date'],
provider1['id'],
tool['id'],
data.origin_metadata['metadata'])
swh_storage.origin_metadata_add(
origin_id2,
data.origin_metadata2['discovery_date'],
provider2['id'],
tool['id'],
data.origin_metadata2['metadata'])
provider_type = 'registry'
m_by_provider = list(swh_storage.origin_metadata_get_by(
origin_id2,
provider_type))
for item in m_by_provider:
if 'id' in item:
del item['id']
expected_results = [{
'origin_id': origin_id2,
'discovery_date': datetime.datetime(
2017, 1, 1, 23, 0,
tzinfo=datetime.timezone.utc),
'metadata': {
'name': 'test_origin_metadata',
'version': '0.0.1'
},
'provider_id': provider2['id'],
'provider_name': 'swMATH',
'provider_type': provider_type,
'provider_url': 'http://www.swmath.org/',
'tool_id': tool['id']
}]
# then
assert len(m_by_provider) == 1
assert m_by_provider == expected_results
class TestStorageGeneratedData:
_test_origin_ids = True
def assert_contents_ok(self, expected_contents, actual_contents,
keys_to_check={'sha1', 'data'}):
"""Assert that a given list of contents matches on a given set of keys.
"""
for k in keys_to_check:
expected_list = set([c.get(k) for c in expected_contents])
actual_list = set([c.get(k) for c in actual_contents])
assert actual_list == expected_list, k
def test_generate_content_get(self, swh_storage, swh_contents):
contents_with_data = [c for c in swh_contents
if c['status'] != 'absent']
# input the list of sha1s we want from storage
get_sha1s = [c['sha1'] for c in contents_with_data]
# retrieve contents
actual_contents = list(swh_storage.content_get(get_sha1s))
assert None not in actual_contents
self.assert_contents_ok(contents_with_data, actual_contents)
def test_generate_content_get_metadata(self, swh_storage, swh_contents):
# input the list of sha1s we want from storage
expected_contents = [c for c in swh_contents
if c['status'] != 'absent']
get_sha1s = [c['sha1'] for c in expected_contents]
# retrieve contents
actual_contents = list(swh_storage.content_get_metadata(get_sha1s))
assert len(actual_contents) == len(get_sha1s)
keys_to_check = {'length', 'status',
'sha1', 'sha1_git', 'sha256', 'blake2s256'}
self.assert_contents_ok(expected_contents, actual_contents,
keys_to_check=keys_to_check)
def test_generate_content_get_range(self, swh_storage, swh_contents):
"""content_get_range paginates results if limit exceeded"""
# add contents to storage
present_contents = [c for c in swh_contents
if c['status'] != 'absent']
get_sha1s = sorted([c['sha1'] for c in swh_contents
if c['status'] != 'absent'])
start = get_sha1s[2]
end = get_sha1s[-2]
actual_result = swh_storage.content_get_range(start, end)
assert actual_result['next'] is None
actual_contents = actual_result['contents']
expected_contents = [c for c in present_contents
if start <= c['sha1'] <= end]
if expected_contents:
self.assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
else:
assert actual_contents == []
def test_generate_content_get_range_full(self, swh_storage, swh_contents):
"""content_get_range for a full range returns all available contents"""
present_contents = [c for c in swh_contents
if c['status'] != 'absent']
start = b'0' * 40
end = b'f' * 40
actual_result = swh_storage.content_get_range(start, end)
assert actual_result['next'] is None
actual_contents = actual_result['contents']
expected_contents = [c for c in present_contents
if start <= c['sha1'] <= end]
if expected_contents:
self.assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
else:
assert actual_contents == []
def test_generate_content_get_range_empty(self, swh_storage, swh_contents):
"""content_get_range for an empty range returns nothing"""
start = b'0' * 40
end = b'f' * 40
actual_result = swh_storage.content_get_range(end, start)
assert actual_result['next'] is None
assert len(actual_result['contents']) == 0
def test_generate_content_get_range_limit_none(self, swh_storage):
"""content_get_range call with wrong limit input should fail"""
with pytest.raises(ValueError) as e:
swh_storage.content_get_range(start=None, end=None, limit=None)
assert e.value.args == ('Development error: limit should not be None',)
def test_generate_content_get_range_no_limit(
self, swh_storage, swh_contents):
"""content_get_range returns contents within range provided"""
# add contents to storage
# input the list of sha1s we want from storage
get_sha1s = sorted([c['sha1'] for c in swh_contents
if c['status'] != 'absent'])
start = get_sha1s[0]
end = get_sha1s[-1]
# retrieve contents
actual_result = swh_storage.content_get_range(start, end)
actual_contents = actual_result['contents']
assert actual_result['next'] is None
assert len(actual_contents) == len(get_sha1s)
expected_contents = [c for c in swh_contents
if c['status'] != 'absent']
self.assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
def test_generate_content_get_range_limit(self, swh_storage, swh_contents):
"""content_get_range paginates results if limit exceeded"""
contents_map = {c['sha1']: c for c in swh_contents}
# input the list of sha1s we want from storage
get_sha1s = sorted([c['sha1'] for c in swh_contents
if c['status'] != 'absent'])
start = get_sha1s[0]
end = get_sha1s[-1]
# retrieve contents limited to n-1 results
limited_results = len(get_sha1s) - 1
actual_result = swh_storage.content_get_range(
start, end, limit=limited_results)
actual_contents = actual_result['contents']
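# 'next' holds the first sha1 that was not returned, i.e. the start of the
# next page requested below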
assert actual_result['next'] == get_sha1s[-1]
assert len(actual_contents) == limited_results
expected_contents = [contents_map[sha1] for sha1 in get_sha1s[:-1]]
self.assert_contents_ok(
expected_contents, actual_contents, ['sha1'])
# retrieve next part
actual_results2 = swh_storage.content_get_range(start=end, end=end)
assert actual_results2['next'] is None
actual_contents2 = actual_results2['contents']
assert len(actual_contents2) == 1
self.assert_contents_ok(
[contents_map[get_sha1s[-1]]], actual_contents2, ['sha1'])
def test_origin_get_invalid_id_legacy(self, swh_storage):
if self._test_origin_ids:
invalid_origin_id = 1
origin_info = swh_storage.origin_get({'id': invalid_origin_id})
assert origin_info is None
origin_visits = list(swh_storage.origin_visit_get(
invalid_origin_id))
assert origin_visits == []
def test_origin_get_invalid_id(self, swh_storage):
if self._test_origin_ids:
origin_info = swh_storage.origin_get([{'id': 1}, {'id': 2}])
assert origin_info == [None, None]
origin_visits = list(swh_storage.origin_visit_get(1))
assert origin_visits == []
def test_origin_get_range(self, swh_storage, swh_origins):
if not self._test_origin_ids:
pytest.skip('requires origin id')
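# the swh_origins fixture provides 20 origins with sequential ids 1..20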
actual_origins = list(
swh_storage.origin_get_range(origin_from=0,
origin_count=0))
assert len(actual_origins) == 0
actual_origins = list(
swh_storage.origin_get_range(origin_from=0,
origin_count=1))
assert len(actual_origins) == 1
assert actual_origins[0]['id'] == 1
actual_origins = list(
swh_storage.origin_get_range(origin_from=1,
origin_count=1))
assert len(actual_origins) == 1
assert actual_origins[0]['id'] == 1
actual_origins = list(
swh_storage.origin_get_range(origin_from=1,
origin_count=10))
assert len(actual_origins) == 10
assert actual_origins[0]['id'] == 1
assert actual_origins[-1]['id'] == 10
actual_origins = list(
swh_storage.origin_get_range(origin_from=1,
origin_count=20))
assert len(actual_origins) == 20
assert actual_origins[0]['id'] == 1
assert actual_origins[-1]['id'] == 20
actual_origins = list(
swh_storage.origin_get_range(origin_from=1,
origin_count=21))
assert len(actual_origins) == 20
assert actual_origins[0]['id'] == 1
assert actual_origins[-1]['id'] == 20
actual_origins = list(
swh_storage.origin_get_range(origin_from=11,
origin_count=0))
assert len(actual_origins) == 0
actual_origins = list(
swh_storage.origin_get_range(origin_from=11,
origin_count=10))
assert len(actual_origins) == 10
assert actual_origins[0]['id'] == 11
assert actual_origins[-1]['id'] == 20
actual_origins = list(
swh_storage.origin_get_range(origin_from=11,
origin_count=11))
assert len(actual_origins) == 10
assert actual_origins[0]['id'] == 11
assert actual_origins[-1]['id'] == 20
def test_origin_count(self, swh_storage):
new_origins = [
{
'type': 'git',
'url': 'https://github.com/user1/repo1'
},
{
'type': 'git',
'url': 'https://github.com/user2/repo1'
},
{
'type': 'git',
'url': 'https://github.com/user3/repo1'
},
{
'type': 'git',
'url': 'https://gitlab.com/user1/repo1'
},
{
'type': 'git',
'url': 'https://gitlab.com/user2/repo1'
}
]
swh_storage.origin_add(new_origins)
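# by default the pattern is matched as a plain substring; regex syntax only
# applies with regexp=True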
assert swh_storage.origin_count('github') == 3
assert swh_storage.origin_count('gitlab') == 2
assert swh_storage.origin_count('.*user.*', regexp=True) == 5
assert swh_storage.origin_count('.*user.*', regexp=False) == 0
assert swh_storage.origin_count('.*user1.*', regexp=True) == 2
assert swh_storage.origin_count('.*user1.*', regexp=False) == 0
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(strategies.lists(objects(), max_size=2))
def test_add_arbitrary(self, swh_storage, objects):
for (obj_type, obj) in objects:
obj = obj.to_dict()
if obj_type == 'origin_visit':
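# a visit references an origin object; add the origin first and let the
# backend assign the visit id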
origin_id = swh_storage.origin_add_one(obj.pop('origin'))
if 'visit' in obj:
del obj['visit']
swh_storage.origin_visit_add(
origin_id, obj['date'], obj['type'])
else:
method = getattr(swh_storage, obj_type + '_add')
try:
method([obj])
except HashCollision:
pass
@pytest.mark.db
class TestLocalStorage:
"""Test the local storage"""
_test_origin_ids = True
- # Can only be tested with local storage as you can't mock
- # datetimes for the remote server
- @pytest.mark.parametrize('use_url', [True, False])
- def test_fetch_history(self, swh_storage, use_url):
- if not self._test_origin_ids and not use_url:
- return
-
- origin_id = swh_storage.origin_add_one(data.origin)
- origin_id_or_url = data.origin['url'] if use_url else origin_id
- with patch('datetime.datetime'):
- datetime.datetime.now.return_value = data.fetch_history_date
- fetch_history_id = swh_storage.fetch_history_start(
- origin_id_or_url)
- datetime.datetime.now.assert_called_with(tz=datetime.timezone.utc)
-
- with patch('datetime.datetime'):
- datetime.datetime.now.return_value = data.fetch_history_end
- swh_storage.fetch_history_end(fetch_history_id,
- data.fetch_history_data)
-
- fetch_history = swh_storage.fetch_history_get(fetch_history_id)
- expected_fetch_history = data.fetch_history_data.copy()
-
- expected_fetch_history['id'] = fetch_history_id
- expected_fetch_history['origin'] = origin_id
- expected_fetch_history['date'] = data.fetch_history_date
- expected_fetch_history['duration'] = data.fetch_history_duration
-
- assert expected_fetch_history == fetch_history
-
# This test is only relevant on the local storage, with an actual
# objstorage raising an exception
def test_content_add_objstorage_exception(self, swh_storage):
swh_storage.objstorage.add = Mock(
side_effect=Exception('mocked broken objstorage')
)
with pytest.raises(Exception) as e:
swh_storage.content_add([data.cont])
assert e.value.args == ('mocked broken objstorage',)
missing = list(swh_storage.content_missing([data.cont]))
assert missing == [data.cont['sha1']]
@pytest.mark.db
class TestStorageRaceConditions:
@pytest.mark.xfail
def test_content_add_race(self, swh_storage):
results = queue.Queue()
def thread():
try:
with db_transaction(swh_storage) as (db, cur):
ret = swh_storage.content_add([data.cont], db=db,
cur=cur)
results.put((threading.get_ident(), 'data', ret))
except Exception as e:
results.put((threading.get_ident(), 'exc', e))
t1 = threading.Thread(target=thread)
t2 = threading.Thread(target=thread)
t1.start()
# this avoids the race condition
# import time
# time.sleep(1)
t2.start()
t1.join()
t2.join()
r1 = results.get(block=False)
r2 = results.get(block=False)
with pytest.raises(queue.Empty):
results.get(block=False)
assert r1[0] != r2[0]
assert r1[1] == 'data', 'Got exception %r in Thread%s' % (r1[2], r1[0])
assert r2[1] == 'data', 'Got exception %r in Thread%s' % (r2[2], r2[0])
@pytest.mark.db
class TestPgStorage:
"""This class is dedicated for the rare case where the schema needs to
be altered dynamically.
Otherwise, the tests could be blocking when ran altogether.
"""
def test_content_update(self, swh_storage):
swh_storage.journal_writer = None # TODO, not supported
cont = copy.deepcopy(data.cont)
swh_storage.content_add([cont])
# alter the sha1_git for example
cont['sha1_git'] = hash_to_bytes(
'3a60a5275d0333bf13468e8b3dcab90f4046e654')
swh_storage.content_update([cont], keys=['sha1_git'])
with db_transaction(swh_storage) as (_, cur):
cur.execute('SELECT sha1, sha1_git, sha256, length, status'
' FROM content WHERE sha1 = %s',
(cont['sha1'],))
datum = cur.fetchone()
assert datum == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['length'], 'visible')
def test_content_update_with_new_cols(self, swh_storage):
swh_storage.journal_writer = None # TODO, not supported
with db_transaction(swh_storage) as (_, cur):
cur.execute("""alter table content
add column test text default null,
add column test2 text default null""")
cont = copy.deepcopy(data.cont2)
swh_storage.content_add([cont])
cont['test'] = 'value-1'
cont['test2'] = 'value-2'
swh_storage.content_update([cont], keys=['test', 'test2'])
with db_transaction(swh_storage) as (_, cur):
cur.execute(
'''SELECT sha1, sha1_git, sha256, length, status,
test, test2
FROM content WHERE sha1 = %s''',
(cont['sha1'],))
datum = cur.fetchone()
assert datum == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['length'], 'visible',
cont['test'], cont['test2'])
with db_transaction(swh_storage) as (_, cur):
cur.execute("""alter table content drop column test,
drop column test2""")
def test_content_add_db(self, swh_storage):
cont = data.cont
actual_result = swh_storage.content_add([cont])
assert actual_result == {
'content:add': 1,
'content:add:bytes': cont['length'],
'skipped_content:add': 0
}
if hasattr(swh_storage, 'objstorage'):
assert cont['sha1'] in swh_storage.objstorage
with db_transaction(swh_storage) as (_, cur):
cur.execute('SELECT sha1, sha1_git, sha256, length, status'
' FROM content WHERE sha1 = %s',
(cont['sha1'],))
datum = cur.fetchone()
assert datum == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['length'], 'visible')
expected_cont = cont.copy()
del expected_cont['data']
journal_objects = list(swh_storage.journal_writer.objects)
for (obj_type, obj) in journal_objects:
del obj['ctime']
assert journal_objects == [('content', expected_cont)]
def test_content_add_metadata_db(self, swh_storage):
cont = data.cont
del cont['data']
cont['ctime'] = datetime.datetime.now()
actual_result = swh_storage.content_add_metadata([cont])
assert actual_result == {
'content:add': 1,
'skipped_content:add': 0
}
if hasattr(swh_storage, 'objstorage'):
assert cont['sha1'] not in swh_storage.objstorage
with db_transaction(swh_storage) as (_, cur):
cur.execute('SELECT sha1, sha1_git, sha256, length, status'
' FROM content WHERE sha1 = %s',
(cont['sha1'],))
datum = cur.fetchone()
assert datum == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['length'], 'visible')
assert list(swh_storage.journal_writer.objects) == [('content', cont)]
def test_skipped_content_add_db(self, swh_storage):
cont = data.skipped_cont
cont2 = data.skipped_cont2
cont2['blake2s256'] = None
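# cont appears twice in the input but is deduplicated, so only two skipped
# contents are added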
actual_result = swh_storage.content_add([cont, cont, cont2])
assert actual_result == {
'content:add': 0,
'content:add:bytes': 0,
'skipped_content:add': 2,
}
with db_transaction(swh_storage) as (_, cur):
cur.execute('SELECT sha1, sha1_git, sha256, blake2s256, '
'length, status, reason '
'FROM skipped_content ORDER BY sha1_git')
dbdata = cur.fetchall()
assert len(dbdata) == 2
assert dbdata[0] == (cont['sha1'], cont['sha1_git'], cont['sha256'],
cont['blake2s256'], cont['length'], 'absent',
'Content too long')
assert dbdata[1] == (cont2['sha1'], cont2['sha1_git'], cont2['sha256'],
cont2['blake2s256'], cont2['length'], 'absent',
'Content too long')