diff --git a/swh/storage/api/client.py b/swh/storage/api/client.py
index 167be33c..1b4c0aa1 100644
--- a/swh/storage/api/client.py
+++ b/swh/storage/api/client.py
@@ -1,286 +1,289 @@
# Copyright (C) 2015-2017 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import warnings
from swh.core.api import SWHRemoteAPI
from ..exc import StorageAPIError
class RemoteStorage(SWHRemoteAPI):
    """Proxy to a remote storage API.

    Every method serializes its arguments and forwards the call to the
    matching endpoint of the remote storage server, returning the
    decoded response.
    """

    # Exception type raised by the base class on transport/server errors.
    api_exception = StorageAPIError

    def check_config(self, *, check_write):
        return self.post('check_config', {'check_write': check_write})

    def reset(self):
        return self.post('reset', {})

    # -- content ---------------------------------------------------------

    def content_add(self, content):
        return self.post('content/add', {'content': content})

    def content_add_metadata(self, content):
        return self.post('content/add_metadata', {'content': content})

    def content_update(self, content, keys=None):
        # 'keys' previously defaulted to a mutable list ([]), which is
        # shared across calls; use a None sentinel instead. The payload
        # sent to the server is unchanged.
        if keys is None:
            keys = []
        return self.post('content/update', {'content': content,
                                            'keys': keys})

    def content_missing(self, content, key_hash='sha1'):
        return self.post('content/missing', {'content': content,
                                             'key_hash': key_hash})

    def content_missing_per_sha1(self, contents):
        return self.post('content/missing/sha1', {'contents': contents})

    def skipped_content_missing(self, contents):
        return self.post('content/skipped/missing', {'contents': contents})

    def content_get(self, content):
        return self.post('content/data', {'content': content})

    def content_get_metadata(self, content):
        return self.post('content/metadata', {'content': content})

    def content_get_range(self, start, end, limit=1000):
        return self.post('content/range', {'start': start,
                                           'end': end,
                                           'limit': limit})

    def content_find(self, content):
        return self.post('content/present', {'content': content})

    # -- directory -------------------------------------------------------

    def directory_add(self, directories):
        return self.post('directory/add', {'directories': directories})

    def directory_missing(self, directories):
        return self.post('directory/missing', {'directories': directories})

    def directory_ls(self, directory, recursive=False):
        return self.post('directory/ls', {'directory': directory,
                                          'recursive': recursive})

    # -- revision --------------------------------------------------------

    def revision_get(self, revisions):
        return self.post('revision', {'revisions': revisions})

    def revision_log(self, revisions, limit=None):
        return self.post('revision/log', {'revisions': revisions,
                                          'limit': limit})

    def revision_shortlog(self, revisions, limit=None):
        return self.post('revision/shortlog', {'revisions': revisions,
                                               'limit': limit})

    def revision_add(self, revisions):
        return self.post('revision/add', {'revisions': revisions})

    def revision_missing(self, revisions):
        return self.post('revision/missing', {'revisions': revisions})

    # -- release ---------------------------------------------------------

    def release_add(self, releases):
        return self.post('release/add', {'releases': releases})

    def release_get(self, releases):
        return self.post('release', {'releases': releases})

    def release_missing(self, releases):
        return self.post('release/missing', {'releases': releases})

    def object_find_by_sha1_git(self, ids):
        return self.post('object/find_by_sha1_git', {'ids': ids})

    # -- snapshot --------------------------------------------------------

    def snapshot_add(self, snapshots, origin=None, visit=None):
        """Add snapshots.

        The legacy positional convention ``snapshot_add(origin, visit,
        snapshots)`` is still accepted, but deprecated since v0.0.131.
        """
        if origin:
            assert visit
            # Legacy call: arguments arrived as (origin, visit, snapshots).
            (origin, visit, snapshots) = (snapshots, origin, visit)
            warnings.warn("arguments 'origin' and 'visit' of snapshot_add "
                          "are deprecated since v0.0.131, please use "
                          "snapshot_add([snapshot]) + "
                          "origin_visit_update(origin, visit, "
                          "snapshot=snapshot['id']) instead.",
                          DeprecationWarning)
            return self.post('snapshot/add', {
                'origin': origin, 'visit': visit, 'snapshots': snapshots,
            })
        else:
            assert not visit
            return self.post('snapshot/add', {
                'snapshots': snapshots,
            })

    def snapshot_get(self, snapshot_id):
        return self.post('snapshot', {
            'snapshot_id': snapshot_id
        })

    def snapshot_get_by_origin_visit(self, origin, visit):
        return self.post('snapshot/by_origin_visit', {
            'origin': origin,
            'visit': visit
        })

    def snapshot_get_latest(self, origin, allowed_statuses=None):
        return self.post('snapshot/latest', {
            'origin': origin,
            'allowed_statuses': allowed_statuses
        })

    def snapshot_count_branches(self, snapshot_id):
        return self.post('snapshot/count_branches', {
            'snapshot_id': snapshot_id
        })

    def snapshot_get_branches(self, snapshot_id, branches_from=b'',
                              branches_count=1000, target_types=None):
        return self.post('snapshot/get_branches', {
            'snapshot_id': snapshot_id,
            'branches_from': branches_from,
            'branches_count': branches_count,
            'target_types': target_types
        })

    # -- origin ----------------------------------------------------------

    def origin_get(self, origins=None, *, origin=None):
        """Get origins. The keyword 'origin' is the deprecated (v0.0.123)
        spelling of 'origins'."""
        if origin is None:
            if origins is None:
                raise TypeError('origin_get expected 1 argument')
        else:
            assert origins is None
            origins = origin
            warnings.warn("argument 'origin' of origin_get was renamed "
                          "to 'origins' in v0.0.123.",
                          DeprecationWarning)
        return self.post('origin/get', {'origins': origins})

    def origin_search(self, url_pattern, offset=0, limit=50, regexp=False,
                      with_visit=False):
        return self.post('origin/search', {'url_pattern': url_pattern,
                                           'offset': offset,
                                           'limit': limit,
                                           'regexp': regexp,
                                           'with_visit': with_visit})

    def origin_count(self, url_pattern, regexp=False, with_visit=False):
        return self.post('origin/count', {'url_pattern': url_pattern,
                                          'regexp': regexp,
                                          'with_visit': with_visit})

    def origin_get_range(self, origin_from=1, origin_count=100):
        return self.post('origin/get_range', {'origin_from': origin_from,
                                              'origin_count': origin_count})

    def origin_add(self, origins):
        return self.post('origin/add_multi', {'origins': origins})

    def origin_add_one(self, origin):
        return self.post('origin/add', {'origin': origin})

    def origin_visit_add(self, origin, date, type=None, *, ts=None):
        """Add an origin visit. The keyword 'ts' is the deprecated
        (v0.0.109) spelling of 'date'."""
        if ts is None:
            if date is None:
                raise TypeError('origin_visit_add expected 2 arguments.')
        else:
            assert date is None
            warnings.warn("argument 'ts' of origin_visit_add was renamed "
                          "to 'date' in v0.0.109.",
                          DeprecationWarning)
            date = ts
        return self.post(
            'origin/visit/add',
            {'origin': origin, 'date': date, 'type': type})

    def origin_visit_update(self, origin, visit_id, status=None,
                            metadata=None, snapshot=None):
        return self.post('origin/visit/update', {'origin': origin,
                                                 'visit_id': visit_id,
                                                 'status': status,
                                                 'metadata': metadata,
                                                 'snapshot': snapshot})

    def origin_visit_upsert(self, visits):
        return self.post('origin/visit/upsert', {'visits': visits})

    def origin_visit_get(self, origin, last_visit=None, limit=None):
        return self.post('origin/visit/get', {
            'origin': origin, 'last_visit': last_visit, 'limit': limit})

    def origin_visit_get_by(self, origin, visit):
        return self.post('origin/visit/getby', {'origin': origin,
                                                'visit': visit})

    def origin_visit_get_latest(self, origin, allowed_statuses=None,
                                require_snapshot=False):
        return self.post(
            'origin/visit/get_latest',
            {'origin': origin, 'allowed_statuses': allowed_statuses,
             'require_snapshot': require_snapshot})

    # -- misc ------------------------------------------------------------

    def person_get(self, person):
        return self.post('person', {'person': person})

    def fetch_history_start(self, origin_id):
        return self.post('fetch_history/start', {'origin_id': origin_id})

    def fetch_history_end(self, fetch_history_id, data):
        return self.post('fetch_history/end',
                         {'fetch_history_id': fetch_history_id,
                          'data': data})

    def fetch_history_get(self, fetch_history_id):
        # Note: this endpoint is a GET, unlike the rest of the API.
        return self.get('fetch_history', {'id': fetch_history_id})

    def stat_counters(self):
        return self.get('stat/counters')

    def directory_entry_get_by_path(self, directory, paths):
        return self.post('directory/path', dict(directory=directory,
                                                paths=paths))

    def tool_add(self, tools):
        return self.post('tool/add', {'tools': tools})

    def tool_get(self, tool):
        return self.post('tool/data', {'tool': tool})

    def origin_metadata_add(self, origin_id, ts, provider, tool, metadata):
        return self.post('origin/metadata/add', {'origin_id': origin_id,
                                                 'ts': ts,
                                                 'provider': provider,
                                                 'tool': tool,
                                                 'metadata': metadata})

    def origin_metadata_get_by(self, origin_id, provider_type=None):
        return self.post('origin/metadata/get', {
            'origin_id': origin_id,
            'provider_type': provider_type
        })

    def metadata_provider_add(self, provider_name, provider_type, provider_url,
                              metadata):
        return self.post('provider/add', {'provider_name': provider_name,
                                          'provider_type': provider_type,
                                          'provider_url': provider_url,
                                          'metadata': metadata})

    def metadata_provider_get(self, provider_id):
        return self.post('provider/get', {'provider_id': provider_id})

    def metadata_provider_get_by(self, provider):
        return self.post('provider/getby', {'provider': provider})

    def diff_directories(self, from_dir, to_dir, track_renaming=False):
        return self.post('algos/diff_directories',
                         {'from_dir': from_dir,
                          'to_dir': to_dir,
                          'track_renaming': track_renaming})

    def diff_revisions(self, from_rev, to_rev, track_renaming=False):
        return self.post('algos/diff_revisions',
                         {'from_rev': from_rev,
                          'to_rev': to_rev,
                          'track_renaming': track_renaming})

    def diff_revision(self, revision, track_renaming=False):
        return self.post('algos/diff_revision',
                         {'revision': revision,
                          'track_renaming': track_renaming})
diff --git a/swh/storage/api/server.py b/swh/storage/api/server.py
index 1296e5b3..52b4a7ce 100644
--- a/swh/storage/api/server.py
+++ b/swh/storage/api/server.py
@@ -1,607 +1,613 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
import logging
from flask import request
from functools import wraps
from swh.core import config
from swh.storage import get_storage as get_swhstorage
from swh.core.api import (SWHServerAPIApp, decode_request,
error_handler,
encode_data_server as encode_data)
from swh.core.statsd import statsd
app = SWHServerAPIApp(__name__)
storage = None
OPERATIONS_METRIC = 'swh_storage_operations_total'
OPERATIONS_UNIT_METRIC = "swh_storage_operations_{unit}_total"
DURATION_METRIC = "swh_storage_request_duration_seconds"
def timed(f):
    """Decorator reporting the endpoint's wall-clock duration to statsd.

    The time spent inside ``f`` is recorded under DURATION_METRIC,
    tagged with the wrapped function's name as ``endpoint``.
    """
    @wraps(f)
    def timed_wrapper(*args, **kwargs):
        timer = statsd.timed(DURATION_METRIC, tags={'endpoint': f.__name__})
        with timer:
            return f(*args, **kwargs)
    return timed_wrapper
def encode(f):
    """Decorator serializing the wrapped endpoint's return value."""
    @wraps(f)
    def encode_wrapper(*args, **kwargs):
        return encode_data(f(*args, **kwargs))
    return encode_wrapper
def send_metric(metric, count, method_name):
    """Send statsd metric with count for method `method_name`

    If count is 0, the metric is discarded. If the metric is not
    parseable, the metric is discarded with a log message.

    Args:
        metric (str): Metric's name (e.g content:add, content:add:bytes)
        count (int): Associated value for the metric
        method_name (str): Method's name

    Returns:
        Bool to explicit if metric has been set or not
    """
    if count == 0:
        return False
    parts = metric.split(':')
    if len(parts) == 2:
        object_type, operation = parts
        metric_name = OPERATIONS_METRIC
    elif len(parts) == 3:
        object_type, operation, unit = parts
        metric_name = OPERATIONS_UNIT_METRIC.format(unit=unit)
    else:
        # Lazy %-style arguments: the message is only built if the
        # record is actually emitted (was eager '%' interpolation).
        logging.warning('Skipping unknown metric {%s: %s}', metric, count)
        return False
    statsd.increment(
        metric_name, count, tags={
            'endpoint': method_name,
            'object_type': object_type,
            'operation': operation,
        })
    return True
def process_metrics(f):
    """Increment object counters for the decorated function.

    Expects ``f`` to return a summary mapping of metric name to count;
    each entry is forwarded to send_metric.
    """
    @wraps(f)
    def metrics_wrapper(*args, **kwargs):
        summary = f(*args, **kwargs)
        for metric_name, metric_count in summary.items():
            send_metric(metric=metric_name, count=metric_count,
                        method_name=f.__name__)
        return summary
    return metrics_wrapper
# Convert any uncaught exception into an encoded HTTP error response.
@app.errorhandler(Exception)
def my_error_handler(exception):
    return error_handler(exception, encode_data)
def get_storage():
    """Return the process-wide storage backend, instantiating it from
    the app configuration on first use.
    """
    global storage
    # 'is None' rather than truthiness: a falsy-but-valid backend object
    # would otherwise be re-instantiated on every request.
    if storage is None:
        storage = get_swhstorage(**app.config['storage'])
    return storage
# Human-readable landing page served on the root URL.
@app.route('/')
@timed
def index():
    return '''
Software Heritage storage server
You have reached the
Software Heritage
storage server.
See its
documentation
and API for more information
'''
# Forward the configuration sanity check to the backend storage.
@app.route('/check_config', methods=['POST'])
@timed
def check_config():
    return encode_data(get_storage().check_config(**decode_request(request)))
# Reset the backend storage state (forwards to the backend's reset()).
@app.route('/reset', methods=['POST'])
@timed
def reset():
    return encode_data(get_storage().reset(**decode_request(request)))
# RPC endpoints. Each route decodes the POSTed arguments and forwards
# them verbatim to the matching method on the backend storage; the
# result is encoded back to the client. Endpoints decorated with
# @process_metrics additionally report the returned add-counters to
# statsd; a few others call send_metric() by hand.
@app.route('/content/missing', methods=['POST'])
@timed
def content_missing():
    return encode_data(get_storage().content_missing(
        **decode_request(request)))


@app.route('/content/missing/sha1', methods=['POST'])
@timed
def content_missing_per_sha1():
    return encode_data(get_storage().content_missing_per_sha1(
        **decode_request(request)))


@app.route('/content/skipped/missing', methods=['POST'])
@timed
def skipped_content_missing():
    return encode_data(get_storage().skipped_content_missing(
        **decode_request(request)))


@app.route('/content/present', methods=['POST'])
@timed
def content_find():
    return encode_data(get_storage().content_find(**decode_request(request)))


@app.route('/content/add', methods=['POST'])
@timed
@encode
@process_metrics
def content_add():
    return get_storage().content_add(**decode_request(request))


@app.route('/content/add_metadata', methods=['POST'])
@timed
@encode
@process_metrics
def content_add_metadata():
    return get_storage().content_add_metadata(**decode_request(request))


@app.route('/content/update', methods=['POST'])
@timed
def content_update():
    return encode_data(get_storage().content_update(**decode_request(request)))


@app.route('/content/data', methods=['POST'])
@timed
def content_get():
    return encode_data(get_storage().content_get(**decode_request(request)))


@app.route('/content/metadata', methods=['POST'])
@timed
def content_get_metadata():
    return encode_data(get_storage().content_get_metadata(
        **decode_request(request)))


@app.route('/content/range', methods=['POST'])
@timed
def content_get_range():
    return encode_data(get_storage().content_get_range(
        **decode_request(request)))


@app.route('/directory/missing', methods=['POST'])
@timed
def directory_missing():
    return encode_data(get_storage().directory_missing(
        **decode_request(request)))


@app.route('/directory/add', methods=['POST'])
@timed
@encode
@process_metrics
def directory_add():
    return get_storage().directory_add(**decode_request(request))


@app.route('/directory/path', methods=['POST'])
@timed
def directory_entry_get_by_path():
    return encode_data(get_storage().directory_entry_get_by_path(
        **decode_request(request)))


@app.route('/directory/ls', methods=['POST'])
@timed
def directory_ls():
    return encode_data(get_storage().directory_ls(
        **decode_request(request)))


@app.route('/revision/add', methods=['POST'])
@timed
@encode
@process_metrics
def revision_add():
    return get_storage().revision_add(**decode_request(request))


@app.route('/revision', methods=['POST'])
@timed
def revision_get():
    return encode_data(get_storage().revision_get(**decode_request(request)))


@app.route('/revision/log', methods=['POST'])
@timed
def revision_log():
    return encode_data(get_storage().revision_log(**decode_request(request)))


@app.route('/revision/shortlog', methods=['POST'])
@timed
def revision_shortlog():
    return encode_data(get_storage().revision_shortlog(
        **decode_request(request)))


@app.route('/revision/missing', methods=['POST'])
@timed
def revision_missing():
    return encode_data(get_storage().revision_missing(
        **decode_request(request)))


@app.route('/release/add', methods=['POST'])
@timed
@encode
@process_metrics
def release_add():
    return get_storage().release_add(**decode_request(request))


@app.route('/release', methods=['POST'])
@timed
def release_get():
    return encode_data(get_storage().release_get(**decode_request(request)))


@app.route('/release/missing', methods=['POST'])
@timed
def release_missing():
    return encode_data(get_storage().release_missing(
        **decode_request(request)))


@app.route('/object/find_by_sha1_git', methods=['POST'])
@timed
def object_find_by_sha1_git():
    return encode_data(get_storage().object_find_by_sha1_git(
        **decode_request(request)))


@app.route('/snapshot/add', methods=['POST'])
@timed
@encode
@process_metrics
def snapshot_add():
    req_data = decode_request(request)
    # Accept the legacy singular 'snapshot' key from older clients.
    if 'snapshot' in req_data:
        req_data['snapshots'] = req_data.pop('snapshot')
    return get_storage().snapshot_add(**req_data)


@app.route('/snapshot', methods=['POST'])
@timed
def snapshot_get():
    return encode_data(get_storage().snapshot_get(**decode_request(request)))


@app.route('/snapshot/by_origin_visit', methods=['POST'])
@timed
def snapshot_get_by_origin_visit():
    return encode_data(get_storage().snapshot_get_by_origin_visit(
        **decode_request(request)))


@app.route('/snapshot/latest', methods=['POST'])
@timed
def snapshot_get_latest():
    return encode_data(get_storage().snapshot_get_latest(
        **decode_request(request)))


@app.route('/snapshot/count_branches', methods=['POST'])
@timed
def snapshot_count_branches():
    return encode_data(get_storage().snapshot_count_branches(
        **decode_request(request)))


@app.route('/snapshot/get_branches', methods=['POST'])
@timed
def snapshot_get_branches():
    return encode_data(get_storage().snapshot_get_branches(
        **decode_request(request)))


@app.route('/origin/get', methods=['POST'])
@timed
def origin_get():
    return encode_data(get_storage().origin_get(**decode_request(request)))


@app.route('/origin/get_range', methods=['POST'])
@timed
def origin_get_range():
    return encode_data(get_storage().origin_get_range(
        **decode_request(request)))


@app.route('/origin/search', methods=['POST'])
@timed
def origin_search():
    return encode_data(get_storage().origin_search(**decode_request(request)))


@app.route('/origin/count', methods=['POST'])
@timed
def origin_count():
    return encode_data(get_storage().origin_count(**decode_request(request)))


@app.route('/origin/add_multi', methods=['POST'])
@timed
@encode
def origin_add():
    origins = get_storage().origin_add(**decode_request(request))
    send_metric('origin:add', count=len(origins), method_name='origin_add')
    return origins


@app.route('/origin/add', methods=['POST'])
@timed
@encode
def origin_add_one():
    origin = get_storage().origin_add_one(**decode_request(request))
    send_metric('origin:add', count=1, method_name='origin_add_one')
    return origin


@app.route('/origin/visit/get', methods=['POST'])
@timed
def origin_visit_get():
    return encode_data(get_storage().origin_visit_get(
        **decode_request(request)))


@app.route('/origin/visit/getby', methods=['POST'])
@timed
def origin_visit_get_by():
    return encode_data(
        get_storage().origin_visit_get_by(**decode_request(request)))


@app.route('/origin/visit/get_latest', methods=['POST'])
@timed
def origin_visit_get_latest():
    return encode_data(
        get_storage().origin_visit_get_latest(**decode_request(request)))


@app.route('/origin/visit/add', methods=['POST'])
@timed
@encode
def origin_visit_add():
    origin_visit = get_storage().origin_visit_add(
        **decode_request(request))
    send_metric('origin_visit:add', count=1, method_name='origin_visit')
    return origin_visit


@app.route('/origin/visit/update', methods=['POST'])
@timed
def origin_visit_update():
    return encode_data(get_storage().origin_visit_update(
        **decode_request(request)))


@app.route('/origin/visit/upsert', methods=['POST'])
@timed
def origin_visit_upsert():
    return encode_data(get_storage().origin_visit_upsert(
        **decode_request(request)))


@app.route('/person', methods=['POST'])
@timed
def person_get():
    return encode_data(get_storage().person_get(**decode_request(request)))


# GET endpoint: the id comes from the query string, not a POST body.
@app.route('/fetch_history', methods=['GET'])
@timed
def fetch_history_get():
    return encode_data(get_storage().fetch_history_get(request.args['id']))


@app.route('/fetch_history/start', methods=['POST'])
@timed
def fetch_history_start():
    return encode_data(
        get_storage().fetch_history_start(**decode_request(request)))


@app.route('/fetch_history/end', methods=['POST'])
@timed
def fetch_history_end():
    return encode_data(
        get_storage().fetch_history_end(**decode_request(request)))


@app.route('/tool/data', methods=['POST'])
@timed
def tool_get():
    return encode_data(get_storage().tool_get(
        **decode_request(request)))


@app.route('/tool/add', methods=['POST'])
@timed
@encode
def tool_add():
    tools = get_storage().tool_add(**decode_request(request))
    send_metric('tool:add', count=len(tools), method_name='tool_add')
    return tools


@app.route('/origin/metadata/add', methods=['POST'])
@timed
@encode
def origin_metadata_add():
    origin_metadata = get_storage().origin_metadata_add(
        **decode_request(request))
    send_metric(
        'origin_metadata:add', count=1, method_name='origin_metadata_add')
    return origin_metadata


@app.route('/origin/metadata/get', methods=['POST'])
@timed
def origin_metadata_get_by():
    return encode_data(get_storage().origin_metadata_get_by(**decode_request(
        request)))


@app.route('/provider/add', methods=['POST'])
@timed
@encode
def metadata_provider_add():
    metadata_provider = get_storage().metadata_provider_add(**decode_request(
        request))
    send_metric(
        'metadata_provider:add', count=1, method_name='metadata_provider')
    return metadata_provider


@app.route('/provider/get', methods=['POST'])
@timed
def metadata_provider_get():
    return encode_data(get_storage().metadata_provider_get(**decode_request(
        request)))


@app.route('/provider/getby', methods=['POST'])
@timed
def metadata_provider_get_by():
    return encode_data(get_storage().metadata_provider_get_by(**decode_request(
        request)))


@app.route('/stat/counters', methods=['GET'])
@timed
def stat_counters():
    return encode_data(get_storage().stat_counters())


@app.route('/algos/diff_directories', methods=['POST'])
@timed
def diff_directories():
    return encode_data(get_storage().diff_directories(
        **decode_request(request)))


@app.route('/algos/diff_revisions', methods=['POST'])
@timed
def diff_revisions():
    return encode_data(get_storage().diff_revisions(**decode_request(request)))


@app.route('/algos/diff_revision', methods=['POST'])
@timed
def diff_revision():
    return encode_data(get_storage().diff_revision(**decode_request(request)))
api_cfg = None
def load_and_check_config(config_file, type='local'):
    """Check the minimal configuration is set to run the api or raise an
    error explanation.

    Args:
        config_file (str): Path to the configuration file to load
        type (str): configuration type. For 'local' type, more
            checks are done.

    Raises:
        Error if the setup is not as expected

    Returns:
        configuration as a dict
    """
    if not config_file:
        raise EnvironmentError('Configuration file must be defined')
    if not os.path.exists(config_file):
        raise FileNotFoundError('Configuration file %s does not exist' % (
            config_file, ))
    cfg = config.read(config_file)
    if 'storage' not in cfg:
        # Message fixed: it previously read "Missing '%storage'
        # configuration" (stray '%' in the literal).
        raise KeyError("Missing 'storage' configuration")
    if type == 'local':
        vcfg = cfg['storage']
        cls = vcfg.get('cls')
        if cls != 'local':
            raise ValueError(
                "The storage backend can only be started with a 'local' "
                "configuration")
        args = vcfg['args']
        for key in ('db', 'objstorage'):
            if not args.get(key):
                raise ValueError(
                    "Invalid configuration; missing '%s' config entry" % key)
    return cfg
def make_app_from_configfile():
    """Run the WSGI app from the webserver, loading the configuration from
    a configuration file.

    SWH_CONFIG_FILENAME environment variable defines the
    configuration path to load.
    """
    global api_cfg
    # Load and validate the configuration only once per process.
    if not api_cfg:
        config_file = os.environ.get('SWH_CONFIG_FILENAME')
        api_cfg = load_and_check_config(config_file)
        app.config.update(api_cfg)
    handler = logging.StreamHandler()
    app.logger.addHandler(handler)
    return app


if __name__ == '__main__':
    print('Deprecated. Use swh-storage')
diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py
index 9da36f98..7bb934dc 100644
--- a/swh/storage/in_memory.py
+++ b/swh/storage/in_memory.py
@@ -1,1611 +1,1615 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import re
import bisect
import dateutil
import collections
from collections import defaultdict
import copy
import datetime
import itertools
import random
import warnings
from swh.model.hashutil import DEFAULT_ALGORITHMS
from swh.model.identifiers import normalize_timestamp
from swh.objstorage import get_objstorage
from swh.objstorage.exc import ObjNotFoundError
from .journal_writer import get_journal_writer
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
def now():
    """Return the current time as a timezone-aware UTC datetime."""
    return datetime.datetime.now(datetime.timezone.utc)
class Storage:
def __init__(self, journal_writer=None):
    """In-memory storage backend.

    Args:
        journal_writer (dict): if set, passed as keyword arguments to
            get_journal_writer() to build the journal writer.
    """
    # Content objects and their per-algorithm hash indexes are created
    # here only; reset() does not clear them.
    self._contents = {}
    self._content_indexes = defaultdict(lambda: defaultdict(set))
    self.reset()

    if journal_writer:
        self.journal_writer = get_journal_writer(**journal_writer)
    else:
        self.journal_writer = None
def reset(self):
    """Re-create every object store, dropping previously stored data.

    NOTE(review): ``_contents`` and ``_content_indexes`` are set up in
    ``__init__`` only and are *not* cleared here — confirm whether that
    is intentional.
    """
    self._directories = {}
    self._revisions = {}
    self._releases = {}
    self._snapshots = {}
    self._origins = []
    self._origin_visits = []
    self._persons = []
    self._origin_metadata = defaultdict(list)
    self._tools = {}
    self._metadata_providers = {}
    self._objects = defaultdict(list)
    # ideally we would want a skip list for both fast inserts and searches
    self._sorted_sha1s = []
    self.objstorage = get_objstorage('memory', {})
def check_config(self, *, check_write):
    """Check that the storage is configured and ready to go."""
    # The in-memory backend needs no configuration, so it is always ready.
    return True
def _content_add(self, contents, with_data):
    """Shared implementation for content_add / content_add_metadata.

    Indexes each new content under every hash algorithm, records it in
    the object map, and (when ``with_data``) stores its raw data in the
    objstorage. Returns the summary counters dict.
    """
    # Journal first: additions are logged without the raw 'data' payload.
    if self.journal_writer:
        for content in contents:
            if 'data' in content:
                content = content.copy()
                del content['data']
            self.journal_writer.write_addition('content', content)
    count_contents = 0
    count_content_added = 0
    count_content_bytes_added = 0
    for content in contents:
        key = self._content_key(content)
        # Already stored: skip silently.
        if key in self._contents:
            continue
        # A same hash with a different key is a collision — except for
        # the long hashes (blake2s256/sha256), which are exempted here.
        for algorithm in DEFAULT_ALGORITHMS:
            if content[algorithm] in self._content_indexes[algorithm]\
                    and (algorithm not in {'blake2s256', 'sha256'}):
                from . import HashCollision
                raise HashCollision(algorithm, content[algorithm], key)
        for algorithm in DEFAULT_ALGORITHMS:
            self._content_indexes[algorithm][content[algorithm]].add(key)
        self._objects[content['sha1_git']].append(
            ('content', content['sha1']))
        self._contents[key] = copy.deepcopy(content)
        bisect.insort(self._sorted_sha1s, content['sha1'])
        count_contents += 1
        # Only 'visible' contents count as added and carry data;
        # everything else is reported as skipped_content.
        if self._contents[key]['status'] == 'visible':
            count_content_added += 1
            if with_data:
                content_data = self._contents[key].pop('data')
                count_content_bytes_added += len(content_data)
                self.objstorage.add(content_data, content['sha1'])
    summary = {
        'content:add': count_content_added,
        'skipped_content:add': count_contents - count_content_added,
    }
    if with_data:
        summary['content:add:bytes'] = count_content_bytes_added
    return summary
def content_add(self, content):
"""Add content blobs to the storage
Args:
content (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- data (bytes): the actual content
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
- reason (str): if status = absent, the reason why
- origin (int): if status = absent, the origin we saw the
content in
Raises:
HashCollision in case of collision
Returns:
Summary dict with the following key and associated values:
content:add: New contents added
content_bytes:add: Sum of the contents' length data
skipped_content:add: New skipped contents (no data) added
"""
content = [dict(c.items()) for c in content] # semi-shallow copy
now = datetime.datetime.now(tz=datetime.timezone.utc)
for item in content:
item['ctime'] = now
return self._content_add(content, with_data=True)
def content_add_metadata(self, content):
    """Add content metadata to the storage (like `content_add`, but
    without inserting to the objstorage).

    Args:
        content (iterable): iterable of dictionaries representing
            individual pieces of content to add. Each dictionary has the
            following keys:

            - length (int): content length (default: -1)
            - one key for each checksum algorithm in
              :data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the
              corresponding checksum
            - status (str): one of visible, hidden, absent
            - reason (str): if status = absent, the reason why
            - origin (int): if status = absent, the origin we saw the
              content in
            - ctime (datetime): time of insertion in the archive

    Raises:
        HashCollision in case of collision

    Returns:
        Summary dict with the following key and associated values:

            content:add: New contents added
            skipped_content:add: New skipped contents (no data) added
    """
    return self._content_add(content, with_data=False)
def content_get(self, content):
    """Retrieve in bulk contents and their data.

    Args:
        content: iterable of sha1 identifiers

    Yields:
        dicts with keys ``sha1`` and ``data``, or None for identifiers
        not present in the objstorage.

    Raises:
        ValueError: when more than BULK_BLOCK_CONTENT_LEN_MAX contents
            are requested at once.
    """
    # FIXME: Make this method support slicing the `data`.
    if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
        raise ValueError(
            "Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX)
    for obj_id in content:
        try:
            blob = self.objstorage.get(obj_id)
        except ObjNotFoundError:
            yield None
        else:
            yield {'sha1': obj_id, 'data': blob}
def content_get_range(self, start, end, limit=1000, db=None, cur=None):
"""Retrieve contents within range [start, end] bound by limit.
Note that this function may return more than one blob per hash. The
limit is enforced with multiplicity (ie. two blobs with the same hash
will count twice toward the limit).
Args:
**start** (bytes): Starting identifier range (expected smaller
than end)
**end** (bytes): Ending identifier range (expected larger
than start)
**limit** (int): Limit result (default to 1000)
Returns:
a dict with keys:
- contents [dict]: iterable of contents in between the range.
- next (bytes): There remains content in the range
starting from this next sha1
"""
if limit is None:
raise ValueError('Development error: limit should not be None')
from_index = bisect.bisect_left(self._sorted_sha1s, start)
sha1s = itertools.islice(self._sorted_sha1s, from_index, None)
sha1s = ((sha1, content_key)
for sha1 in sha1s
for content_key in self._content_indexes['sha1'][sha1])
matched = []
next_content = None
for sha1, key in sha1s:
if sha1 > end:
break
if len(matched) >= limit:
next_content = sha1
break
matched.append({
**self._contents[key],
})
return {
'contents': matched,
'next': next_content,
}
def content_get_metadata(self, content):
"""Retrieve content metadata in bulk
Args:
content: iterable of content identifiers (sha1)
Returns:
an iterable with content metadata corresponding to the given ids
"""
# FIXME: the return value should be a mapping from search key to found
# content*s*
for sha1 in content:
if sha1 in self._content_indexes['sha1']:
objs = self._content_indexes['sha1'][sha1]
# FIXME: rather than selecting one of the objects with that
# hash, we should return all of them. See:
# https://forge.softwareheritage.org/D645?id=1994#inline-3389
key = random.sample(objs, 1)[0]
data = copy.deepcopy(self._contents[key])
data.pop('ctime')
yield data
else:
# FIXME: should really be None
yield {
'sha1': sha1,
'sha1_git': None,
'sha256': None,
'blake2s256': None,
'length': None,
'status': None,
}
def content_find(self, content):
    """Return stored contents matching every checksum given in
    ``content`` (a dict keyed by algorithm name)."""
    if not set(content).intersection(DEFAULT_ALGORITHMS):
        raise ValueError('content keys must contain at least one of: '
                         '%s' % ', '.join(sorted(DEFAULT_ALGORITHMS)))
    # Collect, per provided algorithm, the set of candidate keys.
    candidate_sets = []
    for algorithm in DEFAULT_ALGORITHMS:
        checksum = content.get(algorithm)
        if checksum and checksum in self._content_indexes[algorithm]:
            candidate_sets.append(self._content_indexes[algorithm][checksum])
    if not candidate_sets:
        return []
    # A match must satisfy every provided checksum simultaneously.
    matching_keys = list(set.intersection(*candidate_sets))
    return copy.deepcopy([self._contents[key] for key in matching_keys])
def content_missing(self, content, key_hash='sha1'):
"""List content missing from storage
Args:
contents ([dict]): iterable of dictionaries whose keys are
either 'length' or an item of
:data:`swh.model.hashutil.ALGORITHMS`;
mapped to the corresponding checksum
(or length).
key_hash (str): name of the column to use as hash id
result (default: 'sha1')
Returns:
iterable ([bytes]): missing content ids (as per the
key_hash column)
"""
for cont in content:
for (algo, hash_) in cont.items():
if algo not in DEFAULT_ALGORITHMS:
continue
if hash_ not in self._content_indexes.get(algo, []):
yield cont[key_hash]
break
else:
for result in self.content_find(cont):
if result['status'] == 'missing':
yield cont[key_hash]
def content_missing_per_sha1(self, contents):
"""List content missing from storage based only on sha1.
Args:
contents: Iterable of sha1 to check for absence.
Returns:
iterable: missing ids
Raises:
TODO: an exception when we get a hash collision.
"""
for content in contents:
if content not in self._content_indexes['sha1']:
yield content
def directory_add(self, directories):
"""Add directories to the storage
Args:
directories (iterable): iterable of dictionaries representing the
individual directories to add. Each dict has the following
keys:
- id (sha1_git): the id of the directory to add
- entries (list): list of dicts for each entry in the
directory. Each dict has the following keys:
- name (bytes)
- type (one of 'file', 'dir', 'rev'): type of the
directory entry (file, directory, revision)
- target (sha1_git): id of the object pointed at by the
directory entry
- perms (int): entry permissions
Returns:
Summary dict of keys with associated count as values:
directory:add: Number of directories actually added
"""
if self.journal_writer:
self.journal_writer.write_additions('directory', directories)
count = 0
for directory in directories:
if directory['id'] not in self._directories:
count += 1
self._directories[directory['id']] = copy.deepcopy(directory)
self._objects[directory['id']].append(
('directory', directory['id']))
return {'directory:add': count}
def directory_missing(self, directories):
"""List directories missing from storage
Args:
directories (iterable): an iterable of directory ids
Yields:
missing directory ids
"""
for id in directories:
if id not in self._directories:
yield id
def _join_dentry_to_content(self, dentry):
keys = (
'status',
'sha1',
'sha1_git',
'sha256',
'length',
)
ret = dict.fromkeys(keys)
ret.update(dentry)
if ret['type'] == 'file':
# TODO: Make it able to handle more than one content
content = self.content_find({'sha1_git': ret['target']})
if content:
content = content[0]
for key in keys:
ret[key] = content[key]
return ret
def _directory_ls(self, directory_id, recursive, prefix=b''):
if directory_id in self._directories:
for entry in self._directories[directory_id]['entries']:
ret = self._join_dentry_to_content(entry)
ret['name'] = prefix + ret['name']
ret['dir_id'] = directory_id
yield ret
if recursive and ret['type'] == 'dir':
yield from self._directory_ls(
ret['target'], True, prefix + ret['name'] + b'/')
def directory_ls(self, directory, recursive=False):
"""Get entries for one directory.
Args:
- directory: the directory to list entries from.
- recursive: if flag on, this list recursively from this directory.
Returns:
List of entries for such directory.
If `recursive=True`, names in the path of a dir/file not at the
root are concatenated with a slash (`/`).
"""
yield from self._directory_ls(directory, recursive)
def directory_entry_get_by_path(self, directory, paths):
"""Get the directory entry (either file or dir) from directory with path.
Args:
- directory: sha1 of the top level directory
- paths: path to lookup from the top level directory. From left
(top) to right (bottom).
Returns:
The corresponding directory entry if found, None otherwise.
"""
return self._directory_entry_get_by_path(directory, paths, b'')
def _directory_entry_get_by_path(self, directory, paths, prefix):
if not paths:
return
contents = list(self.directory_ls(directory))
if not contents:
return
def _get_entry(entries, name):
for entry in entries:
if entry['name'] == name:
entry = entry.copy()
entry['name'] = prefix + entry['name']
return entry
first_item = _get_entry(contents, paths[0])
if len(paths) == 1:
return first_item
if not first_item or first_item['type'] != 'dir':
return
return self._directory_entry_get_by_path(
first_item['target'], paths[1:], prefix + paths[0] + b'/')
def revision_add(self, revisions):
"""Add revisions to the storage
Args:
revisions (Iterable[dict]): iterable of dictionaries representing
the individual revisions to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the revision to add
- **date** (:class:`dict`): date the revision was written
- **committer_date** (:class:`dict`): date the revision got
added to the origin
- **type** (one of 'git', 'tar'): type of the
revision added
- **directory** (:class:`sha1_git`): the directory the
revision points at
- **message** (:class:`bytes`): the message associated with
the revision
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **committer** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **metadata** (:class:`jsonb`): extra information as
dictionary
- **synthetic** (:class:`bool`): revision's nature (tarball,
directory creates synthetic revision`)
- **parents** (:class:`list[sha1_git]`): the parents of
this revision
date dictionaries have the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
revision_added: New objects actually stored in db
"""
if self.journal_writer:
self.journal_writer.write_additions('revision', revisions)
count = 0
for revision in revisions:
if revision['id'] not in self._revisions:
self._revisions[revision['id']] = rev = copy.deepcopy(revision)
self._person_add(rev['committer'])
self._person_add(rev['author'])
rev['date'] = normalize_timestamp(rev.get('date'))
rev['committer_date'] = normalize_timestamp(
rev.get('committer_date'))
self._objects[revision['id']].append(
('revision', revision['id']))
count += 1
return {'revision:add': count}
def revision_missing(self, revisions):
"""List revisions missing from storage
Args:
revisions (iterable): revision ids
Yields:
missing revision ids
"""
for id in revisions:
if id not in self._revisions:
yield id
def revision_get(self, revisions):
for id in revisions:
yield copy.deepcopy(self._revisions.get(id))
def _get_parent_revs(self, rev_id, seen, limit):
if limit and len(seen) >= limit:
return
if rev_id in seen or rev_id not in self._revisions:
return
seen.add(rev_id)
yield self._revisions[rev_id]
for parent in self._revisions[rev_id]['parents']:
yield from self._get_parent_revs(parent, seen, limit)
def revision_log(self, revisions, limit=None):
"""Fetch revision entry from the given root revisions.
Args:
revisions: array of root revision to lookup
limit: limitation on the output result. Default to None.
Yields:
List of revision log from such revisions root.
"""
seen = set()
for rev_id in revisions:
yield from self._get_parent_revs(rev_id, seen, limit)
def revision_shortlog(self, revisions, limit=None):
"""Fetch the shortlog for the given revisions
Args:
revisions: list of root revisions to lookup
limit: depth limitation for the output
Yields:
a list of (id, parents) tuples.
"""
yield from ((rev['id'], rev['parents'])
for rev in self.revision_log(revisions, limit))
def release_add(self, releases):
"""Add releases to the storage
Args:
releases (Iterable[dict]): iterable of dictionaries representing
the individual releases to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the release to add
- **revision** (:class:`sha1_git`): id of the revision the
release points to
- **date** (:class:`dict`): the date the release was made
- **name** (:class:`bytes`): the name of the release
- **comment** (:class:`bytes`): the comment associated with
the release
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
the date dictionary has the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
release:add: New objects contents actually stored in db
"""
if self.journal_writer:
self.journal_writer.write_additions('release', releases)
count = 0
for rel in releases:
if rel['id'] not in self._releases:
rel = copy.deepcopy(rel)
rel['date'] = normalize_timestamp(rel['date'])
if rel['author']:
self._person_add(rel['author'])
self._objects[rel['id']].append(
('release', rel['id']))
self._releases[rel['id']] = rel
count += 1
return {'release:add': count}
def release_missing(self, releases):
"""List releases missing from storage
Args:
releases: an iterable of release ids
Returns:
a list of missing release ids
"""
yield from (rel for rel in releases if rel not in self._releases)
def release_get(self, releases):
"""Given a list of sha1, return the releases's information
Args:
releases: list of sha1s
Yields:
dicts with the same keys as those given to `release_add`
(or ``None`` if a release does not exist)
"""
for rel_id in releases:
yield copy.deepcopy(self._releases.get(rel_id))
    def snapshot_add(self, snapshots, origin=None, visit=None):
        """Add a snapshot to the storage

        Args:
            snapshot ([dict]): the snapshots to add, containing the
                following keys:

                - **id** (:class:`bytes`): id of the snapshot
                - **branches** (:class:`dict`): branches the snapshot contains,
                  mapping the branch name (:class:`bytes`) to the branch target,
                  itself a :class:`dict` (or ``None`` if the branch points to an
                  unknown object)

                  - **target_type** (:class:`str`): one of ``content``,
                    ``directory``, ``revision``, ``release``,
                    ``snapshot``, ``alias``
                  - **target** (:class:`bytes`): identifier of the target
                    (currently a ``sha1_git`` for all object kinds, or the name
                    of the target branch for aliases)

        Raises:
            ValueError: if the origin's or visit's identifier does not exist.

        Returns:
            Summary dict of keys with associated count as values

                snapshot_added: Count of object actually stored in db
        """
        # Legacy three-argument call forms are detected by sniffing the
        # argument types and rewritten to the canonical
        # (snapshots=[...], origin, visit) layout.
        if origin:
            if not visit:
                raise TypeError(
                    'snapshot_add expects one argument (or, as a legacy '
                    'behavior, three arguments), not two')
            if isinstance(snapshots, (int, bytes)):
                # Called by legacy code that uses the new api/client.py
                # (arguments were passed as (origin, visit, snapshot)).
                (origin, visit, snapshots) = \
                    (snapshots, origin, [visit])
            else:
                # Called by legacy code that uses the old api/client.py
                snapshots = [snapshots]

        count = 0
        for snapshot in snapshots:
            snapshot_id = snapshot['id']
            if snapshot_id not in self._snapshots:
                if self.journal_writer:
                    self.journal_writer.write_addition('snapshot', snapshot)

                # '_sorted_branch_names' is precomputed so that
                # snapshot_get_branches can paginate with bisect.
                self._snapshots[snapshot_id] = {
                    'id': snapshot_id,
                    'branches': copy.deepcopy(snapshot['branches']),
                    '_sorted_branch_names': sorted(snapshot['branches'])
                }
                self._objects[snapshot_id].append(('snapshot', snapshot_id))
                count += 1

        if visit:
            # Legacy API, there can be only one snapshot
            self.origin_visit_update(
                origin, visit, snapshot=snapshots[0]['id'])

        return {'snapshot:add': count}
def snapshot_get(self, snapshot_id):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: a dict with three keys:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
return self.snapshot_get_branches(snapshot_id)
def snapshot_get_by_origin_visit(self, origin, visit):
"""Get the content, possibly partial, of a snapshot for the given origin visit
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
origin (int): the origin's identifier
visit (int): the visit's identifier
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
if origin > len(self._origins) or \
visit > len(self._origin_visits[origin-1]):
return None
snapshot_id = self._origin_visits[origin-1][visit-1]['snapshot']
if snapshot_id:
return self.snapshot_get(snapshot_id)
else:
return None
def snapshot_get_latest(self, origin, allowed_statuses=None):
"""Get the content, possibly partial, of the latest snapshot for the
given origin, optionally only from visits that have one of the given
allowed_statuses
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the methods :meth:`origin_visit_get_latest`
and :meth:`snapshot_get_branches` should be used instead.
Args:
origin (Union[str,int]): the origin's URL or identifier
allowed_statuses (list of str): list of visit statuses considered
to find the latest snapshot for the origin. For instance,
``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
Returns:
dict: a dict with three keys:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
if isinstance(origin, int):
origin = self.origin_get({'id': origin})['url']
visit = self.origin_visit_get_latest(
origin, allowed_statuses=allowed_statuses, require_snapshot=True)
if visit and visit['snapshot']:
snapshot = self.snapshot_get(visit['snapshot'])
if not snapshot:
raise ValueError(
'last origin visit references an unknown snapshot')
return snapshot
def snapshot_count_branches(self, snapshot_id, db=None, cur=None):
"""Count the number of branches in the snapshot with the given id
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: A dict whose keys are the target types of branches and
values their corresponding amount
"""
branches = list(self._snapshots[snapshot_id]['branches'].values())
return collections.Counter(branch['target_type'] if branch else None
for branch in branches)
def snapshot_get_branches(self, snapshot_id, branches_from=b'',
branches_count=1000, target_types=None):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
Args:
snapshot_id (bytes): identifier of the snapshot
branches_from (bytes): optional parameter used to skip branches
whose name is lesser than it before returning them
branches_count (int): optional parameter used to restrain
the amount of returned branches
target_types (list): optional parameter used to filter the
target types of branch to return (possible values that can be
contained in that list are `'content', 'directory',
'revision', 'release', 'snapshot', 'alias'`)
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than
`branches_count` branches after `branches_from` included.
"""
snapshot = self._snapshots.get(snapshot_id)
if snapshot is None:
return None
sorted_branch_names = snapshot['_sorted_branch_names']
from_index = bisect.bisect_left(
sorted_branch_names, branches_from)
if target_types:
next_branch = None
branches = {}
for branch_name in sorted_branch_names[from_index:]:
branch = snapshot['branches'][branch_name]
if branch and branch['target_type'] in target_types:
if len(branches) < branches_count:
branches[branch_name] = branch
else:
next_branch = branch_name
break
else:
# As there is no 'target_types', we can do that much faster
to_index = from_index + branches_count
returned_branch_names = sorted_branch_names[from_index:to_index]
branches = {branch_name: snapshot['branches'][branch_name]
for branch_name in returned_branch_names}
if to_index >= len(sorted_branch_names):
next_branch = None
else:
next_branch = sorted_branch_names[to_index]
return {
'id': snapshot_id,
'branches': branches,
'next_branch': next_branch,
}
def object_find_by_sha1_git(self, ids, db=None, cur=None):
"""Return the objects found with the given ids.
Args:
ids: a generator of sha1_gits
Returns:
dict: a mapping from id to the list of objects found. Each object
found is itself a dict with keys:
- sha1_git: the input id
- type: the type of object found
- id: the id of the object found
- object_id: the numeric id of the object found.
"""
ret = {}
for id_ in ids:
objs = self._objects.get(id_, [])
ret[id_] = [{
'sha1_git': id_,
'type': obj[0],
'id': obj[1],
'object_id': id_,
} for obj in objs]
return ret
def origin_get(self, origins):
"""Return origins, either all identified by their ids or all
identified by tuples (type, url).
If the url is given and the type is omitted, one of the origins with
that url is returned.
Args:
origin: a list of dictionaries representing the individual
origins to find.
These dicts have either the key url (and optionally type):
- type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
- url (bytes): the url the origin points to
or the id:
- id (int): the origin's identifier
Returns:
dict: the origin dictionary with the keys:
- id: origin's id
- type: origin's type
- url: origin's url
Raises:
ValueError: if the keys does not match (url and type) nor id.
"""
if isinstance(origins, dict):
# Old API
return_single = True
origins = [origins]
else:
return_single = False
# Sanity check to be error-compatible with the pgsql backend
if any('id' in origin for origin in origins) \
and not all('id' in origin for origin in origins):
raise ValueError(
'Either all origins or none at all should have an "id".')
if any('url' in origin for origin in origins) \
and not all('url' in origin for origin in origins):
raise ValueError(
'Either all origins or none at all should have '
'an "url" key.')
results = []
for origin in origins:
if 'id' in origin:
origin_id = origin['id']
elif 'url' in origin:
origin_id = self._origin_id(origin)
else:
raise ValueError(
'Origin must have either id or (type and url).')
origin = None
# self._origin_id can return None
if origin_id is not None and origin_id <= len(self._origins):
origin = copy.deepcopy(self._origins[origin_id-1])
origin['id'] = origin_id
results.append(origin)
if return_single:
assert len(results) == 1
return results[0]
else:
return results
def origin_get_range(self, origin_from=1, origin_count=100):
"""Retrieve ``origin_count`` origins whose ids are greater
or equal than ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from (int): the minimum id of origins to retrieve
origin_count (int): the maximum number of origins to retrieve
Yields:
dicts containing origin information as returned
by :meth:`swh.storage.in_memory.Storage.origin_get`.
"""
origin_from = max(origin_from, 1)
if origin_from <= len(self._origins):
max_idx = origin_from + origin_count - 1
if max_idx > len(self._origins):
max_idx = len(self._origins)
for idx in range(origin_from-1, max_idx):
yield copy.deepcopy(self._origins[idx])
def origin_search(self, url_pattern, offset=0, limit=50,
regexp=False, with_visit=False, db=None, cur=None):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern (str): the string pattern to search for in origin urls
offset (int): number of found origins to skip before returning
results
limit (int): the maximum number of found origins to return
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Returns:
An iterable of dict containing origin information as returned
by :meth:`swh.storage.storage.Storage.origin_get`.
"""
origins = self._origins
if regexp:
pat = re.compile(url_pattern)
origins = [orig for orig in origins if pat.search(orig['url'])]
else:
origins = [orig for orig in origins if url_pattern in orig['url']]
if with_visit:
origins = [orig for orig in origins
if len(self._origin_visits[orig['id']-1]) > 0]
origins = copy.deepcopy(origins[offset:offset+limit])
return origins
def origin_count(self, url_pattern, regexp=False, with_visit=False,
db=None, cur=None):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Returns:
int: The number of origins matching the search criterion.
"""
return len(self.origin_search(url_pattern, regexp=regexp,
with_visit=with_visit,
limit=len(self._origins)))
def origin_add(self, origins):
"""Add origins to the storage
Args:
origins: list of dictionaries representing the individual origins,
with the following keys:
- type: the origin type ('git', 'svn', 'deb', ...)
- url (bytes): the url the origin points to
Returns:
list: given origins as dict updated with their id
"""
origins = copy.deepcopy(origins)
for origin in origins:
origin['id'] = self.origin_add_one(origin)
return origins
def origin_add_one(self, origin):
"""Add origin to the storage
Args:
origin: dictionary representing the individual origin to add. This
dict has the following keys:
- type (FIXME: enum TBD): the origin type ('git', 'wget', ...)
- url (bytes): the url the origin points to
Returns:
the id of the added origin, or of the identical one that already
exists.
"""
origin = copy.deepcopy(origin)
assert 'id' not in origin
origin_id = self._origin_id(origin)
if origin_id is None:
if self.journal_writer:
self.journal_writer.write_addition('origin', origin)
# origin ids are in the range [1, +inf[
origin_id = len(self._origins) + 1
origin['id'] = origin_id
self._origins.append(origin)
self._origin_visits.append([])
key = (origin['type'], origin['url'])
self._objects[key].append(('origin', origin_id))
else:
origin['id'] = origin_id
return origin_id
    def fetch_history_start(self, origin_id):
        """Add an entry for origin origin_id in fetch_history. Returns the id
        of the added fetch_history entry
        """
        # fetch_history is not tracked by the in-memory backend: this is a
        # deliberate no-op kept for API compatibility (returns None).
        pass
    def fetch_history_end(self, fetch_history_id, data):
        """Close the fetch_history entry with id `fetch_history_id`, replacing
        its data with `data`.
        """
        # fetch_history is not tracked by the in-memory backend: this is a
        # deliberate no-op kept for API compatibility.
        pass
    def fetch_history_get(self, fetch_history_id):
        """Get the fetch_history entry with id `fetch_history_id`.

        Raises:
            NotImplementedError: always; fetch_history reads are not
                supported by the in-memory backend.
        """
        raise NotImplementedError('fetch_history_get is deprecated, use '
                                  'origin_visit_get instead.')
    def origin_visit_add(self, origin, date=None, type=None, *, ts=None):
        """Add an origin_visit for the origin at date with status 'ongoing'.

        For backward compatibility, `type` is optional and defaults to
        the origin's type.

        Args:
            origin (Union[int,str]): visited origin's identifier or URL
            date: timestamp of such visit
            type (str): the type of loader used for the visit (hg, git, ...)

        Returns:
            dict: dictionary with keys origin and visit where:

            - origin: origin's identifier
            - visit: the visit's identifier for the new visit occurrence
        """
        # Legacy keyword `ts` (renamed to `date` in v0.0.109): accepted
        # with a DeprecationWarning, but passing both is forbidden.
        if ts is None:
            if date is None:
                raise TypeError('origin_visit_add expected 2 arguments.')
        else:
            assert date is None
            warnings.warn("argument 'ts' of origin_visit_add was renamed "
                          "to 'date' in v0.0.109.",
                          DeprecationWarning)
            date = ts

        # The origin may be given by URL; resolve it to its numeric id.
        if isinstance(origin, str):
            origin_id = self.origin_get({'url': origin})['id']
        else:
            origin_id = origin

        if isinstance(date, str):
            date = dateutil.parser.parse(date)

        # Unknown origin ids are silently ignored: visit_ret stays None.
        visit_ret = None
        if origin_id <= len(self._origin_visits):
            # visit ids are in the range [1, +inf[
            visit_id = len(self._origin_visits[origin_id-1]) + 1
            status = 'ongoing'
            visit = {
                'origin': origin_id,
                'date': date,
                'type': type or self._origins[origin_id-1]['type'],
                'status': status,
                'snapshot': None,
                'metadata': None,
                'visit': visit_id
            }
            self._origin_visits[origin_id-1].append(visit)
            visit_ret = {
                'origin': origin_id,
                'visit': visit_id,
            }

            self._objects[(origin_id, visit_id)].append(
                ('origin_visit', None))

            if self.journal_writer:
                # The journal records the full origin dict (minus its id)
                # in place of the numeric origin id.
                origin = self.origin_get([{'id': origin_id}])[0]
                del origin['id']
                self.journal_writer.write_addition('origin_visit', {
                    **visit, 'origin': origin})

        return visit_ret
    def origin_visit_update(self, origin, visit_id, status=None,
                            metadata=None, snapshot=None):
        """Update an origin_visit's status.

        Args:
            origin (Union[int,str]): visited origin's identifier or URL
            visit_id (int): visit's identifier
            status: visit's new status
            metadata: data associated to the visit
            snapshot (sha1_git): identifier of the snapshot to add to
                the visit

        Returns:
            None
        """
        # The origin may be given by URL; resolve it to its numeric id.
        if isinstance(origin, str):
            origin_id = self.origin_get({'url': origin})['id']
        else:
            origin_id = origin

        # Fetch the visit being updated; unknown ids surface as ValueError.
        # NOTE(review): a visit_id of 0 (or negative ids) would index from
        # the end of the list instead of failing — presumably callers only
        # pass ids >= 1; confirm.
        try:
            visit = self._origin_visits[origin_id-1][visit_id-1]
        except IndexError:
            raise ValueError('Invalid origin_id or visit_id') from None
        if self.journal_writer:
            # The journal entry carries the full origin dict (minus its
            # id) and merges the new values over the current ones.
            origin = self.origin_get([{'id': origin_id}])[0]
            del origin['id']
            self.journal_writer.write_update('origin_visit', {
                'origin': origin, 'type': origin['type'],
                'visit': visit_id,
                'status': status or visit['status'],
                'date': visit['date'],
                'metadata': metadata or visit['metadata'],
                'snapshot': snapshot or visit['snapshot']})
        # NOTE(review): this bounds check looks redundant with the
        # IndexError handling above — verify whether it is still reachable.
        if origin_id > len(self._origin_visits) or \
                visit_id > len(self._origin_visits[origin_id-1]):
            return
        # Only provided (truthy) fields overwrite the stored visit.
        if status:
            visit['status'] = status
        if metadata:
            visit['metadata'] = metadata
        if snapshot:
            visit['snapshot'] = snapshot
    def origin_visit_upsert(self, visits):
        """Add a origin_visits with a specific id and with all its data.
        If there is already an origin_visit with the same
        `(origin_id, visit_id)`, updates it instead of inserting a new one.

        Args:
            visits: iterable of dicts with keys:

                origin: Visited Origin id
                visit: origin visit id
                type: type of loader used for the visit
                date: timestamp of such visit
                status: Visit's new status
                metadata: Data associated to the visit
                snapshot (sha1_git): identifier of the snapshot to add to
                    the visit
        """
        # Deep-copy so neither normalization below nor later storage
        # mutates the caller's dicts.
        visits = copy.deepcopy(visits)
        for visit in visits:
            if isinstance(visit['date'], str):
                visit['date'] = dateutil.parser.parse(visit['date'])

        if self.journal_writer:
            for visit in visits:
                # The journal entry carries the full origin dict (minus
                # its id) in place of the numeric origin id.
                visit = visit.copy()
                visit['origin'] = self.origin_get([{'id': visit['origin']}])[0]
                del visit['origin']['id']
                self.journal_writer.write_addition('origin_visit', visit)

        for visit in visits:
            origin_id = visit['origin']
            visit_id = visit['visit']

            self._objects[(origin_id, visit_id)].append(
                ('origin_visit', None))

            # Pad the origin's visit list with None placeholders so that
            # visit_id-1 is a valid slot, then store (or overwrite) the
            # visit there. The leading `visit =` rebinding is redundant.
            while len(self._origin_visits[origin_id-1]) < visit_id:
                self._origin_visits[origin_id-1].append(None)

            visit = self._origin_visits[origin_id-1][visit_id-1] = visit
def origin_visit_get(self, origin, last_visit=None, limit=None):
"""Retrieve all the origin's visit's information.
Args:
origin (int): the origin's identifier
last_visit (int): visit's id from which listing the next ones,
default to None
limit (int): maximum number of results to return,
default to None
Yields:
List of visits.
"""
if origin <= len(self._origin_visits):
visits = self._origin_visits[origin-1]
if last_visit is not None:
visits = visits[last_visit:]
if limit is not None:
visits = visits[:limit]
for visit in visits:
if not visit:
continue
visit_id = visit['visit']
yield copy.deepcopy(self._origin_visits[origin-1][visit_id-1])
def origin_visit_get_by(self, origin, visit):
"""Retrieve origin visit's information.
Args:
origin (int): the origin's identifier
Returns:
The information on that particular (origin, visit) or None if
it does not exist
"""
if isinstance(origin, str):
origin = self.origin_get({'url': origin})['id']
origin_visit = None
if origin <= len(self._origin_visits) and \
visit <= len(self._origin_visits[origin-1]):
origin_visit = self._origin_visits[origin-1][visit-1]
return copy.deepcopy(origin_visit)
def origin_visit_get_latest(
self, origin, allowed_statuses=None, require_snapshot=False):
"""Get the latest origin visit for the given origin, optionally
looking only for those with one of the given allowed_statuses
or for those with a known snapshot.
Args:
origin (str): the origin's URL
allowed_statuses (list of str): list of visit statuses considered
to find the latest visit. For instance,
``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
require_snapshot (bool): If True, only a visit with a snapshot
will be returned.
Returns:
dict: a dict with the following keys:
origin: the URL of the origin
visit: origin visit id
type: type of loader used for the visit
date: timestamp of such visit
status: Visit's new status
metadata: Data associated to the visit
snapshot (Optional[sha1_git]): identifier of the snapshot
associated to the visit
"""
origin = self.origin_get({'url': origin})['id']
visits = self._origin_visits[origin-1]
if allowed_statuses is not None:
visits = [visit for visit in visits
if visit['status'] in allowed_statuses]
if require_snapshot:
visits = [visit for visit in visits
if visit['snapshot']]
return max(visits, key=lambda v: (v['date'], v['visit']), default=None)
def person_get(self, person):
"""Return the persons identified by their ids.
Args:
person: array of ids.
Returns:
The array of persons corresponding of the ids.
"""
for p in person:
if 0 <= (p - 1) < len(self._persons):
yield dict(self._persons[p - 1], id=p)
else:
yield None
def stat_counters(self):
"""compute statistics about the number of tuples in various tables
Returns:
dict: a dictionary mapping textual labels (e.g., content) to
integer values (e.g., the number of tuples in table content)
"""
keys = (
'content',
'directory',
'origin',
'origin_visit',
'person',
'release',
'revision',
'skipped_content',
'snapshot'
)
stats = {key: 0 for key in keys}
stats.update(collections.Counter(
obj_type
for (obj_type, obj_id)
in itertools.chain(*self._objects.values())))
return stats
    def refresh_stat_counters(self):
        """Recomputes the statistics for `stat_counters`."""
        # No-op: stat_counters() always computes from live data in this
        # backend, so there is nothing to refresh.
        pass
def origin_metadata_add(self, origin_id, ts, provider, tool, metadata,
db=None, cur=None):
""" Add an origin_metadata for the origin at ts with provenance and
metadata.
Args:
origin_id (int): the origin's id for which the metadata is added
ts (datetime): timestamp of the found metadata
provider: id of the provider of metadata (ex:'hal')
tool: id of the tool used to extract metadata
metadata (jsonb): the metadata retrieved at the time and location
"""
if isinstance(ts, str):
ts = dateutil.parser.parse(ts)
origin_metadata = {
'origin_id': origin_id,
'discovery_date': ts,
'tool_id': tool,
'metadata': metadata,
'provider_id': provider,
}
self._origin_metadata[origin_id].append(origin_metadata)
return None
def origin_metadata_get_by(self, origin_id, provider_type=None, db=None,
cur=None):
"""Retrieve list of all origin_metadata entries for the origin_id
Args:
origin_id (int): the unique origin's identifier
provider_type (str): (optional) type of provider
Returns:
list of dicts: the origin_metadata dictionary with the keys:
- origin_id (int): origin's identifier
- discovery_date (datetime): timestamp of discovery
- tool_id (int): metadata's extracting tool
- metadata (jsonb)
- provider_id (int): metadata's provider
- provider_name (str)
- provider_type (str)
- provider_url (str)
"""
metadata = []
for item in self._origin_metadata[origin_id]:
item = copy.deepcopy(item)
provider = self.metadata_provider_get(item['provider_id'])
for attr in ('name', 'type', 'url'):
item['provider_' + attr] = provider['provider_' + attr]
metadata.append(item)
return metadata
def tool_add(self, tools):
"""Add new tools to the storage.
Args:
tools (iterable of :class:`dict`): Tool information to add to
storage. Each tool is a :class:`dict` with the following keys:
- name (:class:`str`): name of the tool
- version (:class:`str`): version of the tool
- configuration (:class:`dict`): configuration of the tool,
must be json-encodable
Returns:
:class:`dict`: All the tools inserted in storage
(including the internal ``id``). The order of the list is not
guaranteed to match the order of the initial list.
"""
inserted = []
for tool in tools:
key = self._tool_key(tool)
assert 'id' not in tool
record = copy.deepcopy(tool)
record['id'] = key # TODO: remove this
if key not in self._tools:
self._tools[key] = record
inserted.append(copy.deepcopy(self._tools[key]))
return inserted
def tool_get(self, tool):
"""Retrieve tool information.
Args:
tool (dict): Tool information we want to retrieve from storage.
The dicts have the same keys as those used in :func:`tool_add`.
Returns:
dict: The full tool information if it exists (``id`` included),
None otherwise.
"""
return self._tools.get(self._tool_key(tool))
def metadata_provider_add(self, provider_name, provider_type, provider_url,
metadata):
"""Add a metadata provider.
Args:
provider_name (str): Its name
provider_type (str): Its type
provider_url (str): Its URL
metadata: JSON-encodable object
Returns:
an identifier of the provider
"""
provider = {
'provider_name': provider_name,
'provider_type': provider_type,
'provider_url': provider_url,
'metadata': metadata,
}
key = self._metadata_provider_key(provider)
provider['id'] = key
self._metadata_providers[key] = provider
return key
def metadata_provider_get(self, provider_id, db=None, cur=None):
"""Get a metadata provider
Args:
provider_id: Its identifier, as given by `metadata_provider_add`.
Returns:
dict: same as `metadata_provider_add`;
or None if it does not exist.
"""
return self._metadata_providers.get(provider_id)
def metadata_provider_get_by(self, provider, db=None, cur=None):
"""Get a metadata provider
Args:
provider_name: Its name
provider_url: Its URL
Returns:
dict: same as `metadata_provider_add`;
or None if it does not exist.
"""
key = self._metadata_provider_key(provider)
return self._metadata_providers.get(key)
def _origin_id(self, origin):
origin_id = None
for stored_origin in self._origins:
if stored_origin['url'] == origin['url'] \
and ('type' not in origin
or stored_origin['type'] == origin['type']):
origin_id = stored_origin['id']
break
return origin_id
def _person_add(self, person):
"""Add a person in storage.
Note: Private method, do not use outside of this class.
Args:
person: dictionary with keys fullname, name and email.
"""
key = ('person', person['fullname'])
if key not in self._objects:
person_id = len(self._persons) + 1
self._persons.append(dict(person))
self._objects[key].append(('person', person_id))
else:
person_id = self._objects[key][0][1]
p = next(self.person_get([person_id]))
person.update(p.items())
person['id'] = person_id
@staticmethod
def _content_key(content):
"""A stable key for a content"""
return tuple(content.get(key) for key in sorted(DEFAULT_ALGORITHMS))
@staticmethod
def _tool_key(tool):
return '%r %r %r' % (tool['name'], tool['version'],
tuple(sorted(tool['configuration'].items())))
@staticmethod
def _metadata_provider_key(provider):
return '%r %r' % (provider['provider_name'], provider['provider_url'])