diff --git a/swh/storage/converters.py b/swh/storage/converters.py index f792ad97..74468ec3 100644 --- a/swh/storage/converters.py +++ b/swh/storage/converters.py @@ -1,320 +1,312 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime from swh.core.utils import decode_with_escape, encode_with_unescape from swh.model import identifiers DEFAULT_AUTHOR = { 'fullname': None, 'name': None, 'email': None, } DEFAULT_DATE = { 'timestamp': None, 'offset': 0, 'neg_utc_offset': None, } def author_to_db(author): """Convert a swh-model author to its DB representation. Args: author: a :mod:`swh.model` compatible author Returns: dict: a dictionary with three keys: fullname, name and email """ if author is None: return DEFAULT_AUTHOR return author -def db_to_author(id, fullname, name, email): +def db_to_author(fullname, name, email): """Convert the DB representation of an author to a swh-model author. Args: fullname (bytes): the author's fullname name (bytes): the author's name email (bytes): the author's email Returns: dict: a dictionary with three keys: fullname, name and email """ - - if id is None: - return None - return { - 'id': id, 'fullname': fullname, 'name': name, 'email': email, } def git_headers_to_db(git_headers): """Convert git headers to their database representation. We convert the bytes to unicode by decoding them into utf-8 and replacing invalid utf-8 sequences with backslash escapes. """ ret = [] for key, values in git_headers: if isinstance(values, list): ret.append([key, [decode_with_escape(value) for value in values]]) else: ret.append([key, decode_with_escape(values)]) return ret def db_to_git_headers(db_git_headers): ret = [] for key, values in db_git_headers: if isinstance(values, list): ret.append([key, [encode_with_unescape(value) for value in values]]) else: ret.append([key, encode_with_unescape(values)]) return ret def db_to_date(date, offset, neg_utc_offset): """Convert the DB representation of a date to a swh-model compatible date. Args: date (datetime.datetime): a date pulled out of the database offset (int): the UTC offset in minutes neg_utc_offset (boolean): whether the UTC offset is negative Returns: dict: a dict with three keys: - timestamp: a timestamp in UTC - offset: the UTC offset in minutes - negative_utc: whether a null UTC offset is negative """ if date is None: return None return { 'timestamp': { 'seconds': int(date.timestamp()), 'microseconds': date.microsecond, }, 'offset': offset, 'negative_utc': neg_utc_offset, } def date_to_db(date_offset): """Convert a swh-model date_offset to its DB representation. Args: date_offset: a :mod:`swh.model` compatible date_offset Returns: dict: a dictionary with three keys: - timestamp: a date in ISO format - offset: the UTC offset in minutes - neg_utc_offset: a boolean indicating whether a null offset is negative or positive.
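# A minimal usage sketch of the converters above (illustrative only, not part
# of the change itself); all values are made up. With this change db_to_author
# receives only the three name columns, and date_to_db still produces the flat
# timestamp/offset/neg_utc_offset triple stored alongside each revision or
# release row.
author = db_to_author(fullname=b'Jane Doe <jane@example.com>',
                      name=b'Jane Doe', email=b'jane@example.com')
# -> {'fullname': b'...', 'name': b'...', 'email': b'...'}  (no 'id' key)
db_date = date_to_db({'timestamp': {'seconds': 1565096932, 'microseconds': 0},
                      'offset': 120, 'negative_utc': False})
# -> {'timestamp': '<ISO-8601 string in UTC>', 'offset': 120,
#     'neg_utc_offset': False}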
""" if date_offset is None: return DEFAULT_DATE normalized = identifiers.normalize_timestamp(date_offset) ts = normalized['timestamp'] seconds = ts.get('seconds', 0) microseconds = ts.get('microseconds', 0) timestamp = datetime.datetime.fromtimestamp(seconds, datetime.timezone.utc) timestamp = timestamp.replace(microsecond=microseconds) return { # PostgreSQL supports isoformatted timestamps 'timestamp': timestamp.isoformat(), 'offset': normalized['offset'], 'neg_utc_offset': normalized['negative_utc'], } def revision_to_db(revision): """Convert a swh-model revision to its database representation. """ author = author_to_db(revision['author']) date = date_to_db(revision['date']) committer = author_to_db(revision['committer']) committer_date = date_to_db(revision['committer_date']) metadata = revision['metadata'] if metadata and 'extra_headers' in metadata: metadata = metadata.copy() extra_headers = git_headers_to_db(metadata['extra_headers']) metadata['extra_headers'] = extra_headers return { 'id': revision['id'], 'author_fullname': author['fullname'], 'author_name': author['name'], 'author_email': author['email'], 'date': date['timestamp'], 'date_offset': date['offset'], 'date_neg_utc_offset': date['neg_utc_offset'], 'committer_fullname': committer['fullname'], 'committer_name': committer['name'], 'committer_email': committer['email'], 'committer_date': committer_date['timestamp'], 'committer_date_offset': committer_date['offset'], 'committer_date_neg_utc_offset': committer_date['neg_utc_offset'], 'type': revision['type'], 'directory': revision['directory'], 'message': revision['message'], 'metadata': metadata, 'synthetic': revision['synthetic'], 'parents': [ { 'id': revision['id'], 'parent_id': parent, 'parent_rank': i, } for i, parent in enumerate(revision['parents']) ], } def db_to_revision(db_revision): """Convert a database representation of a revision to its swh-model representation.""" author = db_to_author( - db_revision['author_id'], db_revision['author_fullname'], db_revision['author_name'], db_revision['author_email'], ) date = db_to_date( db_revision['date'], db_revision['date_offset'], db_revision['date_neg_utc_offset'], ) committer = db_to_author( - db_revision['committer_id'], db_revision['committer_fullname'], db_revision['committer_name'], db_revision['committer_email'], ) committer_date = db_to_date( db_revision['committer_date'], db_revision['committer_date_offset'], db_revision['committer_date_neg_utc_offset'] ) metadata = db_revision['metadata'] if metadata and 'extra_headers' in metadata: extra_headers = db_to_git_headers(metadata['extra_headers']) metadata['extra_headers'] = extra_headers parents = [] if 'parents' in db_revision: for parent in db_revision['parents']: if parent: parents.append(parent) ret = { 'id': db_revision['id'], 'author': author, 'date': date, 'committer': committer, 'committer_date': committer_date, 'type': db_revision['type'], 'directory': db_revision['directory'], 'message': db_revision['message'], 'metadata': metadata, 'synthetic': db_revision['synthetic'], 'parents': parents, } if 'object_id' in db_revision: ret['object_id'] = db_revision['object_id'] return ret def release_to_db(release): """Convert a swh-model release to its database representation. 
""" author = author_to_db(release['author']) date = date_to_db(release['date']) return { 'id': release['id'], 'author_fullname': author['fullname'], 'author_name': author['name'], 'author_email': author['email'], 'date': date['timestamp'], 'date_offset': date['offset'], 'date_neg_utc_offset': date['neg_utc_offset'], 'name': release['name'], 'target': release['target'], 'target_type': release['target_type'], 'comment': release['message'], 'synthetic': release['synthetic'], } def db_to_release(db_release): """Convert a database representation of a release to its swh-model representation. """ author = db_to_author( - db_release['author_id'], db_release['author_fullname'], db_release['author_name'], db_release['author_email'], ) date = db_to_date( db_release['date'], db_release['date_offset'], db_release['date_neg_utc_offset'] ) ret = { 'author': author, 'date': date, 'id': db_release['id'], 'name': db_release['name'], 'message': db_release['comment'], 'synthetic': db_release['synthetic'], 'target': db_release['target'], 'target_type': db_release['target_type'], } if 'object_id' in db_release: ret['object_id'] = db_release['object_id'] return ret diff --git a/swh/storage/db.py b/swh/storage/db.py index fe585b92..11cef63f 100644 --- a/swh/storage/db.py +++ b/swh/storage/db.py @@ -1,913 +1,912 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import select from swh.core.db import BaseDb from swh.core.db.db_utils import stored_procedure, jsonize from swh.core.db.db_utils import execute_values_generator class Db(BaseDb): """Proxy to the SWH DB, with wrappers around stored procedures """ def mktemp_dir_entry(self, entry_type, cur=None): self._cursor(cur).execute('SELECT swh_mktemp_dir_entry(%s)', (('directory_entry_%s' % entry_type),)) @stored_procedure('swh_mktemp_revision') def mktemp_revision(self, cur=None): pass @stored_procedure('swh_mktemp_release') def mktemp_release(self, cur=None): pass @stored_procedure('swh_mktemp_snapshot_branch') def mktemp_snapshot_branch(self, cur=None): pass def register_listener(self, notify_queue, cur=None): """Register a listener for NOTIFY queue `notify_queue`""" self._cursor(cur).execute("LISTEN %s" % notify_queue) def listen_notifies(self, timeout): """Listen to notifications for `timeout` seconds""" if select.select([self.conn], [], [], timeout) == ([], [], []): return else: self.conn.poll() while self.conn.notifies: yield self.conn.notifies.pop(0) @stored_procedure('swh_content_add') def content_add_from_temp(self, cur=None): pass @stored_procedure('swh_directory_add') def directory_add_from_temp(self, cur=None): pass @stored_procedure('swh_skipped_content_add') def skipped_content_add_from_temp(self, cur=None): pass @stored_procedure('swh_revision_add') def revision_add_from_temp(self, cur=None): pass @stored_procedure('swh_release_add') def release_add_from_temp(self, cur=None): pass def content_update_from_temp(self, keys_to_update, cur=None): cur = self._cursor(cur) cur.execute("""select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update) content_get_metadata_keys = [ 'sha1', 'sha1_git', 'sha256', 'blake2s256', 'length', 'status'] content_add_keys = content_get_metadata_keys + ['ctime'] skipped_content_keys = [ 'sha1', 'sha1_git', 'sha256', 'blake2s256', 'length', 'reason', 'status', 'origin'] def content_get_metadata_from_sha1s(self, sha1s, 
cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ select t.sha1, %s from (values %%s) as t (sha1) left join content using (sha1) """ % ', '.join(self.content_get_metadata_keys[1:]), ((sha1,) for sha1 in sha1s), ) def content_get_range(self, start, end, limit=None, cur=None): """Retrieve contents within range [start, end]. """ cur = self._cursor(cur) query = """select %s from content where %%s <= sha1 and sha1 <= %%s order by sha1 limit %%s""" % ', '.join(self.content_get_metadata_keys) cur.execute(query, (start, end, limit)) yield from cur content_hash_keys = ['sha1', 'sha1_git', 'sha256', 'blake2s256'] def content_missing_from_list(self, contents, cur=None): cur = self._cursor(cur) keys = ', '.join(self.content_hash_keys) equality = ' AND '.join( ('t.%s = c.%s' % (key, key)) for key in self.content_hash_keys ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(%s) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE %s ) """ % (keys, keys, equality), (tuple(c[key] for key in self.content_hash_keys) for c in contents) ) def content_missing_per_sha1(self, sha1s, cur=None): cur = self._cursor(cur) yield from execute_values_generator(cur, """ SELECT t.sha1 FROM (VALUES %s) AS t(sha1) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE c.sha1 = t.sha1 )""", ((sha1,) for sha1 in sha1s)) def skipped_content_missing(self, contents, cur=None): if not contents: return [] cur = self._cursor(cur) query = """SELECT * FROM (VALUES %s) AS t (%s) WHERE not exists (SELECT 1 FROM skipped_content s WHERE s.sha1 is not distinct from t.sha1 and s.sha1_git is not distinct from t.sha1_git and s.sha256 is not distinct from t.sha256);""" % \ ((', '.join('%s' for _ in contents)), ', '.join(self.content_hash_keys)) cur.execute(query, [tuple(cont[key] for key in self.content_hash_keys) for cont in contents]) yield from cur def snapshot_exists(self, snapshot_id, cur=None): """Check whether a snapshot with the given id exists""" cur = self._cursor(cur) cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,)) return bool(cur.fetchone()) def snapshot_add(self, snapshot_id, cur=None): """Add a snapshot from the temporary table""" cur = self._cursor(cur) cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,)) snapshot_count_cols = ['target_type', 'count'] def snapshot_count_branches(self, snapshot_id, cur=None): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_count_branches(%%s) """ % ', '.join(self.snapshot_count_cols) cur.execute(query, (snapshot_id,)) yield from cur snapshot_get_cols = ['snapshot_id', 'name', 'target', 'target_type'] def snapshot_get_by_id(self, snapshot_id, branches_from=b'', branches_count=None, target_types=None, cur=None): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[]) """ % ', '.join(self.snapshot_get_cols) cur.execute(query, (snapshot_id, branches_from, branches_count, target_types)) yield from cur def snapshot_get_by_origin_visit(self, origin_id, visit_id, cur=None): cur = self._cursor(cur) query = """\ SELECT snapshot from origin_visit where origin_visit.origin=%s and origin_visit.visit=%s; """ cur.execute(query, (origin_id, visit_id)) ret = cur.fetchone() if ret: return ret[0] content_find_cols = ['sha1', 'sha1_git', 'sha256', 'blake2s256', 'length', 'ctime', 'status'] def content_find(self, sha1=None, sha1_git=None, sha256=None, blake2s256=None, cur=None): """Find the content optionally on a combination of the following checksums sha1, 
sha1_git, sha256 or blake2s256. Args: sha1: sha1 content git_sha1: the sha1 computed `a la git` sha1 of the content sha256: sha256 content blake2s256: blake2s256 content Returns: The tuple (sha1, sha1_git, sha256, blake2s256) if found or None. """ cur = self._cursor(cur) checksum_dict = {'sha1': sha1, 'sha1_git': sha1_git, 'sha256': sha256, 'blake2s256': blake2s256} where_parts = [] args = [] # Adds only those keys which have value other than None for algorithm in checksum_dict: if checksum_dict[algorithm] is not None: args.append(checksum_dict[algorithm]) where_parts.append(algorithm + '= %s') query = ' AND '.join(where_parts) cur.execute("""SELECT %s FROM content WHERE %s """ % (','.join(self.content_find_cols), query), args) content = cur.fetchall() return content def directory_missing_from_list(self, directories, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM directory d WHERE d.id = t.id ) """, ((id,) for id in directories)) directory_ls_cols = ['dir_id', 'type', 'target', 'name', 'perms', 'status', 'sha1', 'sha1_git', 'sha256', 'length'] def directory_walk_one(self, directory, cur=None): cur = self._cursor(cur) cols = ', '.join(self.directory_ls_cols) query = 'SELECT %s FROM swh_directory_walk_one(%%s)' % cols cur.execute(query, (directory,)) yield from cur def directory_walk(self, directory, cur=None): cur = self._cursor(cur) cols = ', '.join(self.directory_ls_cols) query = 'SELECT %s FROM swh_directory_walk(%%s)' % cols cur.execute(query, (directory,)) yield from cur def directory_entry_get_by_path(self, directory, paths, cur=None): """Retrieve a directory entry by path. """ cur = self._cursor(cur) cols = ', '.join(self.directory_ls_cols) query = ( 'SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)' % cols) cur.execute(query, (directory, paths)) data = cur.fetchone() if set(data) == {None}: return None return data def revision_missing_from_list(self, revisions, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM revision r WHERE r.id = t.id ) """, ((id,) for id in revisions)) revision_add_cols = [ 'id', 'date', 'date_offset', 'date_neg_utc_offset', 'committer_date', 'committer_date_offset', 'committer_date_neg_utc_offset', 'type', 'directory', 'message', 'author_fullname', 'author_name', 'author_email', 'committer_fullname', 'committer_name', 'committer_email', 'metadata', 'synthetic', ] - revision_get_cols = revision_add_cols + [ - 'author_id', 'committer_id', 'parents'] + revision_get_cols = revision_add_cols + ['parents'] def origin_visit_add(self, origin, ts, type, cur=None): """Add a new origin_visit for origin origin at timestamp ts with status 'ongoing'. 
Args: origin: origin concerned by the visit ts: the date of the visit type: type of loader for the visit Returns: The new visit index step for that origin """ cur = self._cursor(cur) self._cursor(cur).execute('SELECT swh_origin_visit_add(%s, %s, %s)', (origin, ts, type)) return cur.fetchone()[0] def origin_visit_update(self, origin_id, visit_id, updates, cur=None): """Update origin_visit's status.""" cur = self._cursor(cur) update_cols = [] values = [] where = ['origin=%s AND visit=%s'] where_values = [origin_id, visit_id] from_ = '' if 'status' in updates: update_cols.append('status=%s') values.append(updates.pop('status')) if 'metadata' in updates: update_cols.append('metadata=%s') values.append(jsonize(updates.pop('metadata'))) if 'snapshot' in updates: update_cols.append('snapshot=%s') values.append(updates.pop('snapshot')) assert not updates, 'Unknown fields: %r' % updates query = """UPDATE origin_visit SET {update_cols} {from} WHERE {where}""".format(**{ 'update_cols': ', '.join(update_cols), 'from': from_, 'where': ' AND '.join(where) }) cur.execute(query, (*values, *where_values)) def origin_visit_upsert(self, origin, visit, date, type, status, metadata, snapshot, cur=None): cur = self._cursor(cur) query = """INSERT INTO origin_visit ({cols}) VALUES ({values}) ON CONFLICT ON CONSTRAINT origin_visit_pkey DO UPDATE SET {updates}""".format( cols=', '.join(self.origin_visit_get_cols), values=', '.join('%s' for col in self.origin_visit_get_cols), updates=', '.join('{0}=excluded.{0}'.format(col) for col in self.origin_visit_get_cols)) cur.execute( query, (origin, visit, date, type, status, metadata, snapshot)) origin_visit_get_cols = ['origin', 'visit', 'date', 'type', 'status', 'metadata', 'snapshot'] def origin_visit_get_all(self, origin_id, last_visit=None, limit=None, cur=None): """Retrieve all visits for origin with id origin_id. Args: origin_id: The occurrence's origin Yields: The occurrence's history visits """ cur = self._cursor(cur) if last_visit: extra_condition = 'and visit > %s' args = (origin_id, last_visit, limit) else: extra_condition = '' args = (origin_id, limit) query = """\ SELECT %s FROM origin_visit WHERE origin=%%s %s order by visit asc limit %%s""" % ( ', '.join(self.origin_visit_get_cols), extra_condition ) cur.execute(query, args) yield from cur def origin_visit_get(self, origin_id, visit_id, cur=None): """Retrieve information on visit visit_id of origin origin_id. Args: origin_id: the origin concerned visit_id: The visit step for that origin Returns: The origin_visit information """ cur = self._cursor(cur) query = """\ SELECT %s FROM origin_visit WHERE origin = %%s AND visit = %%s """ % (', '.join(self.origin_visit_get_cols)) cur.execute(query, (origin_id, visit_id)) r = cur.fetchall() if not r: return None return r[0] def origin_visit_find_by_date(self, origin, visit_date, cur=None): cur = self._cursor(cur) cur.execute( 'SELECT * FROM swh_visit_find_by_date(%s, %s)', (origin, visit_date)) r = cur.fetchall() if r: return r[0] def origin_visit_exists(self, origin_id, visit_id, cur=None): """Check whether an origin visit with the given ids exists""" cur = self._cursor(cur) query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s" cur.execute(query, (origin_id, visit_id)) return bool(cur.fetchone()) def origin_visit_get_latest( self, origin_id, allowed_statuses=None, require_snapshot=False, cur=None): """Retrieve the most recent origin_visit of the given origin, with optional filters. 
Args: origin_id: the origin concerned allowed_statuses: the visit statuses allowed for the returned visit require_snapshot (bool): If True, only a visit with a known snapshot will be returned. Returns: The origin_visit information, or None if no visit matches. """ cur = self._cursor(cur) query_parts = [ 'SELECT %s' % ', '.join(self.origin_visit_get_cols), 'FROM origin_visit'] query_parts.append('WHERE origin = %s') if require_snapshot: query_parts.append('AND snapshot is not null') if allowed_statuses: query_parts.append( cur.mogrify('AND status IN %s', (tuple(allowed_statuses),)).decode()) query_parts.append('ORDER BY date DESC, visit DESC LIMIT 1') query = '\n'.join(query_parts) cur.execute(query, (origin_id,)) r = cur.fetchone() if not r: return None return r @staticmethod def mangle_query_key(key, main_table): if key == 'id': return 't.id' if key == 'parents': return ''' ARRAY( SELECT rh.parent_id::bytea FROM revision_history rh WHERE rh.id = t.id ORDER BY rh.parent_rank )''' if '_' not in key: return '%s.%s' % (main_table, key) head, tail = key.split('_', 1) if (head in ('author', 'committer') and tail in ('name', 'email', 'id', 'fullname')): return '%s.%s' % (head, tail) return '%s.%s' % (main_table, key) def revision_get_from_list(self, revisions, cur=None): cur = self._cursor(cur) query_keys = ', '.join( self.mangle_query_key(k, 'revision') for k in self.revision_get_cols ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(id) LEFT JOIN revision ON t.id = revision.id LEFT JOIN person author ON revision.author = author.id LEFT JOIN person committer ON revision.committer = committer.id """ % query_keys, ((id,) for id in revisions)) def revision_log(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_log(%%s, %%s) """ % ', '.join(self.revision_get_cols) cur.execute(query, (root_revisions, limit)) yield from cur revision_shortlog_cols = ['id', 'parents'] def revision_shortlog(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_list(%%s, %%s) """ % ', '.join(self.revision_shortlog_cols) cur.execute(query, (root_revisions, limit)) yield from cur def release_missing_from_list(self, releases, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM release r WHERE r.id = t.id ) """, ((id,) for id in releases)) object_find_by_sha1_git_cols = ['sha1_git', 'type', 'id', 'object_id'] def object_find_by_sha1_git(self, ids, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ WITH t (id) AS (VALUES %s), known_objects as (( select id as sha1_git, 'release'::object_type as type, id, object_id from release r where exists (select 1 from t where t.id = r.id) ) union all ( select id as sha1_git, 'revision'::object_type as type, id, object_id from revision r where exists (select 1 from t where t.id = r.id) ) union all ( select id as sha1_git, 'directory'::object_type as type, id, object_id from directory d where exists (select 1 from t where t.id = d.id) ) union all ( select sha1_git as sha1_git, 'content'::object_type as type, sha1 as id, object_id from content c where exists (select 1 from t where t.id = c.sha1_git) )) select t.id as sha1_git, k.type, k.id, k.object_id from t left join known_objects k on t.id = k.sha1_git """, ((id,) for id in ids) ) def stat_counters(self, cur=None): cur = self._cursor(cur) cur.execute('SELECT * FROM 
swh_stat_counters()') yield from cur fetch_history_cols = ['origin', 'date', 'status', 'result', 'stdout', 'stderr', 'duration'] def create_fetch_history(self, fetch_history, cur=None): """Create a fetch_history entry with the data in fetch_history""" cur = self._cursor(cur) query = '''INSERT INTO fetch_history (%s) VALUES (%s) RETURNING id''' % ( ','.join(self.fetch_history_cols), ','.join(['%s'] * len(self.fetch_history_cols)) ) cur.execute(query, [fetch_history.get(col) for col in self.fetch_history_cols]) return cur.fetchone()[0] def get_fetch_history(self, fetch_history_id, cur=None): """Get a fetch_history entry with the given id""" cur = self._cursor(cur) query = '''SELECT %s FROM fetch_history WHERE id=%%s''' % ( ', '.join(self.fetch_history_cols), ) cur.execute(query, (fetch_history_id,)) data = cur.fetchone() if not data: return None ret = {'id': fetch_history_id} for i, col in enumerate(self.fetch_history_cols): ret[col] = data[i] return ret def update_fetch_history(self, fetch_history, cur=None): """Update the fetch_history entry from the data in fetch_history""" cur = self._cursor(cur) query = '''UPDATE fetch_history SET %s WHERE id=%%s''' % ( ','.join('%s=%%s' % col for col in self.fetch_history_cols) ) cur.execute(query, [jsonize(fetch_history.get(col)) for col in self.fetch_history_cols + ['id']]) def origin_add(self, type, url, cur=None): """Insert a new origin and return the new identifier.""" insert = """INSERT INTO origin (type, url) values (%s, %s) RETURNING id""" cur.execute(insert, (type, url)) return cur.fetchone()[0] origin_cols = ['id', 'type', 'url'] def origin_get_by_url(self, origins, cur=None): """Retrieve origin `(id, type, url)` from urls if found.""" cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(url) LEFT JOIN origin ON t.url = origin.url """ % ','.join('origin.' + col for col in self.origin_cols) yield from execute_values_generator( cur, query, ((url,) for url in origins)) def origin_get_by_id(self, ids, cur=None): """Retrieve origin `(id, type, url)` from ids if found. """ cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(id) LEFT JOIN origin ON t.id = origin.id """ % ','.join('origin.' + col for col in self.origin_cols) yield from execute_values_generator( cur, query, ((id,) for id in ids)) def origin_get_range(self, origin_from=1, origin_count=100, cur=None): """Retrieve ``origin_count`` origins whose ids are greater or equal than ``origin_from``. Origins are sorted by id before retrieving them. Args: origin_from (int): the minimum id of origins to retrieve origin_count (int): the maximum number of origins to retrieve """ cur = self._cursor(cur) query = """SELECT %s FROM origin WHERE id >= %%s ORDER BY id LIMIT %%s """ % ','.join(self.origin_cols) cur.execute(query, (origin_from, origin_count)) yield from cur def _origin_query(self, url_pattern, count=False, offset=0, limit=50, regexp=False, with_visit=False, cur=None): """ Method factorizing query creation for searching and counting origins. 
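# A minimal sketch of the two public callers that drive this helper
# (illustrative only, not part of the change itself); `db` and `cur` are
# placeholders for an already-opened Db instance and cursor.
hits = list(db.origin_search('gitlab', limit=10, with_visit=True, cur=cur))
total = db.origin_count('gitlab', with_visit=True, cur=cur)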
""" cur = self._cursor(cur) if count: origin_cols = 'COUNT(*)' else: origin_cols = ','.join(self.origin_cols) query = """SELECT %s FROM origin WHERE """ if with_visit: query += """ EXISTS (SELECT 1 from origin_visit WHERE origin=origin.id) AND """ query += 'url %s %%s ' if not count: query += 'ORDER BY id OFFSET %%s LIMIT %%s' if not regexp: query = query % (origin_cols, 'ILIKE') query_params = ('%'+url_pattern+'%', offset, limit) else: query = query % (origin_cols, '~*') query_params = (url_pattern, offset, limit) if count: query_params = (query_params[0],) cur.execute(query, query_params) def origin_search(self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False, cur=None): """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls offset (int): number of found origins to skip before returning results limit (int): the maximum number of found origins to return regexp (bool): if True, consider the provided pattern as a regular expression and returns origins whose urls match it with_visit (bool): if True, filter out origins with no visit """ self._origin_query(url_pattern, offset=offset, limit=limit, regexp=regexp, with_visit=with_visit, cur=cur) yield from cur def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None): """Count origins whose urls contain a provided string pattern or match a provided regular expression. The pattern search in origin urls is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls regexp (bool): if True, consider the provided pattern as a regular expression and returns origins whose urls match it with_visit (bool): if True, filter out origins with no visit """ self._origin_query(url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur) return cur.fetchone()[0] release_add_cols = [ 'id', 'target', 'target_type', 'date', 'date_offset', 'date_neg_utc_offset', 'name', 'comment', 'synthetic', 'author_fullname', 'author_name', 'author_email', ] - release_get_cols = release_add_cols + ['author_id'] + release_get_cols = release_add_cols def release_get_from_list(self, releases, cur=None): cur = self._cursor(cur) query_keys = ', '.join( self.mangle_query_key(k, 'release') for k in self.release_get_cols ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(id) LEFT JOIN release ON t.id = release.id LEFT JOIN person author ON release.author = author.id """ % query_keys, ((id,) for id in releases)) def origin_metadata_add(self, origin, ts, provider, tool, metadata, cur=None): """ Add an origin_metadata for the origin at ts with provider, tool and metadata. 
Args: origin (int): the origin's id for which the metadata is added ts (datetime): time when the metadata was found provider (int): the metadata provider identifier tool (int): the tool's identifier used to extract metadata metadata (jsonb): the metadata retrieved at the time and location Returns: id (int): the origin_metadata unique id """ cur = self._cursor(cur) insert = """INSERT INTO origin_metadata (origin_id, discovery_date, provider_id, tool_id, metadata) values (%s, %s, %s, %s, %s) RETURNING id""" cur.execute(insert, (origin, ts, provider, tool, jsonize(metadata))) return cur.fetchone()[0] origin_metadata_get_cols = ['origin_id', 'discovery_date', 'tool_id', 'metadata', 'provider_id', 'provider_name', 'provider_type', 'provider_url'] def origin_metadata_get_by(self, origin_id, provider_type=None, cur=None): """Retrieve all origin_metadata entries for one origin_id """ cur = self._cursor(cur) if not provider_type: query = '''SELECT %s FROM swh_origin_metadata_get_by_origin( %%s)''' % (','.join( self.origin_metadata_get_cols)) cur.execute(query, (origin_id, )) else: query = '''SELECT %s FROM swh_origin_metadata_get_by_provider_type( %%s, %%s)''' % (','.join( self.origin_metadata_get_cols)) cur.execute(query, (origin_id, provider_type)) yield from cur tool_cols = ['id', 'name', 'version', 'configuration'] @stored_procedure('swh_mktemp_tool') def mktemp_tool(self, cur=None): pass def tool_add_from_temp(self, cur=None): cur = self._cursor(cur) cur.execute("SELECT %s from swh_tool_add()" % ( ','.join(self.tool_cols), )) yield from cur def tool_get(self, name, version, configuration, cur=None): cur = self._cursor(cur) cur.execute('''select %s from tool where name=%%s and version=%%s and configuration=%%s''' % ( ','.join(self.tool_cols)), (name, version, configuration)) return cur.fetchone() metadata_provider_cols = ['id', 'provider_name', 'provider_type', 'provider_url', 'metadata'] def metadata_provider_add(self, provider_name, provider_type, provider_url, metadata, cur=None): """Insert a new provider and return the new identifier.""" cur = self._cursor(cur) insert = """INSERT INTO metadata_provider (provider_name, provider_type, provider_url, metadata) values (%s, %s, %s, %s) RETURNING id""" cur.execute(insert, (provider_name, provider_type, provider_url, jsonize(metadata))) return cur.fetchone()[0] def metadata_provider_get(self, provider_id, cur=None): cur = self._cursor(cur) cur.execute('''select %s from metadata_provider where id=%%s ''' % ( ','.join(self.metadata_provider_cols)), (provider_id, )) return cur.fetchone() def metadata_provider_get_by(self, provider_name, provider_url, cur=None): cur = self._cursor(cur) cur.execute('''select %s from metadata_provider where provider_name=%%s and provider_url=%%s''' % ( ','.join(self.metadata_provider_cols)), (provider_name, provider_url)) return cur.fetchone() diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py index 25a291b7..d642bc5a 100644 --- a/swh/storage/in_memory.py +++ b/swh/storage/in_memory.py @@ -1,1711 +1,1710 @@ # Copyright (C) 2015-2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import os import re import bisect import dateutil import collections from collections import defaultdict import copy import datetime import itertools import random import warnings from swh.model.hashutil import DEFAULT_ALGORITHMS from swh.model.identifiers 
import normalize_timestamp from swh.objstorage import get_objstorage from swh.objstorage.exc import ObjNotFoundError from .journal_writer import get_journal_writer # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 def now(): return datetime.datetime.now(tz=datetime.timezone.utc) ENABLE_ORIGIN_IDS = \ os.environ.get('SWH_STORAGE_IN_MEMORY_ENABLE_ORIGIN_IDS', 'true') == 'true' class Storage: def __init__(self, journal_writer=None): self._contents = {} self._content_indexes = defaultdict(lambda: defaultdict(set)) self._skipped_contents = {} self._skipped_content_indexes = defaultdict(lambda: defaultdict(set)) self.reset() if journal_writer: self.journal_writer = get_journal_writer(**journal_writer) else: self.journal_writer = None def reset(self): self._directories = {} self._revisions = {} self._releases = {} self._snapshots = {} self._origins = {} self._origins_by_id = [] self._origin_visits = {} self._persons = [] self._origin_metadata = defaultdict(list) self._tools = {} self._metadata_providers = {} self._objects = defaultdict(list) # ideally we would want a skip list for both fast inserts and searches self._sorted_sha1s = [] self.objstorage = get_objstorage('memory', {}) def check_config(self, *, check_write): """Check that the storage is configured and ready to go.""" return True def _content_add(self, contents, with_data): if self.journal_writer: for content in contents: if 'data' in content: content = content.copy() del content['data'] self.journal_writer.write_addition('content', content) content_with_data = [] content_without_data = [] for content in contents: if 'status' not in content: content['status'] = 'visible' if 'length' not in content: content['length'] = -1 if content['status'] == 'visible': content_with_data.append(content) elif content['status'] == 'absent': content_without_data.append(content) count_content_added, count_content_bytes_added = \ self._content_add_present(content_with_data, with_data) count_skipped_content_added = self._content_add_absent( content_without_data ) summary = { 'content:add': count_content_added, 'skipped_content:add': count_skipped_content_added, } if with_data: summary['content:add:bytes'] = count_content_bytes_added return summary def _content_add_present(self, contents, with_data): count_content_added = 0 count_content_bytes_added = 0 for content in contents: key = self._content_key(content) if key in self._contents: continue for algorithm in DEFAULT_ALGORITHMS: if content[algorithm] in self._content_indexes[algorithm]\ and (algorithm not in {'blake2s256', 'sha256'}): from . 
import HashCollision raise HashCollision(algorithm, content[algorithm], key) for algorithm in DEFAULT_ALGORITHMS: self._content_indexes[algorithm][content[algorithm]].add(key) self._objects[content['sha1_git']].append( ('content', content['sha1'])) self._contents[key] = copy.deepcopy(content) bisect.insort(self._sorted_sha1s, content['sha1']) count_content_added += 1 if with_data: content_data = self._contents[key].pop('data') count_content_bytes_added += len(content_data) self.objstorage.add(content_data, content['sha1']) return (count_content_added, count_content_bytes_added) def _content_add_absent(self, contents): count = 0 skipped_content_missing = self.skipped_content_missing(contents) for content in skipped_content_missing: key = self._content_key(content) for algo in DEFAULT_ALGORITHMS: self._skipped_content_indexes[algo][content[algo]].add(key) self._skipped_contents[key] = copy.deepcopy(content) count += 1 return count def content_add(self, content): """Add content blobs to the storage Args: content (iterable): iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - data (bytes): the actual content - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent - reason (str): if status = absent, the reason why - origin (int): if status = absent, the origin we saw the content in Raises: HashCollision in case of collision Returns: Summary dict with the following key and associated values: content:add: New contents added content_bytes:add: Sum of the contents' length data skipped_content:add: New skipped contents (no data) added """ content = [dict(c.items()) for c in content] # semi-shallow copy now = datetime.datetime.now(tz=datetime.timezone.utc) for item in content: item['ctime'] = now return self._content_add(content, with_data=True) def content_add_metadata(self, content): """Add content metadata to the storage (like `content_add`, but without inserting to the objstorage). Args: content (iterable): iterable of dictionaries representing individual pieces of content to add. Each dictionary has the following keys: - length (int): content length (default: -1) - one key for each checksum algorithm in :data:`swh.model.hashutil.DEFAULT_ALGORITHMS`, mapped to the corresponding checksum - status (str): one of visible, hidden, absent - reason (str): if status = absent, the reason why - origin (int): if status = absent, the origin we saw the content in - ctime (datetime): time of insertion in the archive Raises: HashCollision in case of collision Returns: Summary dict with the following key and associated values: content:add: New contents added skipped_content:add: New skipped contents (no data) added """ return self._content_add(content, with_data=False) def content_get(self, content): """Retrieve in bulk contents and their data. This function may yield more blobs than provided sha1 identifiers, in case they collide. Args: content: iterables of sha1 Yields: Dict[str, bytes]: Generates streams of contents as dict with their raw data: - sha1 (bytes): content id - data (bytes): content's raw data Raises: ValueError in case of too much contents are required. cf. BULK_BLOCK_CONTENT_LEN_MAX """ # FIXME: Make this method support slicing the `data`. if len(content) > BULK_BLOCK_CONTENT_LEN_MAX: raise ValueError( "Sending at most %s contents." 
% BULK_BLOCK_CONTENT_LEN_MAX) for obj_id in content: try: data = self.objstorage.get(obj_id) except ObjNotFoundError: yield None continue yield {'sha1': obj_id, 'data': data} def content_get_range(self, start, end, limit=1000, db=None, cur=None): """Retrieve contents within range [start, end] bound by limit. Note that this function may return more than one blob per hash. The limit is enforced with multiplicity (ie. two blobs with the same hash will count twice toward the limit). Args: **start** (bytes): Starting identifier range (expected smaller than end) **end** (bytes): Ending identifier range (expected larger than start) **limit** (int): Limit result (default to 1000) Returns: a dict with keys: - contents [dict]: iterable of contents in between the range. - next (bytes): There remains content in the range starting from this next sha1 """ if limit is None: raise ValueError('Development error: limit should not be None') from_index = bisect.bisect_left(self._sorted_sha1s, start) sha1s = itertools.islice(self._sorted_sha1s, from_index, None) sha1s = ((sha1, content_key) for sha1 in sha1s for content_key in self._content_indexes['sha1'][sha1]) matched = [] next_content = None for sha1, key in sha1s: if sha1 > end: break if len(matched) >= limit: next_content = sha1 break matched.append({ **self._contents[key], }) return { 'contents': matched, 'next': next_content, } def content_get_metadata(self, content): """Retrieve content metadata in bulk Args: content: iterable of content identifiers (sha1) Returns: an iterable with content metadata corresponding to the given ids """ # FIXME: the return value should be a mapping from search key to found # content*s* for sha1 in content: if sha1 in self._content_indexes['sha1']: objs = self._content_indexes['sha1'][sha1] # FIXME: rather than selecting one of the objects with that # hash, we should return all of them. See: # https://forge.softwareheritage.org/D645?id=1994#inline-3389 key = random.sample(objs, 1)[0] data = copy.deepcopy(self._contents[key]) data.pop('ctime') yield data else: # FIXME: should really be None yield { 'sha1': sha1, 'sha1_git': None, 'sha256': None, 'blake2s256': None, 'length': None, 'status': None, } def content_find(self, content): if not set(content).intersection(DEFAULT_ALGORITHMS): raise ValueError('content keys must contain at least one of: ' '%s' % ', '.join(sorted(DEFAULT_ALGORITHMS))) found = [] for algo in DEFAULT_ALGORITHMS: hash = content.get(algo) if hash and hash in self._content_indexes[algo]: found.append(self._content_indexes[algo][hash]) if not found: return [] keys = list(set.intersection(*found)) return copy.deepcopy([self._contents[key] for key in keys]) def content_missing(self, content, key_hash='sha1'): """List content missing from storage Args: contents ([dict]): iterable of dictionaries whose keys are either 'length' or an item of :data:`swh.model.hashutil.ALGORITHMS`; mapped to the corresponding checksum (or length). key_hash (str): name of the column to use as hash id result (default: 'sha1') Returns: iterable ([bytes]): missing content ids (as per the key_hash column) """ for cont in content: for (algo, hash_) in cont.items(): if algo not in DEFAULT_ALGORITHMS: continue if hash_ not in self._content_indexes.get(algo, []): yield cont[key_hash] break else: for result in self.content_find(cont): if result['status'] == 'missing': yield cont[key_hash] def content_missing_per_sha1(self, contents): """List content missing from storage based only on sha1. 
Args: contents: Iterable of sha1 to check for absence. Returns: iterable: missing ids Raises: TODO: an exception when we get a hash collision. """ for content in contents: if content not in self._content_indexes['sha1']: yield content def skipped_content_missing(self, contents): """List all skipped_content missing from storage Args: contents: Iterable of sha1 to check for skipped content entry Returns: iterable: dict of skipped content entry """ for content in contents: for (key, algorithm) in self._content_key_algorithm(content): if algorithm == 'blake2s256': continue if key not in self._skipped_content_indexes[algorithm]: # index must contain hashes of algos except blake2s256 # else the content is considered skipped yield content break def directory_add(self, directories): """Add directories to the storage Args: directories (iterable): iterable of dictionaries representing the individual directories to add. Each dict has the following keys: - id (sha1_git): the id of the directory to add - entries (list): list of dicts for each entry in the directory. Each dict has the following keys: - name (bytes) - type (one of 'file', 'dir', 'rev'): type of the directory entry (file, directory, revision) - target (sha1_git): id of the object pointed at by the directory entry - perms (int): entry permissions Returns: Summary dict of keys with associated count as values: directory:add: Number of directories actually added """ if self.journal_writer: self.journal_writer.write_additions('directory', directories) count = 0 for directory in directories: if directory['id'] not in self._directories: count += 1 self._directories[directory['id']] = copy.deepcopy(directory) self._objects[directory['id']].append( ('directory', directory['id'])) return {'directory:add': count} def directory_missing(self, directories): """List directories missing from storage Args: directories (iterable): an iterable of directory ids Yields: missing directory ids """ for id in directories: if id not in self._directories: yield id def _join_dentry_to_content(self, dentry): keys = ( 'status', 'sha1', 'sha1_git', 'sha256', 'length', ) ret = dict.fromkeys(keys) ret.update(dentry) if ret['type'] == 'file': # TODO: Make it able to handle more than one content content = self.content_find({'sha1_git': ret['target']}) if content: content = content[0] for key in keys: ret[key] = content[key] return ret def _directory_ls(self, directory_id, recursive, prefix=b''): if directory_id in self._directories: for entry in self._directories[directory_id]['entries']: ret = self._join_dentry_to_content(entry) ret['name'] = prefix + ret['name'] ret['dir_id'] = directory_id yield ret if recursive and ret['type'] == 'dir': yield from self._directory_ls( ret['target'], True, prefix + ret['name'] + b'/') def directory_ls(self, directory, recursive=False): """Get entries for one directory. Args: - directory: the directory to list entries from. - recursive: if flag on, this list recursively from this directory. Returns: List of entries for such directory. If `recursive=True`, names in the path of a dir/file not at the root are concatenated with a slash (`/`). """ yield from self._directory_ls(directory, recursive) def directory_entry_get_by_path(self, directory, paths): """Get the directory entry (either file or dir) from directory with path. Args: - directory: sha1 of the top level directory - paths: path to lookup from the top level directory. From left (top) to right (bottom). Returns: The corresponding directory entry if found, None otherwise. 
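# A minimal sketch against the in-memory backend (illustrative only, not part
# of the change itself); the root hash and path components are made up and
# would have to be loaded with directory_add beforehand.
storage = Storage()
entry = storage.directory_entry_get_by_path(
    b'\x00' * 20,                # sha1_git of the top-level directory
    [b'docs', b'index.rst'])     # path components, from top to bottom
# `entry` is the matching entry dict, or None if any component is missing.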
""" return self._directory_entry_get_by_path(directory, paths, b'') def _directory_entry_get_by_path(self, directory, paths, prefix): if not paths: return contents = list(self.directory_ls(directory)) if not contents: return def _get_entry(entries, name): for entry in entries: if entry['name'] == name: entry = entry.copy() entry['name'] = prefix + entry['name'] return entry first_item = _get_entry(contents, paths[0]) if len(paths) == 1: return first_item if not first_item or first_item['type'] != 'dir': return return self._directory_entry_get_by_path( first_item['target'], paths[1:], prefix + paths[0] + b'/') def revision_add(self, revisions): """Add revisions to the storage Args: revisions (Iterable[dict]): iterable of dictionaries representing the individual revisions to add. Each dict has the following keys: - **id** (:class:`sha1_git`): id of the revision to add - **date** (:class:`dict`): date the revision was written - **committer_date** (:class:`dict`): date the revision got added to the origin - **type** (one of 'git', 'tar'): type of the revision added - **directory** (:class:`sha1_git`): the directory the revision points at - **message** (:class:`bytes`): the message associated with the revision - **author** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email - **committer** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email - **metadata** (:class:`jsonb`): extra information as dictionary - **synthetic** (:class:`bool`): revision's nature (tarball, directory creates synthetic revision`) - **parents** (:class:`list[sha1_git]`): the parents of this revision date dictionaries have the form defined in :mod:`swh.model`. Returns: Summary dict of keys with associated count as values revision_added: New objects actually stored in db """ if self.journal_writer: self.journal_writer.write_additions('revision', revisions) count = 0 for revision in revisions: if revision['id'] not in self._revisions: self._revisions[revision['id']] = rev = copy.deepcopy(revision) self._person_add(rev['committer']) self._person_add(rev['author']) rev['date'] = normalize_timestamp(rev.get('date')) rev['committer_date'] = normalize_timestamp( rev.get('committer_date')) self._objects[revision['id']].append( ('revision', revision['id'])) count += 1 return {'revision:add': count} def revision_missing(self, revisions): """List revisions missing from storage Args: revisions (iterable): revision ids Yields: missing revision ids """ for id in revisions: if id not in self._revisions: yield id def revision_get(self, revisions): for id in revisions: yield copy.deepcopy(self._revisions.get(id)) def _get_parent_revs(self, rev_id, seen, limit): if limit and len(seen) >= limit: return if rev_id in seen or rev_id not in self._revisions: return seen.add(rev_id) yield self._revisions[rev_id] for parent in self._revisions[rev_id]['parents']: yield from self._get_parent_revs(parent, seen, limit) def revision_log(self, revisions, limit=None): """Fetch revision entry from the given root revisions. Args: revisions: array of root revision to lookup limit: limitation on the output result. Default to None. Yields: List of revision log from such revisions root. """ seen = set() for rev_id in revisions: yield from self._get_parent_revs(rev_id, seen, limit) def revision_shortlog(self, revisions, limit=None): """Fetch the shortlog for the given revisions Args: revisions: list of root revisions to lookup limit: depth limitation for the output Yields: a list of (id, parents) tuples. 
""" yield from ((rev['id'], rev['parents']) for rev in self.revision_log(revisions, limit)) def release_add(self, releases): """Add releases to the storage Args: releases (Iterable[dict]): iterable of dictionaries representing the individual releases to add. Each dict has the following keys: - **id** (:class:`sha1_git`): id of the release to add - **revision** (:class:`sha1_git`): id of the revision the release points to - **date** (:class:`dict`): the date the release was made - **name** (:class:`bytes`): the name of the release - **comment** (:class:`bytes`): the comment associated with the release - **author** (:class:`Dict[str, bytes]`): dictionary with keys: name, fullname, email the date dictionary has the form defined in :mod:`swh.model`. Returns: Summary dict of keys with associated count as values release:add: New objects contents actually stored in db """ if self.journal_writer: self.journal_writer.write_additions('release', releases) count = 0 for rel in releases: if rel['id'] not in self._releases: rel = copy.deepcopy(rel) rel['date'] = normalize_timestamp(rel['date']) if rel['author']: self._person_add(rel['author']) self._objects[rel['id']].append( ('release', rel['id'])) self._releases[rel['id']] = rel count += 1 return {'release:add': count} def release_missing(self, releases): """List releases missing from storage Args: releases: an iterable of release ids Returns: a list of missing release ids """ yield from (rel for rel in releases if rel not in self._releases) def release_get(self, releases): """Given a list of sha1, return the releases's information Args: releases: list of sha1s Yields: dicts with the same keys as those given to `release_add` (or ``None`` if a release does not exist) """ for rel_id in releases: yield copy.deepcopy(self._releases.get(rel_id)) def snapshot_add(self, snapshots): """Add a snapshot to the storage Args: snapshot ([dict]): the snapshots to add, containing the following keys: - **id** (:class:`bytes`): id of the snapshot - **branches** (:class:`dict`): branches the snapshot contains, mapping the branch name (:class:`bytes`) to the branch target, itself a :class:`dict` (or ``None`` if the branch points to an unknown object) - **target_type** (:class:`str`): one of ``content``, ``directory``, ``revision``, ``release``, ``snapshot``, ``alias`` - **target** (:class:`bytes`): identifier of the target (currently a ``sha1_git`` for all object kinds, or the name of the target branch for aliases) Raises: ValueError: if the origin's or visit's identifier does not exist. Returns: Summary dict of keys with associated count as values snapshot_added: Count of object actually stored in db """ count = 0 for snapshot in snapshots: snapshot_id = snapshot['id'] if snapshot_id not in self._snapshots: if self.journal_writer: self.journal_writer.write_addition('snapshot', snapshot) self._snapshots[snapshot_id] = { 'id': snapshot_id, 'branches': copy.deepcopy(snapshot['branches']), '_sorted_branch_names': sorted(snapshot['branches']) } self._objects[snapshot_id].append(('snapshot', snapshot_id)) count += 1 return {'snapshot:add': count} def snapshot_get(self, snapshot_id): """Get the content, possibly partial, of a snapshot with the given id The branches of the snapshot are iterated in the lexicographical order of their names. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. In order to browse the whole set of branches, the method :meth:`snapshot_get_branches` should be used instead. 
Args: snapshot_id (bytes): identifier of the snapshot Returns: dict: a dict with three keys: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than 1000 branches. """ return self.snapshot_get_branches(snapshot_id) def snapshot_get_by_origin_visit(self, origin, visit): """Get the content, possibly partial, of a snapshot for the given origin visit The branches of the snapshot are iterated in the lexicographical order of their names. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. In order to browse the whole set of branches, the method :meth:`snapshot_get_branches` should be used instead. Args: origin (int): the origin's identifier visit (int): the visit's identifier Returns: dict: None if the snapshot does not exist; a dict with three keys otherwise: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than 1000 branches. """ origin_url = self._get_origin_url(origin) if not origin_url: return if origin_url not in self._origins or \ visit > len(self._origin_visits[origin_url]): return None snapshot_id = self._origin_visits[origin_url][visit-1]['snapshot'] if snapshot_id: return self.snapshot_get(snapshot_id) else: return None def snapshot_get_latest(self, origin, allowed_statuses=None): """Get the content, possibly partial, of the latest snapshot for the given origin, optionally only from visits that have one of the given allowed_statuses The branches of the snapshot are iterated in the lexicographical order of their names. .. warning:: At most 1000 branches contained in the snapshot will be returned for performance reasons. In order to browse the whole set of branches, the methods :meth:`origin_visit_get_latest` and :meth:`snapshot_get_branches` should be used instead. Args: origin (Union[str,int]): the origin's URL or identifier allowed_statuses (list of str): list of visit statuses considered to find the latest snapshot for the origin. For instance, ``allowed_statuses=['full']`` will only consider visits that have successfully run to completion. Returns: dict: a dict with three keys: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than 1000 branches. 
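# A minimal sketch against the in-memory backend (illustrative only, not part
# of the change itself); the origin URL is made up and would have to be
# visited beforehand.
storage = Storage()
snapshot = storage.snapshot_get_latest('https://example.org/repo.git',
                                       allowed_statuses=['full'])
if snapshot is not None:
    branches = snapshot['branches']      # at most 1000 branches
    resume_at = snapshot['next_branch']  # resume point, or None if complete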
""" origin_url = self._get_origin_url(origin) if not origin_url: return visit = self.origin_visit_get_latest( origin_url, allowed_statuses=allowed_statuses, require_snapshot=True) if visit and visit['snapshot']: snapshot = self.snapshot_get(visit['snapshot']) if not snapshot: raise ValueError( 'last origin visit references an unknown snapshot') return snapshot def snapshot_count_branches(self, snapshot_id, db=None, cur=None): """Count the number of branches in the snapshot with the given id Args: snapshot_id (bytes): identifier of the snapshot Returns: dict: A dict whose keys are the target types of branches and values their corresponding amount """ branches = list(self._snapshots[snapshot_id]['branches'].values()) return collections.Counter(branch['target_type'] if branch else None for branch in branches) def snapshot_get_branches(self, snapshot_id, branches_from=b'', branches_count=1000, target_types=None): """Get the content, possibly partial, of a snapshot with the given id The branches of the snapshot are iterated in the lexicographical order of their names. Args: snapshot_id (bytes): identifier of the snapshot branches_from (bytes): optional parameter used to skip branches whose name is lesser than it before returning them branches_count (int): optional parameter used to restrain the amount of returned branches target_types (list): optional parameter used to filter the target types of branch to return (possible values that can be contained in that list are `'content', 'directory', 'revision', 'release', 'snapshot', 'alias'`) Returns: dict: None if the snapshot does not exist; a dict with three keys otherwise: * **id**: identifier of the snapshot * **branches**: a dict of branches contained in the snapshot whose keys are the branches' names. * **next_branch**: the name of the first branch not returned or :const:`None` if the snapshot has less than `branches_count` branches after `branches_from` included. """ snapshot = self._snapshots.get(snapshot_id) if snapshot is None: return None sorted_branch_names = snapshot['_sorted_branch_names'] from_index = bisect.bisect_left( sorted_branch_names, branches_from) if target_types: next_branch = None branches = {} for branch_name in sorted_branch_names[from_index:]: branch = snapshot['branches'][branch_name] if branch and branch['target_type'] in target_types: if len(branches) < branches_count: branches[branch_name] = branch else: next_branch = branch_name break else: # As there is no 'target_types', we can do that much faster to_index = from_index + branches_count returned_branch_names = sorted_branch_names[from_index:to_index] branches = {branch_name: snapshot['branches'][branch_name] for branch_name in returned_branch_names} if to_index >= len(sorted_branch_names): next_branch = None else: next_branch = sorted_branch_names[to_index] return { 'id': snapshot_id, 'branches': branches, 'next_branch': next_branch, } def object_find_by_sha1_git(self, ids, db=None, cur=None): """Return the objects found with the given ids. Args: ids: a generator of sha1_gits Returns: dict: a mapping from id to the list of objects found. Each object found is itself a dict with keys: - sha1_git: the input id - type: the type of object found - id: the id of the object found - object_id: the numeric id of the object found. 
""" ret = {} for id_ in ids: objs = self._objects.get(id_, []) ret[id_] = [{ 'sha1_git': id_, 'type': obj[0], 'id': obj[1], 'object_id': id_, } for obj in objs] return ret def origin_get(self, origins): """Return origins, either all identified by their ids or all identified by tuples (type, url). If the url is given and the type is omitted, one of the origins with that url is returned. Args: origin: a list of dictionaries representing the individual origins to find. These dicts have either the key url (and optionally type): - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to or the id: - id (int): the origin's identifier Returns: dict: the origin dictionary with the keys: - id: origin's id - type: origin's type - url: origin's url Raises: ValueError: if the keys does not match (url and type) nor id. """ if isinstance(origins, dict): # Old API return_single = True origins = [origins] else: return_single = False # Sanity check to be error-compatible with the pgsql backend if any('id' in origin for origin in origins) \ and not all('id' in origin for origin in origins): raise ValueError( 'Either all origins or none at all should have an "id".') if any('url' in origin for origin in origins) \ and not all('url' in origin for origin in origins): raise ValueError( 'Either all origins or none at all should have ' 'an "url" key.') results = [] for origin in origins: result = None if 'id' in origin: assert ENABLE_ORIGIN_IDS, 'origin ids are disabled' if origin['id'] <= len(self._origins_by_id): result = self._origins[self._origins_by_id[origin['id']-1]] elif 'url' in origin: if origin['url'] in self._origins: result = copy.deepcopy(self._origins[origin['url']]) else: raise ValueError( 'Origin must have either id or url.') results.append(result) if return_single: assert len(results) == 1 return results[0] else: return results def origin_get_range(self, origin_from=1, origin_count=100): """Retrieve ``origin_count`` origins whose ids are greater or equal than ``origin_from``. Origins are sorted by id before retrieving them. Args: origin_from (int): the minimum id of origins to retrieve origin_count (int): the maximum number of origins to retrieve Yields: dicts containing origin information as returned by :meth:`swh.storage.in_memory.Storage.origin_get`. """ origin_from = max(origin_from, 1) if origin_from <= len(self._origins_by_id): max_idx = origin_from + origin_count - 1 if max_idx > len(self._origins_by_id): max_idx = len(self._origins_by_id) for idx in range(origin_from-1, max_idx): yield copy.deepcopy(self._origins[self._origins_by_id[idx]]) def origin_search(self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False, db=None, cur=None): """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls offset (int): number of found origins to skip before returning results limit (int): the maximum number of found origins to return regexp (bool): if True, consider the provided pattern as a regular expression and return origins whose urls match it with_visit (bool): if True, filter out origins with no visit Returns: An iterable of dict containing origin information as returned by :meth:`swh.storage.storage.Storage.origin_get`. 
""" origins = self._origins.values() if regexp: pat = re.compile(url_pattern) origins = [orig for orig in origins if pat.search(orig['url'])] else: origins = [orig for orig in origins if url_pattern in orig['url']] if with_visit: origins = [orig for orig in origins if len(self._origin_visits[orig['url']]) > 0] if ENABLE_ORIGIN_IDS: origins.sort(key=lambda origin: origin['id']) origins = copy.deepcopy(origins[offset:offset+limit]) return origins def origin_count(self, url_pattern, regexp=False, with_visit=False, db=None, cur=None): """Count origins whose urls contain a provided string pattern or match a provided regular expression. The pattern search in origin urls is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls regexp (bool): if True, consider the provided pattern as a regular expression and return origins whose urls match it with_visit (bool): if True, filter out origins with no visit Returns: int: The number of origins matching the search criterion. """ return len(self.origin_search(url_pattern, regexp=regexp, with_visit=with_visit, limit=len(self._origins))) def origin_add(self, origins): """Add origins to the storage Args: origins: list of dictionaries representing the individual origins, with the following keys: - type: the origin type ('git', 'svn', 'deb', ...) - url (bytes): the url the origin points to Returns: list: given origins as dict updated with their id """ origins = copy.deepcopy(origins) for origin in origins: if ENABLE_ORIGIN_IDS: origin['id'] = self.origin_add_one(origin) else: self.origin_add_one(origin) return origins def origin_add_one(self, origin): """Add origin to the storage Args: origin: dictionary representing the individual origin to add. This dict has the following keys: - type (FIXME: enum TBD): the origin type ('git', 'wget', ...) - url (bytes): the url the origin points to Returns: the id of the added origin, or of the identical one that already exists. """ origin = copy.deepcopy(origin) assert 'id' not in origin if origin['url'] in self._origins: if ENABLE_ORIGIN_IDS: origin_id = self._origins[origin['url']]['id'] else: if self.journal_writer: self.journal_writer.write_addition('origin', origin) if ENABLE_ORIGIN_IDS: # origin ids are in the range [1, +inf[ origin_id = len(self._origins) + 1 origin['id'] = origin_id self._origins_by_id.append(origin['url']) assert len(self._origins_by_id) == origin_id self._origins[origin['url']] = origin self._origin_visits[origin['url']] = [] self._objects[origin['url']].append(('origin', origin['url'])) if ENABLE_ORIGIN_IDS: return origin_id else: return origin['url'] def fetch_history_start(self, origin_id): """Add an entry for origin origin_id in fetch_history. Returns the id of the added fetch_history entry """ assert not ENABLE_ORIGIN_IDS, 'origin ids are disabled' pass def fetch_history_end(self, fetch_history_id, data): """Close the fetch_history entry with id `fetch_history_id`, replacing its data with `data`. """ pass def fetch_history_get(self, fetch_history_id): """Get the fetch_history entry with id `fetch_history_id`. """ raise NotImplementedError('fetch_history_get is deprecated, use ' 'origin_visit_get instead.') def origin_visit_add(self, origin, date=None, type=None, *, ts=None): """Add an origin_visit for the origin at date with status 'ongoing'. For backward compatibility, `type` is optional and defaults to the origin's type. 
        Args:
            origin (Union[int,str]): visited origin's identifier or URL
            date: timestamp of such visit
            type (str): the type of loader used for the visit (hg, git, ...)

        Returns:
            dict: dictionary with keys origin and visit where:

            - origin: origin's identifier
            - visit: the visit's identifier for the new visit occurrence
        """
        if ts is None:
            if date is None:
                raise TypeError('origin_visit_add expected 2 arguments.')
        else:
            assert date is None
            warnings.warn("argument 'ts' of origin_visit_add was renamed "
                          "to 'date' in v0.0.109.",
                          DeprecationWarning)
            date = ts

        origin_url = self._get_origin_url(origin)
        if origin_url is None:
            raise ValueError('Unknown origin.')

        if isinstance(date, str):
            date = dateutil.parser.parse(date)

        visit_ret = None
        if origin_url in self._origins:
            origin = self._origins[origin_url]
            # visit ids are in the range [1, +inf[
            visit_id = len(self._origin_visits[origin_url]) + 1
            status = 'ongoing'
            visit = {
                'origin': {'url': origin['url']},
                'date': date,
                'type': type or origin['type'],
                'status': status,
                'snapshot': None,
                'metadata': None,
                'visit': visit_id
            }
            self._origin_visits[origin_url].append(visit)
            visit_ret = {
                'origin': origin['id'] if ENABLE_ORIGIN_IDS else origin['url'],
                'visit': visit_id,
            }

            self._objects[(origin_url, visit_id)].append(
                ('origin_visit', None))

            if self.journal_writer:
                origin = self._origins[origin_url].copy()
                if 'id' in origin:
                    del origin['id']
                self.journal_writer.write_addition('origin_visit', {
                    **visit, 'origin': origin})

        return visit_ret

    def origin_visit_update(self, origin, visit_id, status=None,
                            metadata=None, snapshot=None):
        """Update an origin_visit's status.

        Args:
            origin (Union[int,str]): visited origin's identifier or URL
            visit_id (int): visit's identifier
            status: visit's new status
            metadata: data associated to the visit
            snapshot (sha1_git): identifier of the snapshot to add to
                the visit

        Returns:
            None
        """
        origin_url = self._get_origin_url(origin)
        if origin_url is None:
            raise ValueError('Unknown origin.')

        try:
            visit = self._origin_visits[origin_url][visit_id-1]
        except IndexError:
            raise ValueError('Unknown visit_id for this origin') \
                from None

        if self.journal_writer:
            origin = self._origins[origin_url].copy()
            if 'id' in origin:
                del origin['id']
            self.journal_writer.write_update('origin_visit', {
                'origin': origin, 'type': origin['type'],
                'visit': visit_id,
                'status': status or visit['status'],
                'date': visit['date'],
                'metadata': metadata or visit['metadata'],
                'snapshot': snapshot or visit['snapshot']})

        if origin_url not in self._origin_visits or \
                visit_id > len(self._origin_visits[origin_url]):
            return

        if status:
            visit['status'] = status
        if metadata:
            visit['metadata'] = metadata
        if snapshot:
            visit['snapshot'] = snapshot
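
    # Illustrative visit-lifecycle sketch (assumes ``storage``, the origin
    # url and ``snapshot_id`` already exist): a visit is opened as 'ongoing'
    # and later closed with a final status and a snapshot identifier.
    #
    #     visit = storage.origin_visit_add(
    #         'https://example.org/repo.git',
    #         date=datetime.datetime.now(tz=datetime.timezone.utc),
    #         type='git')
    #     storage.origin_visit_update(
    #         'https://example.org/repo.git', visit['visit'],
    #         status='full', snapshot=snapshot_id)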
        Args:
            visits: iterable of dicts with keys:

                origin: dict with keys either `id` or `url`
                visit: origin visit id
                type: type of loader used for the visit
                date: timestamp of such visit
                status: Visit's new status
                metadata: Data associated to the visit
                snapshot (sha1_git): identifier of the snapshot to add to
                    the visit
        """
        visits = copy.deepcopy(visits)
        for visit in visits:
            if isinstance(visit['date'], str):
                visit['date'] = dateutil.parser.parse(visit['date'])

        if self.journal_writer:
            for visit in visits:
                visit = visit.copy()
                visit['origin'] = self._origins[visit['origin']['url']].copy()
                if 'id' in visit['origin']:
                    del visit['origin']['id']
                self.journal_writer.write_addition('origin_visit', visit)

        for visit in visits:
            visit_id = visit['visit']
            origin_url = visit['origin']['url']

            self._objects[(origin_url, visit_id)].append(
                ('origin_visit', None))

            while len(self._origin_visits[origin_url]) < visit_id:
                self._origin_visits[origin_url].append(None)

            visit = visit.copy()
            visit['origin'] = {'url': visit['origin']['url']}
            self._origin_visits[origin_url][visit_id-1] = visit

    def _convert_visit(self, visit):
        if visit is None:
            return

        visit = visit.copy()
        origin = self._origins[visit['origin']['url']]
        if ENABLE_ORIGIN_IDS:
            visit['origin'] = origin['id']
        else:
            visit['origin'] = origin['url']

        return visit

    def origin_visit_get(self, origin, last_visit=None, limit=None):
        """Retrieve all the origin's visits' information.

        Args:
            origin (int): the origin's identifier
            last_visit (int): visit's id from which to list the next ones,
                default to None
            limit (int): maximum number of results to return,
                default to None

        Yields:
            List of visits.
        """
        origin_url = self._get_origin_url(origin)
        if origin_url in self._origin_visits:
            visits = self._origin_visits[origin_url]
            if last_visit is not None:
                visits = visits[last_visit:]
            if limit is not None:
                visits = visits[:limit]
            for visit in visits:
                if not visit:
                    continue
                visit_id = visit['visit']
                yield self._convert_visit(
                    self._origin_visits[origin_url][visit_id-1])

    def origin_visit_find_by_date(self, origin, visit_date):
        """Retrieve the origin visit whose date is closest to the provided
        timestamp. In case of a tie, the visit with the largest id is
        selected.

        Args:
            origin (str): The occurrence's origin (URL).
            visit_date (datetime): target timestamp

        Returns:
            A visit.
        """
        origin_url = self._get_origin_url(origin)
        if origin_url in self._origin_visits:
            visits = self._origin_visits[origin_url]
            visit = min(
                visits,
                key=lambda v: (abs(v['date'] - visit_date), -v['visit']))
            return self._convert_visit(visit)

    def origin_visit_get_by(self, origin, visit):
        """Retrieve origin visit's information.

        Args:
            origin (int): the origin's identifier
            visit (int): the visit's identifier

        Returns:
            The information on that particular (origin, visit) or None if
            it does not exist
        """
        origin_url = self._get_origin_url(origin)
        if origin_url in self._origin_visits and \
                visit <= len(self._origin_visits[origin_url]):
            return self._convert_visit(
                self._origin_visits[origin_url][visit-1])
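
    # Illustrative listing sketch (assumes ``storage`` and the origin url
    # exist): visits can be paged through with ``last_visit`` and ``limit``.
    #
    #     for visit in storage.origin_visit_get(
    #             'https://example.org/repo.git', last_visit=0, limit=10):
    #         print(visit['visit'], visit['status'], visit['snapshot'])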
        Returns:
            dict: a dict with the following keys:

                origin: the URL of the origin
                visit: origin visit id
                type: type of loader used for the visit
                date: timestamp of such visit
                status: status of the visit
                metadata: Data associated to the visit
                snapshot (Optional[sha1_git]): identifier of the snapshot
                    associated to the visit
        """
        origin = self._origins.get(origin)
        if not origin:
            return
        visits = self._origin_visits[origin['url']]
        if allowed_statuses is not None:
            visits = [visit for visit in visits
                      if visit['status'] in allowed_statuses]
        if require_snapshot:
            visits = [visit for visit in visits
                      if visit['snapshot']]

        visit = max(
            visits, key=lambda v: (v['date'], v['visit']), default=None)
        return self._convert_visit(visit)

    def stat_counters(self):
        """Compute statistics about the number of tuples in various tables

        Returns:
            dict: a dictionary mapping textual labels (e.g., content) to
            integer values (e.g., the number of tuples in table content)
        """
        keys = (
            'content',
            'directory',
            'origin',
            'origin_visit',
            'person',
            'release',
            'revision',
            'skipped_content',
            'snapshot'
        )
        stats = {key: 0 for key in keys}
        stats.update(collections.Counter(
            obj_type
            for (obj_type, obj_id)
            in itertools.chain(*self._objects.values())))
        return stats

    def refresh_stat_counters(self):
        """Recomputes the statistics for `stat_counters`."""
        pass

    def origin_metadata_add(self, origin_id, ts, provider, tool, metadata,
                            db=None, cur=None):
        """Add an origin_metadata for the origin at ts with provenance and
        metadata.

        Args:
            origin_id (int): the origin's id for which the metadata is added
            ts (datetime): timestamp of the found metadata
            provider: id of the provider of metadata (ex:'hal')
            tool: id of the tool used to extract metadata
            metadata (jsonb): the metadata retrieved at the time and location
        """
        if isinstance(ts, str):
            ts = dateutil.parser.parse(ts)

        origin_metadata = {
            'origin_id': origin_id,
            'discovery_date': ts,
            'tool_id': tool,
            'metadata': metadata,
            'provider_id': provider,
        }
        self._origin_metadata[origin_id].append(origin_metadata)
        return None

    def origin_metadata_get_by(self, origin_id, provider_type=None, db=None,
                               cur=None):
        """Retrieve the list of all origin_metadata entries for the origin_id

        Args:
            origin_id (int): the unique origin's identifier
            provider_type (str): (optional) type of provider

        Returns:
            list of dicts: the origin_metadata dictionaries with the keys:

            - origin_id (int): origin's identifier
            - discovery_date (datetime): timestamp of discovery
            - tool_id (int): metadata's extracting tool
            - metadata (jsonb)
            - provider_id (int): metadata's provider
            - provider_name (str)
            - provider_type (str)
            - provider_url (str)
        """
        metadata = []
        for item in self._origin_metadata[origin_id]:
            item = copy.deepcopy(item)
            provider = self.metadata_provider_get(item['provider_id'])
            for attr in ('name', 'type', 'url'):
                item['provider_' + attr] = provider['provider_' + attr]
            metadata.append(item)
        return metadata
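
    # Illustrative provenance sketch (all names and values are made up for
    # the example; ``origin_id`` and ``discovery_date`` are assumed to be
    # defined): a provider and a tool are registered first, then their ids
    # are used to attach metadata to an origin.
    #
    #     provider_id = storage.metadata_provider_add(
    #         'hal', 'deposit-client', 'https://hal.example.org/', {})
    #     tool = storage.tool_add([{'name': 'swh-deposit', 'version': '1.0',
    #                               'configuration': {}}])[0]
    #     storage.origin_metadata_add(
    #         origin_id, discovery_date, provider_id, tool['id'],
    #         {'title': 'example'})
    #     entries = storage.origin_metadata_get_by(origin_id)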
""" inserted = [] for tool in tools: key = self._tool_key(tool) assert 'id' not in tool record = copy.deepcopy(tool) record['id'] = key # TODO: remove this if key not in self._tools: self._tools[key] = record inserted.append(copy.deepcopy(self._tools[key])) return inserted def tool_get(self, tool): """Retrieve tool information. Args: tool (dict): Tool information we want to retrieve from storage. The dicts have the same keys as those used in :func:`tool_add`. Returns: dict: The full tool information if it exists (``id`` included), None otherwise. """ return self._tools.get(self._tool_key(tool)) def metadata_provider_add(self, provider_name, provider_type, provider_url, metadata): """Add a metadata provider. Args: provider_name (str): Its name provider_type (str): Its type provider_url (str): Its URL metadata: JSON-encodable object Returns: an identifier of the provider """ provider = { 'provider_name': provider_name, 'provider_type': provider_type, 'provider_url': provider_url, 'metadata': metadata, } key = self._metadata_provider_key(provider) provider['id'] = key self._metadata_providers[key] = provider return key def metadata_provider_get(self, provider_id, db=None, cur=None): """Get a metadata provider Args: provider_id: Its identifier, as given by `metadata_provider_add`. Returns: dict: same as `metadata_provider_add`; or None if it does not exist. """ return self._metadata_providers.get(provider_id) def metadata_provider_get_by(self, provider, db=None, cur=None): """Get a metadata provider Args: provider_name: Its name provider_url: Its URL Returns: dict: same as `metadata_provider_add`; or None if it does not exist. """ key = self._metadata_provider_key(provider) return self._metadata_providers.get(key) def _get_origin_url(self, origin): if isinstance(origin, str): return origin elif isinstance(origin, int): if origin <= len(self._origins_by_id): return self._origins_by_id[origin-1] else: return None else: raise TypeError('origin must be a string or an integer.') def _person_add(self, person): """Add a person in storage. Note: Private method, do not use outside of this class. Args: person: dictionary with keys fullname, name and email. 
""" key = ('person', person['fullname']) if key not in self._objects: person_id = len(self._persons) + 1 self._persons.append(dict(person)) self._objects[key].append(('person', person_id)) else: person_id = self._objects[key][0][1] p = self._persons[person_id-1] person.update(p.items()) - person['id'] = person_id @staticmethod def _content_key(content): """A stable key for a content""" return tuple(content.get(key) for key in sorted(DEFAULT_ALGORITHMS)) @staticmethod def _content_key_algorithm(content): """ A stable key and the algorithm for a content""" return tuple((content.get(key), key) for key in sorted(DEFAULT_ALGORITHMS)) @staticmethod def _tool_key(tool): return '%r %r %r' % (tool['name'], tool['version'], tuple(sorted(tool['configuration'].items()))) @staticmethod def _metadata_provider_key(provider): return '%r %r' % (provider['provider_name'], provider['provider_url']) diff --git a/swh/storage/tests/test_converters.py b/swh/storage/tests/test_converters.py index 7a1bf622..75b862ba 100644 --- a/swh/storage/tests/test_converters.py +++ b/swh/storage/tests/test_converters.py @@ -1,122 +1,115 @@ # Copyright (C) 2015 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import unittest from swh.storage import converters class TestConverters(unittest.TestCase): def setUp(self): self.maxDiff = None def test_db_to_author(self): # when actual_author = converters.db_to_author( - 1, b'fullname', b'name', b'email') + b'fullname', b'name', b'email') # then self.assertEqual(actual_author, { - 'id': 1, 'fullname': b'fullname', 'name': b'name', 'email': b'email', }) def test_db_to_revision(self): # when actual_revision = converters.db_to_revision({ 'id': 'revision-id', 'date': None, 'date_offset': None, 'date_neg_utc_offset': None, 'committer_date': None, 'committer_date_offset': None, 'committer_date_neg_utc_offset': None, 'type': 'rev', 'directory': b'dir-sha1', 'message': b'commit message', - 'author_id': 'auth-id', 'author_fullname': b'auth-fullname', 'author_name': b'auth-name', 'author_email': b'auth-email', - 'committer_id': 'comm-id', 'committer_fullname': b'comm-fullname', 'committer_name': b'comm-name', 'committer_email': b'comm-email', 'metadata': {}, 'synthetic': False, 'parents': [123, 456] }) # then self.assertEqual(actual_revision, { 'id': 'revision-id', 'author': { - 'id': 'auth-id', 'fullname': b'auth-fullname', 'name': b'auth-name', 'email': b'auth-email', }, 'date': None, 'committer': { - 'id': 'comm-id', 'fullname': b'comm-fullname', 'name': b'comm-name', 'email': b'comm-email', }, 'committer_date': None, 'type': 'rev', 'directory': b'dir-sha1', 'message': b'commit message', 'metadata': {}, 'synthetic': False, 'parents': [123, 456], }) def test_db_to_release(self): # when actual_release = converters.db_to_release({ 'id': b'release-id', 'target': b'revision-id', 'target_type': 'revision', 'date': None, 'date_offset': None, 'date_neg_utc_offset': None, 'name': b'release-name', 'comment': b'release comment', 'synthetic': True, - 'author_id': 'auth-id', 'author_fullname': b'auth-fullname', 'author_name': b'auth-name', 'author_email': b'auth-email', }) # then self.assertEqual(actual_release, { 'author': { - 'id': 'auth-id', 'fullname': b'auth-fullname', 'name': b'auth-name', 'email': b'auth-email', }, 'date': None, 'id': b'release-id', 'name': b'release-name', 'message': b'release comment', 'synthetic': True, 
            'target': b'revision-id',
            'target_type': 'revision'
        })

    def test_db_to_git_headers(self):
        raw_data = [
            ['gpgsig', b'garbage\x89a\x43b\x14'],
            ['extra', [b'fo\\\\\\o', b'bar\\', b'inval\\\\\x99id']],
        ]

        db_data = converters.git_headers_to_db(raw_data)
        loop = converters.db_to_git_headers(db_data)
        self.assertEqual(raw_data, loop)