diff --git a/swh/storage/api/serializers.py b/swh/storage/api/serializers.py --- a/swh/storage/api/serializers.py +++ b/swh/storage/api/serializers.py @@ -7,7 +7,8 @@ from typing import Callable, Dict, List, Tuple -from swh.model.identifiers import CoreSWHID, ExtendedSWHID, QualifiedSWHID +import swh.model.identifiers as identifiers +from swh.model.identifiers import CoreSWHID, ExtendedSWHID, ObjectType, QualifiedSWHID import swh.model.model as model from swh.storage import interface @@ -29,6 +30,10 @@ return getattr(model, d.pop("__type__"))(d["value"]) +def _decode_identifiers_enum(d): + return getattr(identifiers, d.pop("__type__"))(d["value"]) + + def _decode_storage_enum(d): return getattr(interface, d.pop("__type__"))(d["value"]) @@ -38,6 +43,7 @@ (CoreSWHID, "core_swhid", str), (ExtendedSWHID, "extended_swhid", str), (QualifiedSWHID, "qualified_swhid", str), + (ObjectType, "identifiers_enum", _encode_enum), (model.MetadataAuthorityType, "model_enum", _encode_enum), (interface.ListOrder, "storage_enum", _encode_enum), ] @@ -48,6 +54,7 @@ "extended_swhid": ExtendedSWHID.from_string, "qualified_swhid": QualifiedSWHID.from_string, "model": lambda d: getattr(model, d.pop("__type__")).from_dict(d), + "identifiers_enum": _decode_identifiers_enum, "model_enum": _decode_model_enum, "storage_enum": _decode_storage_enum, } diff --git a/swh/storage/backfill.py b/swh/storage/backfill.py --- a/swh/storage/backfill.py +++ b/swh/storage/backfill.py @@ -23,6 +23,7 @@ BaseModel, Directory, DirectoryEntry, + ExtID, RawExtrinsicMetadata, Release, Revision, @@ -31,6 +32,7 @@ TargetType, ) from swh.storage.postgresql.converters import ( + db_to_extid, db_to_raw_extrinsic_metadata, db_to_release, db_to_revision, @@ -44,6 +46,7 @@ "content": "sha1", "skipped_content": "sha1", "directory": "id", + "extid": "target", "metadata_authority": "type, url", "metadata_fetcher": "name, version", "raw_extrinsic_metadata": "target", @@ -76,8 +79,20 @@ "reason", ], "directory": ["id", 
"dir_entries", "file_entries", "rev_entries"], + "extid": ["extid_type", "extid", "target_type", "target"], "metadata_authority": ["type", "url", "metadata",], "metadata_fetcher": ["name", "version", "metadata",], + "origin": ["url"], + "origin_visit": ["visit", "type", ("origin.url", "origin"), "date",], + "origin_visit_status": [ + ("origin_visit_status.visit", "visit"), + ("origin.url", "origin"), + ("origin_visit_status.date", "date"), + "type", + "snapshot", + "status", + "metadata", + ], "raw_extrinsic_metadata": [ "raw_extrinsic_metadata.type", "raw_extrinsic_metadata.target", @@ -140,17 +155,6 @@ ("a.fullname", "author_fullname"), ], "snapshot": ["id", "object_id"], - "origin": ["url"], - "origin_visit": ["visit", "type", ("origin.url", "origin"), "date",], - "origin_visit_status": [ - ("origin_visit_status.visit", "visit"), - ("origin.url", "origin"), - ("origin_visit_status.date", "date"), - "type", - "snapshot", - "status", - "metadata", - ], } @@ -211,13 +215,21 @@ def raw_extrinsic_metadata_converter( db: BaseDb, metadata: Dict[str, Any] ) -> RawExtrinsicMetadata: - """Convert revision from the flat representation to swh model + """Convert a raw extrinsic metadata from the flat representation to swh model compatible objects. """ return db_to_raw_extrinsic_metadata(metadata) +def extid_converter(db: BaseDb, extid: Dict[str, Any]) -> ExtID: + """Convert an extid from the flat representation to swh model + compatible objects. + + """ + return db_to_extid(extid) + + def revision_converter(db: BaseDb, revision_d: Dict[str, Any]) -> Revision: """Convert revision from the flat representation to swh model compatible objects. 
@@ -271,6 +283,7 @@ CONVERTERS: Dict[str, Callable[[BaseDb, Dict[str, Any]], BaseModel]] = { "directory": directory_converter, + "extid": extid_converter, "raw_extrinsic_metadata": raw_extrinsic_metadata_converter, "revision": revision_converter, "release": release_converter, @@ -357,6 +370,7 @@ "content": lambda start, end: byte_ranges(24, start, end), "skipped_content": lambda start, end: [(None, None)], "directory": lambda start, end: byte_ranges(24, start, end), + "extid": lambda start, end: byte_ranges(24, start, end), "revision": lambda start, end: byte_ranges(24, start, end), "release": lambda start, end: byte_ranges(16, start, end), "snapshot": lambda start, end: byte_ranges(16, start, end), diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py --- a/swh/storage/cassandra/cql.py +++ b/swh/storage/cassandra/cql.py @@ -34,6 +34,7 @@ wait_random_exponential, ) +from swh.model.identifiers import ExtendedSWHID from swh.model.model import ( Content, Person, @@ -52,6 +53,7 @@ ContentRow, DirectoryEntryRow, DirectoryRow, + ExtIDRow, MetadataAuthorityRow, MetadataFetcherRow, ObjectCountRow, @@ -964,6 +966,127 @@ ), ) + ########################## + # 'extid' table + ########################## + + def _extid_add_finalize(self, statement: BoundStatement) -> None: + """Returned currified by extid_add_prepare, to be called when the + extid row should be added to the primary table.""" + self._execute_with_retries(statement, None) + self._increment_counter("extid", 1) + + @_prepared_insert_statement(ExtIDRow) + def extid_add_prepare( + self, extid: ExtIDRow, *, statement + ) -> Tuple[int, Callable[[], None]]: + statement = statement.bind(dataclasses.astuple(extid)) + token_class = self._cluster.metadata.token_map.token_class + token = token_class.from_key(statement.routing_key).value + assert TOKEN_BEGIN <= token <= TOKEN_END + + # Function to be called after the indexes contain their respective + # row + finalizer = 
functools.partial(self._extid_add_finalize, statement) + + return (token, finalizer) + + # self._add_one(statement, extid) + + @_prepared_select_statement( + ExtIDRow, "WHERE extid_type=? AND extid=? AND target_type=? AND target=?", + ) + def extid_get_from_pk( + self, extid_type: str, extid: bytes, target: ExtendedSWHID, *, statement, + ) -> Optional[ExtIDRow]: + rows = list( + self._execute_with_retries( + statement, + [extid_type, extid, target.object_type.value, target.object_id], + ), + ) + assert len(rows) <= 1 + if rows: + return ExtIDRow(**rows[0]) + else: + return None + + @_prepared_select_statement( + ExtIDRow, "WHERE token(target, target_type, extid, extid_type) = ?", + ) + def extid_get_from_token(self, token: int, *, statement) -> Iterable[ExtIDRow]: + return map(ExtIDRow.from_dict, self._execute_with_retries(statement, [token]),) + + ########################## + # 'extid_by_*' tables + ########################## + + def extid_extid_index_add_one(self, extid: ExtIDRow, token: int) -> None: + """Adds a row mapping extid[extid_type, extid] to the token of the ExtID in + the main 'extid' table.""" + query = ( + "INSERT INTO extid_by_extid (extid_type, extid, target_token) " + "VALUES (%s, %s, %s)" + ) + self._execute_with_retries(query, [extid.extid_type, extid.extid, token]) + + def extid_target_index_add_one(self, extid: ExtIDRow, token: int) -> None: + """Adds a row mapping extid[target_type, target] to the token of the ExtID in + the main 'extid' table.""" + query = ( + "INSERT INTO extid_by_target (target_type, target, target_token) " + "VALUES (%s, %s, %s)" + ) + self._execute_with_retries(query, [extid.target_type, extid.target, token]) + + def extid_get_tokens_from_extid( + self, extid_type: str, extid: bytes + ) -> Iterable[int]: + query = ( + "SELECT target_token " + "FROM extid_by_extid " + "WHERE extid_type = %s AND extid = %s" + ) + return ( + row["target_token"] + for row in self._execute_with_retries(query, [extid_type, extid]) + ) + + def 
extid_get_tokens_from_target( + self, target_type: str, target: bytes + ) -> Iterable[int]: + query = ( + "SELECT target_token " + "FROM extid_by_target " + "WHERE target_type = %s AND target = %s" + ) + return ( + row["target_token"] + for row in self._execute_with_retries(query, [target_type, target]) + ) + + @_prepared_select_statement( + ExtIDRow, "WHERE extid_type=? AND extid=?", + ) + def extid_get_from_extid( + self, extid_type: str, extid: bytes, *, statement + ) -> Iterable[ExtIDRow]: + return map( + ExtIDRow.from_dict, + self._execute_with_retries(statement, [extid_type, extid]), + ) + + @_prepared_select_statement( + ExtIDRow, "WHERE target_type=? AND target=?", + ) + def extid_get_from_target( + self, target_type: str, target: bytes, *, statement + ) -> Iterable[ExtIDRow]: + return map( + ExtIDRow.from_dict, + self._execute_with_retries(statement, [target_type, target]), + ) + ########################## # Miscellaneous ########################## diff --git a/swh/storage/cassandra/model.py b/swh/storage/cassandra/model.py --- a/swh/storage/cassandra/model.py +++ b/swh/storage/cassandra/model.py @@ -280,3 +280,14 @@ partition_key: int object_type: str count: int + + +@dataclasses.dataclass +class ExtIDRow(BaseRow): + TABLE = "extid" + PARTITION_KEY = ("target", "target_type", "extid", "extid_type") + + extid_type: str + extid: bytes + target_type: str + target: bytes diff --git a/swh/storage/cassandra/schema.py b/swh/storage/cassandra/schema.py --- a/swh/storage/cassandra/schema.py +++ b/swh/storage/cassandra/schema.py @@ -220,6 +220,28 @@ object_type ascii, count counter, PRIMARY KEY ((partition_key), object_type) +);""", + """ +CREATE TABLE IF NOT EXISTS extid ( + extid_type ascii, + extid blob, + target_type ascii, + target blob, + PRIMARY KEY ((target, target_type, extid, extid_type)) +);""", + """ +CREATE TABLE IF NOT EXISTS extid_by_extid ( + extid_type ascii, + extid blob, + target_token bigint, -- value of token(pk) on the "primary" table + 
PRIMARY KEY ((extid_type, extid), target_token) +);""", + """ +CREATE TABLE IF NOT EXISTS extid_by_target ( + target_type ascii, + target blob, + target_token bigint, -- value of token(pk) on the "primary" table + PRIMARY KEY ((target_type, target), target_token) );""", ] @@ -255,6 +277,9 @@ "origin_visit_status", "metadata_authority", "metadata_fetcher", + "extid", + "extid_by_extid", + "extid_by_target", ] HASH_ALGORITHMS = ["sha1", "sha1_git", "sha256", "blake2s256"] diff --git a/swh/storage/cassandra/storage.py b/swh/storage/cassandra/storage.py --- a/swh/storage/cassandra/storage.py +++ b/swh/storage/cassandra/storage.py @@ -27,11 +27,12 @@ from swh.core.api.classes import stream_results from swh.core.api.serializers import msgpack_dumps, msgpack_loads from swh.model.hashutil import DEFAULT_ALGORITHMS -from swh.model.identifiers import CoreSWHID, ExtendedSWHID +from swh.model.identifiers import CoreSWHID, ExtendedSWHID, ObjectType from swh.model.model import ( Content, Directory, DirectoryEntry, + ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, @@ -67,6 +68,7 @@ ContentRow, DirectoryEntryRow, DirectoryRow, + ExtIDRow, MetadataAuthorityRow, MetadataFetcherRow, OriginRow, @@ -1323,6 +1325,114 @@ else: return None + # ExtID tables + def _extid_get_from_extid( + self, extid_type: str, extid: bytes + ) -> Iterable[ExtIDRow]: + """XXX """ + yield from self._extid_get_from_tokens( + self._cql_runner.extid_get_tokens_from_extid( + extid_type=extid_type, extid=extid + ) + ) + + def _extid_get_from_target( + self, target_type: str, target: bytes + ) -> Iterable[ExtIDRow]: + """XXX """ + yield from self._extid_get_from_tokens( + self._cql_runner.extid_get_tokens_from_target( + target_type=target_type, target=target + ) + ) + + def _extid_get_from_tokens( + self, tokens: Iterable[Optional[int]] + ) -> Iterable[ExtIDRow]: + for token in tokens: + # Query the main table ('content'). 
+ if token is not None: + yield from self._cql_runner.extid_get_from_token(token=token) + + def extid_add(self, ids: List[ExtID]) -> Dict[str, int]: + extids = [ + extid + for extid in ids + if not self._cql_runner.extid_get_from_pk( + extid_type=extid.extid_type, extid=extid.extid, target=extid.target, + ) + ] + + self.journal_writer.extid_add(extids) + + inserted = 0 + for extid in extids: + extidrow = ExtIDRow( + extid_type=extid.extid_type, + extid=extid.extid, + target_type=extid.target.object_type.value, + target=extid.target.object_id, + ) + (token, insertion_finalizer) = self._cql_runner.extid_add_prepare(extidrow) + if ( + self.extid_get_from_extid(extid.extid_type, [extid.extid])[0] + or self.extid_get_from_target( + extid.target.object_type, [extid.target.object_id] + )[0] + ): + # on conflict do nothing... + continue + self._cql_runner.extid_extid_index_add_one(extidrow, token) + self._cql_runner.extid_target_index_add_one(extidrow, token) + insertion_finalizer() + inserted += 1 + return {"extid:add": inserted} + + def extid_get_from_extid( + self, id_type: str, ids: List[bytes] + ) -> List[Optional[ExtID]]: + result: List[Optional[ExtID]] = [] + for extid in ids: + extidrows = list(self._extid_get_from_extid(id_type, extid)) + assert len(extidrows) <= 1 + if extidrows: + result.append( + ExtID( + extid_type=extidrows[0].extid_type, + extid=extidrows[0].extid, + target=CoreSWHID( + object_type=extidrows[0].target_type, + object_id=extidrows[0].target, + ), + ) + ) + else: + result.append(None) + return result + + def extid_get_from_target( + self, target_type: ObjectType, ids: List[Sha1Git] + ) -> List[Optional[ExtID]]: + result: List[Optional[ExtID]] = [] + for target in ids: + extidrows = list(self._extid_get_from_target(target_type.value, target)) + assert len(extidrows) <= 1 + if extidrows: + result.append( + ExtID( + extid_type=extidrows[0].extid_type, + extid=extidrows[0].extid, + target=CoreSWHID( + object_type=extidrows[0].target_type, + 
object_id=extidrows[0].target, + ), + ) + ) + else: + result.append(None) + return result + + # Misc def clear_buffers(self, object_types: Sequence[str] = ()) -> None: """Do nothing diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py --- a/swh/storage/in_memory.py +++ b/swh/storage/in_memory.py @@ -21,6 +21,7 @@ Union, ) +from swh.model.identifiers import ExtendedSWHID from swh.model.model import Content, Sha1Git, SkippedContent from swh.storage.cassandra import CassandraStorage from swh.storage.cassandra.model import ( @@ -28,6 +29,7 @@ ContentRow, DirectoryEntryRow, DirectoryRow, + ExtIDRow, MetadataAuthorityRow, MetadataFetcherRow, ObjectCountRow, @@ -164,6 +166,8 @@ self._metadata_authorities = Table(MetadataAuthorityRow) self._metadata_fetchers = Table(MetadataFetcherRow) self._raw_extrinsic_metadata = Table(RawExtrinsicMetadataRow) + self._extid = Table(ExtIDRow) + self._extid_index = {} self._stat_counters = defaultdict(int) def increment_counter(self, object_type: str, nb: int): @@ -612,6 +616,46 @@ if m.authority_type == authority_type and m.authority_url == authority_url ) + ######################### + # 'extid' table + ######################### + def _extid_add_finalize(self, extid: ExtIDRow) -> None: + self._extid.insert(extid) + self.increment_counter("extid", 1) + + def extid_add_prepare(self, extid: ExtIDRow): + finalizer = functools.partial(self._extid_add_finalize, extid) + return (self._extid.token(self._extid.partition_key(extid)), finalizer) + + def extid_extid_index_add_one(self, extid: ExtIDRow, token: int) -> None: + self._extid_index[("extid", extid.extid_type, extid.extid)] = token + + def extid_target_index_add_one(self, extid: ExtIDRow, token: int) -> None: + self._extid_index[("target", extid.target_type, extid.target)] = token + + def extid_get_from_pk( + self, extid_type: str, extid: bytes, target: ExtendedSWHID, + ) -> Optional[ExtIDRow]: + print("GET ", extid_type, extid, target) + primary_key = 
self._extid.primary_key_from_dict( + dict( + extid_type=extid_type, + extid=extid, + target_type=target.object_type.value, + target=target.object_id, + ) + ) + return self._extid.get_from_primary_key(primary_key) + + def extid_get_from_token(self, token: int) -> Iterable[ExtIDRow]: + return self._extid.get_from_token(token) + + def extid_get_tokens_from_extid(self, extid_type: str, extid: bytes): + return [self._extid_index.get(("extid", extid_type, extid))] + + def extid_get_tokens_from_target(self, target_type: str, target: bytes): + return [self._extid_index.get(("target", target_type, target))] + class InMemoryStorage(CassandraStorage): _cql_runner: InMemoryCqlRunner # type: ignore diff --git a/swh/storage/interface.py b/swh/storage/interface.py --- a/swh/storage/interface.py +++ b/swh/storage/interface.py @@ -11,10 +11,11 @@ from swh.core.api import remote_api_endpoint from swh.core.api.classes import PagedResult as CorePagedResult -from swh.model.identifiers import ExtendedSWHID +from swh.model.identifiers import ExtendedSWHID, ObjectType from swh.model.model import ( Content, Directory, + ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, @@ -500,6 +501,52 @@ """ ... + @remote_api_endpoint("extid/from_extid") + def extid_get_from_extid( + self, id_type: str, ids: List[bytes] + ) -> List[Optional[ExtID]]: + """Get ExtID objects from external IDs + + Args: + id_type: type the external identifiers the SWH objects comes from + ids: list of external IDs + + Returns: + list of ExtID objects (if the ext ID is known, None otherwise) + + """ + ... + + @remote_api_endpoint("extid/from_target") + def extid_get_from_target( + self, target_type: ObjectType, ids: List[Sha1Git] + ) -> List[Optional[ExtID]]: + """Get ExtID objects from target IDs and target_type + + Args: + target_type: type the SWH object + ids: list of target IDs + + Returns: + list of ExtID objects (if the SWH ID is known, None otherwise) + + """ + ... 
+ + @remote_api_endpoint("extid/add") + def extid_add(self, ids: List[ExtID]) -> Dict[str, int]: + """Add a series of ExtID objects + + Args: + ids: list of ExtID objects + + Returns: + Summary dict of keys with associated count as values + + extid:add: New ExtID objects actually stored in db + """ + ... + @remote_api_endpoint("revision/log") def revision_log( self, revisions: List[Sha1Git], limit: Optional[int] = None diff --git a/swh/storage/postgresql/converters.py b/swh/storage/postgresql/converters.py --- a/swh/storage/postgresql/converters.py +++ b/swh/storage/postgresql/converters.py @@ -9,6 +9,7 @@ from swh.core.utils import encode_with_unescape from swh.model.identifiers import CoreSWHID, ExtendedSWHID from swh.model.model import ( + ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, @@ -36,6 +37,14 @@ "neg_utc_offset": None, } +OBJECT_TYPE_MAP_SHORT = { + "directory": "dir", + "content": "cnt", + "release": "rel", + "revision": "rev", + "snapshot": "snp", +} + def author_to_db(author: Optional[Person]) -> Dict[str, Any]: """Convert a swh-model author to its DB representation. 
@@ -314,3 +323,14 @@ path=row["path"], directory=map_optional(CoreSWHID.from_string, row["directory"]), ) + + +def db_to_extid(row) -> ExtID: + return ExtID( + extid=row["extid"], + extid_type=row["extid_type"], + target=CoreSWHID( + object_id=row["target"], + object_type=OBJECT_TYPE_MAP_SHORT[row["target_type"]], + ), + ) diff --git a/swh/storage/postgresql/db.py b/swh/storage/postgresql/db.py --- a/swh/storage/postgresql/db.py +++ b/swh/storage/postgresql/db.py @@ -18,6 +18,14 @@ logger = logging.getLogger(__name__) +OBJECT_TYPE_MAP = { + "dir": "directory", + "cnt": "content", + "rel": "release", + "rev": "revision", + "snp": "snapshot", +} + def jsonize(d): return _jsonize(dict(d) if d is not None else None) @@ -28,7 +36,7 @@ """ - current_version = 167 + current_version = 168 def mktemp_dir_entry(self, entry_type, cur=None): self._cursor(cur).execute( @@ -76,6 +84,10 @@ def revision_add_from_temp(self, cur=None): pass + @stored_procedure("swh_extid_add") + def extid_add_from_temp(self, cur=None): + pass + @stored_procedure("swh_release_add") def release_add_from_temp(self, cur=None): pass @@ -812,6 +824,49 @@ ((sortkey, id) for sortkey, id in enumerate(revisions)), ) + extid_cols = ["extid", "extid_type", "target", "target_type"] + + def extid_get_from_extid_list(self, extid_type, ids, cur=None): + cur = self._cursor(cur) + query_keys = ", ".join( + self.mangle_query_key(k, "extid") for k in self.extid_cols + ) + sql = """ + SELECT %s + FROM (VALUES %%s) as t(sortkey, extid, extid_type) + LEFT JOIN extid USING (extid, extid_type) + ORDER BY sortkey + """ % ( + query_keys, + ) + + yield from execute_values_generator( + cur, + sql, + (((sortkey, extid, extid_type) for sortkey, extid in enumerate(ids))), + ) + + def extid_get_from_swhid_list(self, target_type, ids, cur=None): + cur = self._cursor(cur) + target_type = OBJECT_TYPE_MAP[target_type] # aka "rev" -> "revision", ... 
+ query_keys = ", ".join( + self.mangle_query_key(k, "extid") for k in self.extid_cols + ) + sql = """ + SELECT %s + FROM (VALUES %%s) as t(sortkey, target, target_type) + LEFT JOIN extid USING (target, target_type) + ORDER BY sortkey + """ % ( + query_keys, + ) + yield from execute_values_generator( + cur, + sql, + (((sortkey, target, target_type) for sortkey, target in enumerate(ids))), + template=b"(%s,%s,%s::object_type)", + ) + def revision_log(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) diff --git a/swh/storage/postgresql/storage.py b/swh/storage/postgresql/storage.py --- a/swh/storage/postgresql/storage.py +++ b/swh/storage/postgresql/storage.py @@ -1,4 +1,4 @@ -# Copyright (C) 2015-2020 The Software Heritage developers +# Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information @@ -19,11 +19,12 @@ from swh.core.api.serializers import msgpack_dumps, msgpack_loads from swh.core.db.common import db_transaction, db_transaction_generator from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex -from swh.model.identifiers import ExtendedObjectType, ExtendedSWHID +from swh.model.identifiers import ExtendedObjectType, ExtendedSWHID, ObjectType from swh.model.model import ( SHA1_SIZE, Content, Directory, + ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, @@ -633,6 +634,85 @@ def revision_get_random(self, db=None, cur=None) -> Sha1Git: return db.revision_get_random(cur) + @timed + @db_transaction() + def extid_get_from_extid( + self, id_type: str, ids: List[bytes], db=None, cur=None + ) -> List[Optional[ExtID]]: + """Get ExtID objects from external IDs + + Args: + id_type: type the external identifiers the SWH objects comes from + ids: list of external IDs + + Returns: + list of ExtID objects (if the ext ID is known, 
None otherwise) + + """ + extids = [] + for row in db.extid_get_from_extid_list(id_type, ids, cur): + extids.append( + converters.db_to_extid(dict(zip(db.extid_cols, row))) + if row[0] is not None + else None + ) + return extids + + @timed + @db_transaction() + def extid_get_from_target( + self, target_type: ObjectType, ids: List[Sha1Git], db=None, cur=None + ) -> List[Optional[ExtID]]: + """Get ExtID objects from target IDs + + Args: + target_type: type the SWH object + ids: list of SWH IDs + + Returns: + list of ExtID objects (if the SWH ID is known, None otherwise) + + """ + extids = [] + for row in db.extid_get_from_swhid_list(target_type.value, ids, cur): + extids.append( + converters.db_to_extid(dict(zip(db.extid_cols, row))) + if row[0] is not None + else None + ) + return extids + + @timed + @db_transaction() + def extid_add(self, ids: List[ExtID], db=None, cur=None) -> Dict[str, int]: + """Add a series of ExtID objects + + Args: + ids: list of ExtID objects + + Returns: + Summary dict of keys with associated count as values + + extid:add: New ExtID objects actually stored in db + """ + extid = [ + { + "extid": extid.extid, + "extid_type": extid.extid_type, + "target": extid.target.object_id, + "target_type": extid.target.object_type.name.lower(), # arghh + } + for extid in ids + ] + db.mktemp("extid", cur) + + db.copy_to(extid, "tmp_extid", db.extid_cols, cur) + + # move metadata in place + db.extid_add_from_temp(cur) + + return {"extid:add": len(extid)} + @timed @process_metrics @db_transaction() diff --git a/swh/storage/sql/30-schema.sql b/swh/storage/sql/30-schema.sql --- a/swh/storage/sql/30-schema.sql +++ b/swh/storage/sql/30-schema.sql @@ -17,7 +17,7 @@ -- latest schema version insert into dbversion(version, release, description) - values(167, now(), 'Work In Progress'); + values(168, now(), 'Work In Progress'); -- a SHA1 checksum create domain sha1 as bytea check (length(value) = 20); @@ -499,3 +499,19 @@ comment on column 
object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket'; comment on column object_counts_bucketed.value is 'Count of objects in the bucket'; comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket'; + + +-- The ExtID (typ. original VCS) <-> swhid relation table +create table extid +( + extid_type text not null, + extid bytea not null, + target_type object_type not null, + target sha1_git not null +); + +comment on table extid is 'Correspondence SWH object (SWHID) <-> original revision id (vcs id)'; +comment on column extid.extid_type is 'ExtID type'; +comment on column extid.extid is 'Intrinsic identifier of the object (e.g. hg revision)'; +comment on column extid.target_type is 'Type of SWHID of the referenced SWH object'; +comment on column extid.target is 'Value (hash) of SWHID of the referenced SWH object'; diff --git a/swh/storage/sql/40-funcs.sql b/swh/storage/sql/40-funcs.sql --- a/swh/storage/sql/40-funcs.sql +++ b/swh/storage/sql/40-funcs.sql @@ -549,6 +549,23 @@ $$; +-- Create entries in extid from tmp_extid +-- operates in bulk: 0. swh_mktemp(extid), 1. COPY to tmp_extid, +-- 2. 
call this function +create or replace function swh_extid_add() + returns void + language plpgsql +as $$ +begin + insert into extid (extid_type, extid, target_type, target) + select distinct t.extid_type, t.extid, t.target_type, t.target + from tmp_extid t + on conflict do nothing; + return; +end +$$; + + -- Create entries in person from tmp_release create or replace function swh_person_add_from_release() returns void diff --git a/swh/storage/sql/60-indexes.sql b/swh/storage/sql/60-indexes.sql --- a/swh/storage/sql/60-indexes.sql +++ b/swh/storage/sql/60-indexes.sql @@ -281,3 +281,7 @@ -- object_counts_bucketed create unique index concurrently object_counts_bucketed_pkey on object_counts_bucketed(line); alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey; + +-- extid +create unique index concurrently on extid(extid_type, extid); +create unique index concurrently on extid(target_type, target); diff --git a/swh/storage/tests/storage_data.py b/swh/storage/tests/storage_data.py --- a/swh/storage/tests/storage_data.py +++ b/swh/storage/tests/storage_data.py @@ -1,4 +1,4 @@ -# Copyright (C) 2015-2020 The Software Heritage developers +# Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information @@ -311,7 +311,139 @@ extra_headers=(), synthetic=False, ) - revisions: Tuple[Revision, ...] = (revision, revision2, revision3, revision4) + git_revisions: Tuple[Revision, ...] 
= (revision, revision2, revision3, revision4) + + hg_revision = Revision( + id=hash_to_bytes("951c9503541e7beaf002d7aebf2abd1629084c68"), + message=b"hello", + author=Person( + name=b"Nicolas Dandrimont", + email=b"nicolas@example.com", + fullname=b"Nicolas Dandrimont ", + ), + date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1234567890, microseconds=0), + offset=120, + negative_utc=False, + ), + committer=Person( + name=b"St\xc3fano Zacchiroli", + email=b"stefano@example.com", + fullname=b"St\xc3fano Zacchiroli ", + ), + committer_date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1123456789, microseconds=0), + offset=120, + negative_utc=False, + ), + parents=(), + type=RevisionType.MERCURIAL, + directory=directory.id, + metadata={ + "checksums": {"sha1": "tarball-sha1", "sha256": "tarball-sha256",}, + "signed-off-by": "some-dude", + "node": "a316dfb434af2b451c1f393496b7eaeda343f543", + }, + extra_headers=(), + synthetic=True, + ) + hg_revision2 = Revision( + id=hash_to_bytes("df4afb063236300eb13b96a0d7fff03f7b7cbbaf"), + message=b"hello again", + author=Person( + name=b"Roberto Dicosmo", + email=b"roberto@example.com", + fullname=b"Roberto Dicosmo ", + ), + date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1234567843, microseconds=220000,), + offset=-720, + negative_utc=False, + ), + committer=Person( + name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", + ), + committer_date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1123456789, microseconds=220000,), + offset=0, + negative_utc=False, + ), + parents=tuple([hg_revision.id]), + type=RevisionType.MERCURIAL, + directory=directory2.id, + metadata=None, + extra_headers=( + (b"node", hash_to_bytes("fa1b7c84a9b40605b67653700f268349a6d6aca1")), + ), + synthetic=False, + ) + hg_revision3 = Revision( + id=hash_to_bytes("84d8e7081b47ebb88cad9fa1f25de5f330872a37"), + message=b"a simple revision with no parents this time", + author=Person( + name=b"Roberto Dicosmo", + 
email=b"roberto@example.com", + fullname=b"Roberto Dicosmo ", + ), + date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1234567843, microseconds=220000,), + offset=-720, + negative_utc=False, + ), + committer=Person( + name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ", + ), + committer_date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1127351742, microseconds=220000,), + offset=0, + negative_utc=False, + ), + parents=tuple([hg_revision.id, hg_revision2.id]), + type=RevisionType.MERCURIAL, + directory=directory2.id, + metadata=None, + extra_headers=( + (b"node", hash_to_bytes("7f294a01c49065a90b3fe8b4ad49f08ce9656ef6")), + ), + synthetic=True, + ) + hg_revision4 = Revision( + id=hash_to_bytes("42070a39e5387e9b99bb3d83674e3a4a1ff39b69"), + message=b"parent of self.revision2", + author=Person( + name=b"me", email=b"me@soft.heri", fullname=b"me ", + ), + date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1234567843, microseconds=220000,), + offset=-720, + negative_utc=False, + ), + committer=Person( + name=b"committer-dude", + email=b"committer@dude.com", + fullname=b"committer-dude ", + ), + committer_date=TimestampWithTimezone( + timestamp=Timestamp(seconds=1244567843, microseconds=220000,), + offset=-720, + negative_utc=False, + ), + parents=tuple([hg_revision3.id]), + type=RevisionType.MERCURIAL, + directory=directory.id, + metadata=None, + extra_headers=( + (b"node", hash_to_bytes("f4160af0485c85823d9e829bae2c00b00a2e6297")), + ), + synthetic=False, + ) + hg_revisions: Tuple[Revision, ...] = ( + hg_revision, + hg_revision2, + hg_revision3, + hg_revision4, + ) + revisions: Tuple[Revision, ...] = git_revisions + hg_revisions origins: Tuple[Origin, ...] 
= ( Origin(url="https://github.com/user1/repo1"), diff --git a/swh/storage/tests/storage_tests.py b/swh/storage/tests/storage_tests.py --- a/swh/storage/tests/storage_tests.py +++ b/swh/storage/tests/storage_tests.py @@ -1,4 +1,4 @@ -# Copyright (C) 2015-2020 The Software Heritage developers +# Copyright (C) 2015-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information @@ -19,9 +19,11 @@ from swh.model import from_disk from swh.model.hashutil import hash_to_bytes from swh.model.hypothesis_strategies import objects +from swh.model.identifiers import CoreSWHID, ObjectType from swh.model.model import ( Content, Directory, + ExtID, Origin, OriginVisit, OriginVisitStatus, @@ -1060,6 +1062,171 @@ revision3.id, } + def test_extid_add_git(self, swh_storage, sample_data): + + gitids = [ + revision.id + for revision in sample_data.revisions + if revision.type.value == "git" + ] + nullids = [None] * len(gitids) + extids = [ + ExtID( + extid=gitid, + extid_type="git", + target=CoreSWHID(object_id=gitid, object_type=ObjectType.REVISION,), + ) + for gitid in gitids + ] + + assert swh_storage.extid_get_from_extid("git", gitids) == nullids + assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == nullids + + summary = swh_storage.extid_add(extids) + assert summary == {"extid:add": len(gitids)} + + assert swh_storage.extid_get_from_extid("git", gitids) == extids + assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == extids + + assert swh_storage.extid_get_from_extid("hg", gitids) == nullids + assert swh_storage.extid_get_from_target(ObjectType.RELEASE, gitids) == nullids + + def test_extid_add_hg(self, swh_storage, sample_data): + def get_node(revision): + node = None + if revision.extra_headers: + node = dict(revision.extra_headers).get(b"node") + if node is None and 
revision.metadata: + node = hash_to_bytes(revision.metadata.get("node")) + return node + + swhids = [ + revision.id + for revision in sample_data.revisions + if revision.type.value == "hg" + ] + extids = [ + get_node(revision) + for revision in sample_data.revisions + if revision.type.value == "hg" + ] + nullids = [None] * len(swhids) + + assert swh_storage.extid_get_from_extid("hg", extids) == nullids + assert swh_storage.extid_get_from_target(ObjectType.REVISION, swhids) == nullids + + extid_objs = [ + ExtID( + extid=hgid, + extid_type="hg", + target=CoreSWHID(object_id=swhid, object_type=ObjectType.REVISION,), + ) + for hgid, swhid in zip(extids, swhids) + ] + summary = swh_storage.extid_add(extid_objs) + assert summary == {"extid:add": len(swhids)} + + assert swh_storage.extid_get_from_extid("hg", extids) == extid_objs + assert ( + swh_storage.extid_get_from_target(ObjectType.REVISION, swhids) == extid_objs + ) + + assert swh_storage.extid_get_from_extid("git", extids) == nullids + assert swh_storage.extid_get_from_target(ObjectType.RELEASE, swhids) == nullids + + def test_extid_add_twice(self, swh_storage, sample_data): + + gitids = [ + revision.id + for revision in sample_data.revisions + if revision.type.value == "git" + ] + + extids = [ + ExtID( + extid=gitid, + extid_type="git", + target=CoreSWHID(object_id=gitid, object_type=ObjectType.REVISION,), + ) + for gitid in gitids + ] + summary = swh_storage.extid_add(extids) + assert summary == {"extid:add": len(gitids)} + + # add them again, should be noop + summary = swh_storage.extid_add(extids) + # assert summary == {"extid:add": 0} + assert swh_storage.extid_get_from_extid("git", gitids) == extids + assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == extids + + def test_extid_add_extid_unicity(self, swh_storage, sample_data): + + ids = [ + revision.id + for revision in sample_data.revisions + if revision.type.value == "git" + ] + nullids = [None] * len(ids) + + extids = [ + ExtID( + 
extid=extid, + extid_type="git", + target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), + ) + for extid in ids + ] + swh_storage.extid_add(extids) + + # try to add "modified-extid" versions, should be noops + extids2 = [ + ExtID( + extid=extid, + extid_type="hg", + target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), + ) + for extid in ids + ] + swh_storage.extid_add(extids2) + + assert swh_storage.extid_get_from_extid("git", ids) == extids + assert swh_storage.extid_get_from_extid("hg", ids) == nullids + assert swh_storage.extid_get_from_target(ObjectType.REVISION, ids) == extids + + def test_extid_add_target_unicity(self, swh_storage, sample_data): + + ids = [ + revision.id + for revision in sample_data.revisions + if revision.type.value == "git" + ] + nullids = [None] * len(ids) + + extids = [ + ExtID( + extid=extid, + extid_type="git", + target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,), + ) + for extid in ids + ] + swh_storage.extid_add(extids) + + # try to add "modified" versions, should be noops + extids2 = [ + ExtID( + extid=extid, + extid_type="git", + target=CoreSWHID(object_id=extid, object_type=ObjectType.RELEASE,), + ) + for extid in ids + ] + swh_storage.extid_add(extids2) + + assert swh_storage.extid_get_from_extid("git", ids) == extids + assert swh_storage.extid_get_from_target(ObjectType.REVISION, ids) == extids + assert swh_storage.extid_get_from_target(ObjectType.RELEASE, ids) == nullids + def test_release_add(self, swh_storage, sample_data): release, release2 = sample_data.releases[:2] diff --git a/swh/storage/tests/test_backfill.py b/swh/storage/tests/test_backfill.py --- a/swh/storage/tests/test_backfill.py +++ b/swh/storage/tests/test_backfill.py @@ -169,6 +169,7 @@ "content": lambda start, end: [(None, None)], "skipped_content": lambda start, end: [(None, None)], "directory": lambda start, end: [(None, None)], + "extid": lambda start, end: [(None, None)], "metadata_authority": lambda start, 
end: [(None, None)], "metadata_fetcher": lambda start, end: [(None, None)], "revision": lambda start, end: [(None, None)], diff --git a/swh/storage/tests/test_kafka_writer.py b/swh/storage/tests/test_kafka_writer.py --- a/swh/storage/tests/test_kafka_writer.py +++ b/swh/storage/tests/test_kafka_writer.py @@ -41,6 +41,7 @@ "content", "skipped_content", "directory", + "extid", "metadata_authority", "metadata_fetcher", "revision", @@ -71,6 +72,7 @@ for obj_type in ( "content", "directory", + "extid", "metadata_authority", "metadata_fetcher", "origin", @@ -127,6 +129,7 @@ for obj_type in ( "content", "directory", + "extid", "metadata_authority", "metadata_fetcher", "origin", diff --git a/swh/storage/writer.py b/swh/storage/writer.py --- a/swh/storage/writer.py +++ b/swh/storage/writer.py @@ -10,6 +10,7 @@ from swh.model.model import ( Content, Directory, + ExtID, MetadataAuthority, MetadataFetcher, Origin, @@ -115,3 +116,6 @@ def metadata_authority_add(self, authorities: Iterable[MetadataAuthority]) -> None: self.write_additions("metadata_authority", authorities) + + def extid_add(self, extids: Iterable[ExtID]) -> None: + self.write_additions("extid", extids)