diff --git a/sql/upgrades/157.sql b/sql/upgrades/157.sql index 4767d461..cadd4c24 100644 --- a/sql/upgrades/157.sql +++ b/sql/upgrades/157.sql @@ -1,46 +1,63 @@ -- SWH DB schema upgrade -- from_version: 156 -- to_version: 157 -- description: Add extrinsic artifact metadata -- latest schema version insert into dbversion(version, release, description) values(157, now(), 'Work In Progress'); create domain swhid as text check (value ~ '^swh:[0-9]+:.*'); alter table origin_metadata rename to object_metadata; -- Use the origin URL as identifier, instead of the origin id alter table object_metadata add column type text; comment on column object_metadata.type is 'the type of object (content/directory/revision/release/snapshot/origin) the metadata is on'; alter table object_metadata add column origin_url text; update object_metadata set type = 'origin', origin_url = origin.url from origin where object_metadata.origin_id = origin.id; alter table object_metadata alter column type set not null; alter table object_metadata alter column origin_url set not null; alter table object_metadata drop column id; alter table object_metadata drop column origin_id; alter table object_metadata rename column origin_url to id; comment on column object_metadata.id is 'the SWHID or origin URL for which the metadata was found'; create unique index object_metadata_content_authority_date_fetcher on object_metadata(id, authority_id, discovery_date, fetcher_id); + + +-- Add context columns +alter table object_metadata + add column origin text; +alter table object_metadata + add column visit bigint; +alter table object_metadata + add column snapshot swhid; +alter table object_metadata + add column release swhid; +alter table object_metadata + add column revision swhid; +alter table object_metadata + add column path bytea; +alter table object_metadata + add column directory swhid; diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py index 2a03a925..7cbddfd7 100644 --- a/swh/storage/cassandra/cql.py +++ b/swh/storage/cassandra/cql.py @@ -1,985 +1,994 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import functools import json import logging import random from typing import ( Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, TypeVar, + Union, ) from cassandra import CoordinationFailure from cassandra.cluster import Cluster, EXEC_PROFILE_DEFAULT, ExecutionProfile, ResultSet from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy from cassandra.query import PreparedStatement, BoundStatement from tenacity import ( retry, stop_after_attempt, wait_random_exponential, retry_if_exception_type, ) from swh.model.model import ( Sha1Git, TimestampWithTimezone, Timestamp, Person, Content, SkippedContent, OriginVisit, OriginVisitStatus, Origin, ) from .common import Row, TOKEN_BEGIN, TOKEN_END, hash_url from .schema import CREATE_TABLES_QUERIES, HASH_ALGORITHMS logger = logging.getLogger(__name__) _execution_profiles = { EXEC_PROFILE_DEFAULT: ExecutionProfile( load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy()) ), } # Configuration for cassandra-driver's access to servers: # * hit the right server directly when sending a query (TokenAwarePolicy), # * if there's more than one, then pick one at random that's in the same # datacenter as the client 
(DCAwareRoundRobinPolicy) def create_keyspace( hosts: List[str], keyspace: str, port: int = 9042, *, durable_writes=True ): cluster = Cluster(hosts, port=port, execution_profiles=_execution_profiles) session = cluster.connect() extra_params = "" if not durable_writes: extra_params = "AND durable_writes = false" session.execute( """CREATE KEYSPACE IF NOT EXISTS "%s" WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 } %s; """ % (keyspace, extra_params) ) session.execute('USE "%s"' % keyspace) for query in CREATE_TABLES_QUERIES: session.execute(query) T = TypeVar("T") def _prepared_statement(query: str) -> Callable[[Callable[..., T]], Callable[..., T]]: """Returns a decorator usable on methods of CqlRunner, to inject them with a 'statement' argument, that is a prepared statement corresponding to the query. This only works on methods of CqlRunner, as preparing a statement requires a connection to a Cassandra server.""" def decorator(f): @functools.wraps(f) def newf(self, *args, **kwargs) -> T: if f.__name__ not in self._prepared_statements: statement: PreparedStatement = self._session.prepare(query) self._prepared_statements[f.__name__] = statement return f( self, *args, **kwargs, statement=self._prepared_statements[f.__name__] ) return newf return decorator def _prepared_insert_statement(table_name: str, columns: List[str]): """Shorthand for using `_prepared_statement` for `INSERT INTO` statements.""" return _prepared_statement( "INSERT INTO %s (%s) VALUES (%s)" % (table_name, ", ".join(columns), ", ".join("?" for _ in columns),) ) def _prepared_exists_statement(table_name: str): """Shorthand for using `_prepared_statement` for queries that only check which ids in a list exist in the table.""" return _prepared_statement(f"SELECT id FROM {table_name} WHERE id IN ?") class CqlRunner: """Class managing prepared statements and building queries to be sent to Cassandra.""" def __init__(self, hosts: List[str], keyspace: str, port: int): self._cluster = Cluster( hosts, port=port, execution_profiles=_execution_profiles ) self._session = self._cluster.connect(keyspace) self._cluster.register_user_type( keyspace, "microtimestamp_with_timezone", TimestampWithTimezone ) self._cluster.register_user_type(keyspace, "microtimestamp", Timestamp) self._cluster.register_user_type(keyspace, "person", Person) self._prepared_statements: Dict[str, PreparedStatement] = {} ########################## # Common utility functions ########################## MAX_RETRIES = 3 @retry( wait=wait_random_exponential(multiplier=1, max=10), stop=stop_after_attempt(MAX_RETRIES), retry=retry_if_exception_type(CoordinationFailure), ) def _execute_with_retries(self, statement, args) -> ResultSet: return self._session.execute(statement, args, timeout=1000.0) @_prepared_statement( "UPDATE object_count SET count = count + ? " "WHERE partition_key = 0 AND object_type = ?" ) def _increment_counter( self, object_type: str, nb: int, *, statement: PreparedStatement ) -> None: self._execute_with_retries(statement, [nb, object_type]) def _add_one(self, statement, object_type: str, obj, keys: List[str]) -> None: self._increment_counter(object_type, 1) self._execute_with_retries(statement, [getattr(obj, key) for key in keys]) def _get_random_row(self, statement) -> Optional[Row]: """Takes a prepared statement of the form "SELECT * FROM WHERE token() > ? 
LIMIT 1" and uses it to return a random row""" token = random.randint(TOKEN_BEGIN, TOKEN_END) rows = self._execute_with_retries(statement, [token]) if not rows: # There are no row with a greater token; wrap around to get # the row with the smallest token rows = self._execute_with_retries(statement, [TOKEN_BEGIN]) if rows: return rows.one() else: return None def _missing(self, statement, ids): res = self._execute_with_retries(statement, [ids]) found_ids = {id_ for (id_,) in res} return [id_ for id_ in ids if id_ not in found_ids] ########################## # 'content' table ########################## _content_pk = ["sha1", "sha1_git", "sha256", "blake2s256"] _content_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", ] def _content_add_finalize(self, statement: BoundStatement) -> None: """Returned currified by content_add_prepare, to be called when the content row should be added to the primary table.""" self._execute_with_retries(statement, None) self._increment_counter("content", 1) @_prepared_insert_statement("content", _content_keys) def content_add_prepare( self, content, *, statement ) -> Tuple[int, Callable[[], None]]: """Prepares insertion of a Content to the main 'content' table. Returns a token (to be used in secondary tables), and a function to be called to perform the insertion in the main table.""" statement = statement.bind( [getattr(content, key) for key in self._content_keys] ) # Type used for hashing keys (usually, it will be # cassandra.metadata.Murmur3Token) token_class = self._cluster.metadata.token_map.token_class # Token of the row when it will be inserted. This is equivalent to # "SELECT token({', '.join(self._content_pk)}) FROM content WHERE ..." # after the row is inserted; but we need the token to insert in the # index tables *before* inserting to the main 'content' table token = token_class.from_key(statement.routing_key).value assert TOKEN_BEGIN <= token <= TOKEN_END # Function to be called after the indexes contain their respective # row finalizer = functools.partial(self._content_add_finalize, statement) return (token, finalizer) @_prepared_statement( "SELECT * FROM content WHERE " + " AND ".join(map("%s = ?".__mod__, HASH_ALGORITHMS)) ) def content_get_from_pk( self, content_hashes: Dict[str, bytes], *, statement ) -> Optional[Row]: rows = list( self._execute_with_retries( statement, [content_hashes[algo] for algo in HASH_ALGORITHMS] ) ) assert len(rows) <= 1 if rows: return rows[0] else: return None @_prepared_statement( "SELECT * FROM content WHERE token(" + ", ".join(_content_pk) + ") = ?" ) def content_get_from_token(self, token, *, statement) -> Iterable[Row]: return self._execute_with_retries(statement, [token]) @_prepared_statement( "SELECT * FROM content WHERE token(%s) > ? LIMIT 1" % ", ".join(_content_pk) ) def content_get_random(self, *, statement) -> Optional[Row]: return self._get_random_row(statement) @_prepared_statement( ( "SELECT token({0}) AS tok, {1} FROM content " "WHERE token({0}) >= ? AND token({0}) <= ? LIMIT ?" 
).format(", ".join(_content_pk), ", ".join(_content_keys)) ) def content_get_token_range( self, start: int, end: int, limit: int, *, statement ) -> Iterable[Row]: return self._execute_with_retries(statement, [start, end, limit]) ########################## # 'content_by_*' tables ########################## @_prepared_statement("SELECT sha1_git FROM content_by_sha1_git WHERE sha1_git IN ?") def content_missing_by_sha1_git( self, ids: List[bytes], *, statement ) -> List[bytes]: return self._missing(statement, ids) def content_index_add_one(self, algo: str, content: Content, token: int) -> None: """Adds a row mapping content[algo] to the token of the Content in the main 'content' table.""" query = ( f"INSERT INTO content_by_{algo} ({algo}, target_token) " f"VALUES (%s, %s)" ) self._execute_with_retries(query, [content.get_hash(algo), token]) def content_get_tokens_from_single_hash( self, algo: str, hash_: bytes ) -> Iterable[int]: assert algo in HASH_ALGORITHMS query = f"SELECT target_token FROM content_by_{algo} WHERE {algo} = %s" return (tok for (tok,) in self._execute_with_retries(query, [hash_])) ########################## # 'skipped_content' table ########################## _skipped_content_pk = ["sha1", "sha1_git", "sha256", "blake2s256"] _skipped_content_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", "reason", "origin", ] _magic_null_pk = b"" """ NULLs (or all-empty blobs) are not allowed in primary keys; instead use a special value that can't possibly be a valid hash. """ def _skipped_content_add_finalize(self, statement: BoundStatement) -> None: """Returned currified by skipped_content_add_prepare, to be called when the content row should be added to the primary table.""" self._execute_with_retries(statement, None) self._increment_counter("skipped_content", 1) @_prepared_insert_statement("skipped_content", _skipped_content_keys) def skipped_content_add_prepare( self, content, *, statement ) -> Tuple[int, Callable[[], None]]: """Prepares insertion of a Content to the main 'skipped_content' table. Returns a token (to be used in secondary tables), and a function to be called to perform the insertion in the main table.""" # Replace NULLs (which are not allowed in the partition key) with # an empty byte string content = content.to_dict() for key in self._skipped_content_pk: if content[key] is None: content[key] = self._magic_null_pk statement = statement.bind( [content.get(key) for key in self._skipped_content_keys] ) # Type used for hashing keys (usually, it will be # cassandra.metadata.Murmur3Token) token_class = self._cluster.metadata.token_map.token_class # Token of the row when it will be inserted. This is equivalent to # "SELECT token({', '.join(self._content_pk)}) # FROM skipped_content WHERE ..." 
# after the row is inserted; but we need the token to insert in the # index tables *before* inserting to the main 'skipped_content' table token = token_class.from_key(statement.routing_key).value assert TOKEN_BEGIN <= token <= TOKEN_END # Function to be called after the indexes contain their respective # row finalizer = functools.partial(self._skipped_content_add_finalize, statement) return (token, finalizer) @_prepared_statement( "SELECT * FROM skipped_content WHERE " + " AND ".join(map("%s = ?".__mod__, HASH_ALGORITHMS)) ) def skipped_content_get_from_pk( self, content_hashes: Dict[str, bytes], *, statement ) -> Optional[Row]: rows = list( self._execute_with_retries( statement, [ content_hashes[algo] or self._magic_null_pk for algo in HASH_ALGORITHMS ], ) ) assert len(rows) <= 1 if rows: # TODO: convert _magic_null_pk back to None? return rows[0] else: return None ########################## # 'skipped_content_by_*' tables ########################## def skipped_content_index_add_one( self, algo: str, content: SkippedContent, token: int ) -> None: """Adds a row mapping content[algo] to the token of the SkippedContent in the main 'skipped_content' table.""" query = ( f"INSERT INTO skipped_content_by_{algo} ({algo}, target_token) " f"VALUES (%s, %s)" ) self._execute_with_retries( query, [content.get_hash(algo) or self._magic_null_pk, token] ) ########################## # 'revision' table ########################## _revision_keys = [ "id", "date", "committer_date", "type", "directory", "message", "author", "committer", "synthetic", "metadata", ] @_prepared_exists_statement("revision") def revision_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement("revision", _revision_keys) def revision_add_one(self, revision: Dict[str, Any], *, statement) -> None: self._execute_with_retries( statement, [revision[key] for key in self._revision_keys] ) self._increment_counter("revision", 1) @_prepared_statement("SELECT id FROM revision WHERE id IN ?") def revision_get_ids(self, revision_ids, *, statement) -> ResultSet: return self._execute_with_retries(statement, [revision_ids]) @_prepared_statement("SELECT * FROM revision WHERE id IN ?") def revision_get(self, revision_ids, *, statement) -> ResultSet: return self._execute_with_retries(statement, [revision_ids]) @_prepared_statement("SELECT * FROM revision WHERE token(id) > ? 
LIMIT 1") def revision_get_random(self, *, statement) -> Optional[Row]: return self._get_random_row(statement) ########################## # 'revision_parent' table ########################## _revision_parent_keys = ["id", "parent_rank", "parent_id"] @_prepared_insert_statement("revision_parent", _revision_parent_keys) def revision_parent_add_one( self, id_: Sha1Git, parent_rank: int, parent_id: Sha1Git, *, statement ) -> None: self._execute_with_retries(statement, [id_, parent_rank, parent_id]) @_prepared_statement("SELECT parent_id FROM revision_parent WHERE id = ?") def revision_parent_get(self, revision_id: Sha1Git, *, statement) -> ResultSet: return self._execute_with_retries(statement, [revision_id]) ########################## # 'release' table ########################## _release_keys = [ "id", "target", "target_type", "date", "name", "message", "author", "synthetic", ] @_prepared_exists_statement("release") def release_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement("release", _release_keys) def release_add_one(self, release: Dict[str, Any], *, statement) -> None: self._execute_with_retries( statement, [release[key] for key in self._release_keys] ) self._increment_counter("release", 1) @_prepared_statement("SELECT * FROM release WHERE id in ?") def release_get(self, release_ids: List[str], *, statement) -> None: return self._execute_with_retries(statement, [release_ids]) @_prepared_statement("SELECT * FROM release WHERE token(id) > ? LIMIT 1") def release_get_random(self, *, statement) -> Optional[Row]: return self._get_random_row(statement) ########################## # 'directory' table ########################## _directory_keys = ["id"] @_prepared_exists_statement("directory") def directory_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement("directory", _directory_keys) def directory_add_one(self, directory_id: Sha1Git, *, statement) -> None: """Called after all calls to directory_entry_add_one, to commit/finalize the directory.""" self._execute_with_retries(statement, [directory_id]) self._increment_counter("directory", 1) @_prepared_statement("SELECT * FROM directory WHERE token(id) > ? 
LIMIT 1") def directory_get_random(self, *, statement) -> Optional[Row]: return self._get_random_row(statement) ########################## # 'directory_entry' table ########################## _directory_entry_keys = ["directory_id", "name", "type", "target", "perms"] @_prepared_insert_statement("directory_entry", _directory_entry_keys) def directory_entry_add_one(self, entry: Dict[str, Any], *, statement) -> None: self._execute_with_retries( statement, [entry[key] for key in self._directory_entry_keys] ) @_prepared_statement("SELECT * FROM directory_entry WHERE directory_id IN ?") def directory_entry_get(self, directory_ids, *, statement) -> ResultSet: return self._execute_with_retries(statement, [directory_ids]) ########################## # 'snapshot' table ########################## _snapshot_keys = ["id"] @_prepared_exists_statement("snapshot") def snapshot_missing(self, ids: List[bytes], *, statement) -> List[bytes]: return self._missing(statement, ids) @_prepared_insert_statement("snapshot", _snapshot_keys) def snapshot_add_one(self, snapshot_id: Sha1Git, *, statement) -> None: self._execute_with_retries(statement, [snapshot_id]) self._increment_counter("snapshot", 1) @_prepared_statement("SELECT * FROM snapshot WHERE id = ?") def snapshot_get(self, snapshot_id: Sha1Git, *, statement) -> ResultSet: return self._execute_with_retries(statement, [snapshot_id]) @_prepared_statement("SELECT * FROM snapshot WHERE token(id) > ? LIMIT 1") def snapshot_get_random(self, *, statement) -> Optional[Row]: return self._get_random_row(statement) ########################## # 'snapshot_branch' table ########################## _snapshot_branch_keys = ["snapshot_id", "name", "target_type", "target"] @_prepared_insert_statement("snapshot_branch", _snapshot_branch_keys) def snapshot_branch_add_one(self, branch: Dict[str, Any], *, statement) -> None: self._execute_with_retries( statement, [branch[key] for key in self._snapshot_branch_keys] ) @_prepared_statement( "SELECT ascii_bins_count(target_type) AS counts " "FROM snapshot_branch " "WHERE snapshot_id = ? " ) def snapshot_count_branches(self, snapshot_id: Sha1Git, *, statement) -> ResultSet: return self._execute_with_retries(statement, [snapshot_id]) @_prepared_statement( "SELECT * FROM snapshot_branch WHERE snapshot_id = ? AND name >= ? LIMIT ?" ) def snapshot_branch_get( self, snapshot_id: Sha1Git, from_: bytes, limit: int, *, statement ) -> None: return self._execute_with_retries(statement, [snapshot_id, from_, limit]) ########################## # 'origin' table ########################## origin_keys = ["sha1", "url", "type", "next_visit_id"] @_prepared_statement( "INSERT INTO origin (sha1, url, next_visit_id) " "VALUES (?, ?, 1) IF NOT EXISTS" ) def origin_add_one(self, origin: Origin, *, statement) -> None: self._execute_with_retries(statement, [hash_url(origin.url), origin.url]) self._increment_counter("origin", 1) @_prepared_statement("SELECT * FROM origin WHERE sha1 = ?") def origin_get_by_sha1(self, sha1: bytes, *, statement) -> ResultSet: return self._execute_with_retries(statement, [sha1]) def origin_get_by_url(self, url: str) -> ResultSet: return self.origin_get_by_sha1(hash_url(url)) @_prepared_statement( f'SELECT token(sha1) AS tok, {", ".join(origin_keys)} ' f"FROM origin WHERE token(sha1) >= ? LIMIT ?" 
) def origin_list(self, start_token: int, limit: int, *, statement) -> ResultSet: return self._execute_with_retries(statement, [start_token, limit]) @_prepared_statement("SELECT * FROM origin") def origin_iter_all(self, *, statement) -> ResultSet: return self._execute_with_retries(statement, []) @_prepared_statement("SELECT next_visit_id FROM origin WHERE sha1 = ?") def _origin_get_next_visit_id(self, origin_sha1: bytes, *, statement) -> int: rows = list(self._execute_with_retries(statement, [origin_sha1])) assert len(rows) == 1 # TODO: error handling return rows[0].next_visit_id @_prepared_statement( "UPDATE origin SET next_visit_id=? WHERE sha1 = ? IF next_visit_id=?" ) def origin_generate_unique_visit_id(self, origin_url: str, *, statement) -> int: origin_sha1 = hash_url(origin_url) next_id = self._origin_get_next_visit_id(origin_sha1) while True: res = list( self._execute_with_retries( statement, [next_id + 1, origin_sha1, next_id] ) ) assert len(res) == 1 if res[0].applied: # No data race return next_id else: # Someone else updated it before we did, let's try again next_id = res[0].next_visit_id # TODO: abort after too many attempts return next_id ########################## # 'origin_visit' table ########################## _origin_visit_keys = [ "origin", "visit", "type", "date", ] @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? AND visit > ? " "ORDER BY visit ASC" ) def _origin_visit_get_pagination_asc_no_limit( self, origin_url: str, last_visit: int, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url, last_visit]) @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? AND visit > ? " "ORDER BY visit ASC " "LIMIT ?" ) def _origin_visit_get_pagination_asc_limit( self, origin_url: str, last_visit: int, limit: int, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url, last_visit, limit]) @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? AND visit < ? " "ORDER BY visit DESC" ) def _origin_visit_get_pagination_desc_no_limit( self, origin_url: str, last_visit: int, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url, last_visit]) @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? AND visit < ? " "ORDER BY visit DESC " "LIMIT ?" ) def _origin_visit_get_pagination_desc_limit( self, origin_url: str, last_visit: int, limit: int, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url, last_visit, limit]) @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit ASC LIMIT ?" ) def _origin_visit_get_no_pagination_asc_limit( self, origin_url: str, limit: int, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url, limit]) @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit ASC " ) def _origin_visit_get_no_pagination_asc_no_limit( self, origin_url: str, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url]) @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit DESC" ) def _origin_visit_get_no_pagination_desc_no_limit( self, origin_url: str, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url]) @_prepared_statement( "SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit DESC LIMIT ?" 
) def _origin_visit_get_no_pagination_desc_limit( self, origin_url: str, limit: int, *, statement ) -> ResultSet: return self._execute_with_retries(statement, [origin_url, limit]) def origin_visit_get( self, origin_url: str, last_visit: Optional[int], limit: Optional[int], order: str = "asc", ) -> ResultSet: order = order.lower() assert order in ["asc", "desc"] args: List[Any] = [origin_url] if last_visit is not None: page_name = "pagination" args.append(last_visit) else: page_name = "no_pagination" if limit is not None: limit_name = "limit" args.append(limit) else: limit_name = "no_limit" method_name = f"_origin_visit_get_{page_name}_{order}_{limit_name}" origin_visit_get_method = getattr(self, method_name) return origin_visit_get_method(*args) @_prepared_insert_statement("origin_visit", _origin_visit_keys) def origin_visit_add_one(self, visit: OriginVisit, *, statement) -> None: self._add_one(statement, "origin_visit", visit, self._origin_visit_keys) _origin_visit_status_keys = [ "origin", "visit", "date", "status", "snapshot", "metadata", ] @_prepared_insert_statement("origin_visit_status", _origin_visit_status_keys) def origin_visit_status_add_one( self, visit_update: OriginVisitStatus, *, statement ) -> None: assert self._origin_visit_status_keys[-1] == "metadata" keys = self._origin_visit_status_keys metadata = json.dumps(visit_update.metadata) self._execute_with_retries( statement, [getattr(visit_update, key) for key in keys[:-1]] + [metadata] ) def origin_visit_status_get_latest(self, origin: str, visit: int,) -> Optional[Row]: """Given an origin visit id, return its latest origin_visit_status """ rows = self.origin_visit_status_get(origin, visit) return rows[0] if rows else None @_prepared_statement( "SELECT * FROM origin_visit_status " "WHERE origin = ? AND visit = ? " "ORDER BY date DESC" ) def origin_visit_status_get( self, origin: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, *, statement, ) -> List[Row]: """Return all origin visit statuses for a given visit """ return list(self._execute_with_retries(statement, [origin, visit])) @_prepared_statement("SELECT * FROM origin_visit WHERE origin = ? 
AND visit = ?") def origin_visit_get_one( self, origin_url: str, visit_id: int, *, statement ) -> Optional[Row]: # TODO: error handling rows = list(self._execute_with_retries(statement, [origin_url, visit_id])) if rows: return rows[0] else: return None @_prepared_statement("SELECT * FROM origin_visit WHERE origin = ?") def origin_visit_get_all(self, origin_url: str, *, statement) -> ResultSet: return self._execute_with_retries(statement, [origin_url]) @_prepared_statement("SELECT * FROM origin_visit WHERE token(origin) >= ?") def _origin_visit_iter_from(self, min_token: int, *, statement) -> Iterator[Row]: yield from self._execute_with_retries(statement, [min_token]) @_prepared_statement("SELECT * FROM origin_visit WHERE token(origin) < ?") def _origin_visit_iter_to(self, max_token: int, *, statement) -> Iterator[Row]: yield from self._execute_with_retries(statement, [max_token]) def origin_visit_iter(self, start_token: int) -> Iterator[Row]: """Returns all origin visits in order from this token, and wraps around the token space.""" yield from self._origin_visit_iter_from(start_token) yield from self._origin_visit_iter_to(start_token) ########################## # 'metadata_authority' table ########################## _metadata_authority_keys = ["url", "type", "metadata"] @_prepared_insert_statement("metadata_authority", _metadata_authority_keys) def metadata_authority_add(self, url, type, metadata, *, statement): return self._execute_with_retries(statement, [url, type, metadata]) @_prepared_statement("SELECT * from metadata_authority WHERE type = ? AND url = ?") def metadata_authority_get(self, type, url, *, statement) -> Optional[Row]: return next(iter(self._execute_with_retries(statement, [type, url])), None) ########################## # 'metadata_fetcher' table ########################## _metadata_fetcher_keys = ["name", "version", "metadata"] @_prepared_insert_statement("metadata_fetcher", _metadata_fetcher_keys) def metadata_fetcher_add(self, name, version, metadata, *, statement): return self._execute_with_retries(statement, [name, version, metadata]) @_prepared_statement( "SELECT * from metadata_fetcher WHERE name = ? AND version = ?" ) def metadata_fetcher_get(self, name, version, *, statement) -> Optional[Row]: return next(iter(self._execute_with_retries(statement, [name, version])), None) ######################### # 'object_metadata' table ######################### _object_metadata_keys = [ "type", "id", "authority_type", "authority_url", "discovery_date", "fetcher_name", "fetcher_version", "format", "metadata", + "origin", + "visit", + "snapshot", + "release", + "revision", + "path", + "directory", ] @_prepared_statement( f"INSERT INTO object_metadata ({', '.join(_object_metadata_keys)}) " f"VALUES ({', '.join('?' for _ in _object_metadata_keys)})" ) def object_metadata_add( self, object_type: str, id: str, authority_type, authority_url, discovery_date, fetcher_name, fetcher_version, format, metadata, + context: Dict[str, Union[str, bytes, int]], *, statement, ): params = [ object_type, id, authority_type, authority_url, discovery_date, fetcher_name, fetcher_version, format, metadata, ] return self._execute_with_retries(statement, params,) @_prepared_statement( "SELECT * from object_metadata " "WHERE id=? AND authority_url=? AND discovery_date>? AND authority_type=?" 
) def object_metadata_get_after_date( self, id: str, authority_type: str, authority_url: str, after: datetime.datetime, *, statement, ): return self._execute_with_retries( statement, [id, authority_url, after, authority_type] ) @_prepared_statement( "SELECT * from object_metadata " "WHERE id=? AND authority_type=? AND authority_url=? " "AND (discovery_date, fetcher_name, fetcher_version) > (?, ?, ?)" ) def object_metadata_get_after_date_and_fetcher( self, id: str, authority_type: str, authority_url: str, after_date: datetime.datetime, after_fetcher_name: str, after_fetcher_version: str, *, statement, ): return self._execute_with_retries( statement, [ id, authority_type, authority_url, after_date, after_fetcher_name, after_fetcher_version, ], ) @_prepared_statement( "SELECT * from object_metadata " "WHERE id=? AND authority_url=? AND authority_type=?" ) def object_metadata_get( self, id: str, authority_type: str, authority_url: str, *, statement ) -> Iterable[Row]: return self._execute_with_retries( statement, [id, authority_url, authority_type] ) ########################## # Miscellaneous ########################## @_prepared_statement("SELECT uuid() FROM revision LIMIT 1;") def check_read(self, *, statement): self._execute_with_retries(statement, []) @_prepared_statement( "SELECT object_type, count FROM object_count WHERE partition_key=0" ) def stat_counters(self, *, statement) -> ResultSet: return self._execute_with_retries(statement, []) diff --git a/swh/storage/cassandra/schema.py b/swh/storage/cassandra/schema.py index 89d5d7ee..01bdecec 100644 --- a/swh/storage/cassandra/schema.py +++ b/swh/storage/cassandra/schema.py @@ -1,273 +1,282 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information CREATE_TABLES_QUERIES = """ CREATE OR REPLACE FUNCTION ascii_bins_count_sfunc ( state tuple>, -- (nb_none, map) bin_name ascii ) CALLED ON NULL INPUT RETURNS tuple> LANGUAGE java AS $$ if (bin_name == null) { state.setInt(0, state.getInt(0) + 1); } else { Map counters = state.getMap( 1, String.class, Integer.class); Integer nb = counters.get(bin_name); if (nb == null) { nb = 0; } counters.put(bin_name, nb + 1); state.setMap(1, counters, String.class, Integer.class); } return state; $$ ; CREATE OR REPLACE AGGREGATE ascii_bins_count ( ascii ) SFUNC ascii_bins_count_sfunc STYPE tuple> INITCOND (0, {}) ; CREATE TYPE IF NOT EXISTS microtimestamp ( seconds bigint, microseconds int ); CREATE TYPE IF NOT EXISTS microtimestamp_with_timezone ( timestamp frozen, offset smallint, negative_utc boolean ); CREATE TYPE IF NOT EXISTS person ( fullname blob, name blob, email blob ); CREATE TABLE IF NOT EXISTS content ( sha1 blob, sha1_git blob, sha256 blob, blake2s256 blob, length bigint, ctime timestamp, -- creation time, i.e. time of (first) injection into the storage status ascii, PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256)) ); CREATE TABLE IF NOT EXISTS skipped_content ( sha1 blob, sha1_git blob, sha256 blob, blake2s256 blob, length bigint, ctime timestamp, -- creation time, i.e. 
time of (first) injection into the storage status ascii, reason text, origin text, PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256)) ); CREATE TABLE IF NOT EXISTS revision ( id blob PRIMARY KEY, date microtimestamp_with_timezone, committer_date microtimestamp_with_timezone, type ascii, directory blob, -- source code "root" directory message blob, author person, committer person, synthetic boolean, -- true iff revision has been created by Software Heritage metadata text -- extra metadata as JSON(tarball checksums, -- extra commit information, etc...) ); CREATE TABLE IF NOT EXISTS revision_parent ( id blob, parent_rank int, -- parent position in merge commits, 0-based parent_id blob, PRIMARY KEY ((id), parent_rank) ); CREATE TABLE IF NOT EXISTS release ( id blob PRIMARY KEY, target_type ascii, target blob, date microtimestamp_with_timezone, name blob, message blob, author person, synthetic boolean, -- true iff release has been created by Software Heritage ); CREATE TABLE IF NOT EXISTS directory ( id blob PRIMARY KEY, ); CREATE TABLE IF NOT EXISTS directory_entry ( directory_id blob, name blob, -- path name, relative to containing dir target blob, perms int, -- unix-like permissions type ascii, -- target type PRIMARY KEY ((directory_id), name) ); CREATE TABLE IF NOT EXISTS snapshot ( id blob PRIMARY KEY, ); -- For a given snapshot_id, branches are sorted by their name, -- allowing easy pagination. CREATE TABLE IF NOT EXISTS snapshot_branch ( snapshot_id blob, name blob, target_type ascii, target blob, PRIMARY KEY ((snapshot_id), name) ); CREATE TABLE IF NOT EXISTS origin_visit ( origin text, visit bigint, date timestamp, type text, PRIMARY KEY ((origin), visit) ); CREATE TABLE IF NOT EXISTS origin_visit_status ( origin text, visit bigint, date timestamp, status ascii, metadata text, snapshot blob, PRIMARY KEY ((origin), visit, date) ); CREATE TABLE IF NOT EXISTS origin ( sha1 blob PRIMARY KEY, url text, type text, next_visit_id int, -- We need integer visit ids for compatibility with the pgsql -- storage, so we're using lightweight transactions with this trick: -- https://stackoverflow.com/a/29391877/539465 ); CREATE TABLE IF NOT EXISTS metadata_authority ( url text, type ascii, metadata text, PRIMARY KEY ((url), type) ); CREATE TABLE IF NOT EXISTS metadata_fetcher ( name ascii, version ascii, metadata text, PRIMARY KEY ((name), version) ); CREATE TABLE IF NOT EXISTS object_metadata ( type text, id text, -- metadata source authority_type text, authority_url text, discovery_date timestamp, fetcher_name ascii, fetcher_version ascii, -- metadata itself format ascii, metadata blob, + -- context + origin text, + visit bigint, + snapshot text, + release text, + revision text, + path blob, + directory text, + PRIMARY KEY ((id), authority_type, authority_url, discovery_date, fetcher_name, fetcher_version) ); CREATE TABLE IF NOT EXISTS object_count ( partition_key smallint, -- Constant, must always be 0 object_type ascii, count counter, PRIMARY KEY ((partition_key), object_type) ); """.split( "\n\n\n" ) CONTENT_INDEX_TEMPLATE = """ -- Secondary table, used for looking up "content" from a single hash CREATE TABLE IF NOT EXISTS content_by_{main_algo} ( {main_algo} blob, target_token bigint, -- value of token(pk) on the "primary" table PRIMARY KEY (({main_algo}), target_token) ); CREATE TABLE IF NOT EXISTS skipped_content_by_{main_algo} ( {main_algo} blob, target_token bigint, -- value of token(pk) on the "primary" table PRIMARY KEY (({main_algo}), target_token) ); """ TABLES = ( "skipped_content 
content revision revision_parent release " "directory directory_entry snapshot snapshot_branch " "origin_visit origin object_metadata object_count " "origin_visit_status metadata_authority " "metadata_fetcher" ).split() HASH_ALGORITHMS = ["sha1", "sha1_git", "sha256", "blake2s256"] for main_algo in HASH_ALGORITHMS: CREATE_TABLES_QUERIES.extend( CONTENT_INDEX_TEMPLATE.format( main_algo=main_algo, other_algos=", ".join( [algo for algo in HASH_ALGORITHMS if algo != main_algo] ), ).split("\n\n") ) TABLES.append("content_by_%s" % main_algo) TABLES.append("skipped_content_by_%s" % main_algo) diff --git a/swh/storage/cassandra/storage.py b/swh/storage/cassandra/storage.py index b8656b1d..56ffcc12 100644 --- a/swh/storage/cassandra/storage.py +++ b/swh/storage/cassandra/storage.py @@ -1,1202 +1,1225 @@ # Copyright (C) 2019-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import itertools import json import random import re -from typing import Any, Dict, List, Iterable, Optional +from typing import Any, Dict, List, Iterable, Optional, Union import attr from deprecated import deprecated from swh.core.api.serializers import msgpack_loads, msgpack_dumps from swh.model.model import ( Revision, Release, Directory, DirectoryEntry, Content, SkippedContent, OriginVisit, OriginVisitStatus, Snapshot, Origin, ) from swh.model.hashutil import DEFAULT_ALGORITHMS from swh.storage.objstorage import ObjStorage from swh.storage.writer import JournalWriter from swh.storage.utils import now from ..exc import StorageArgumentException, HashCollision +from ..extrinsic_metadata import check_extrinsic_metadata_context, CONTEXT_KEYS from .common import TOKEN_BEGIN, TOKEN_END from .converters import ( revision_to_db, revision_from_db, release_to_db, release_from_db, row_to_visit_status, ) from .cql import CqlRunner from .schema import HASH_ALGORITHMS # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 class CassandraStorage: def __init__(self, hosts, keyspace, objstorage, port=9042, journal_writer=None): self._cql_runner = CqlRunner(hosts, keyspace, port) self.journal_writer = JournalWriter(journal_writer) self.objstorage = ObjStorage(objstorage) def check_config(self, *, check_write): self._cql_runner.check_read() return True def _content_get_from_hash(self, algo, hash_) -> Iterable: """From the name of a hash algorithm and a value of that hash, looks up the "hash -> token" secondary table (content_by_{algo}) to get tokens. Then, looks up the main table (content) to get all contents with that token, and filters out contents whose hash doesn't match.""" found_tokens = self._cql_runner.content_get_tokens_from_single_hash(algo, hash_) for token in found_tokens: # Query the main table ('content'). res = self._cql_runner.content_get_from_token(token) for row in res: # re-check the the hash (in case of murmur3 collision) if getattr(row, algo) == hash_: yield row def _content_add(self, contents: List[Content], with_data: bool) -> Dict: # Filter-out content already in the database. contents = [ c for c in contents if not self._cql_runner.content_get_from_pk(c.to_dict()) ] self.journal_writer.content_add(contents) if with_data: # First insert to the objstorage, if the endpoint is # `content_add` (as opposed to `content_add_metadata`). 
# TODO: this should probably be done in concurrently to inserting # in index tables (but still before the main table; so an entry is # only added to the main table after everything else was # successfully inserted. summary = self.objstorage.content_add( c for c in contents if c.status != "absent" ) content_add_bytes = summary["content:add:bytes"] content_add = 0 for content in contents: content_add += 1 # Check for sha1 or sha1_git collisions. This test is not atomic # with the insertion, so it won't detect a collision if both # contents are inserted at the same time, but it's good enough. # # The proper way to do it would probably be a BATCH, but this # would be inefficient because of the number of partitions we # need to affect (len(HASH_ALGORITHMS)+1, which is currently 5) for algo in {"sha1", "sha1_git"}: collisions = [] # Get tokens of 'content' rows with the same value for # sha1/sha1_git rows = self._content_get_from_hash(algo, content.get_hash(algo)) for row in rows: if getattr(row, algo) != content.get_hash(algo): # collision of token(partition key), ignore this # row continue for algo in HASH_ALGORITHMS: if getattr(row, algo) != content.get_hash(algo): # This hash didn't match; discard the row. collisions.append( {algo: getattr(row, algo) for algo in HASH_ALGORITHMS} ) if collisions: collisions.append(content.hashes()) raise HashCollision(algo, content.get_hash(algo), collisions) (token, insertion_finalizer) = self._cql_runner.content_add_prepare(content) # Then add to index tables for algo in HASH_ALGORITHMS: self._cql_runner.content_index_add_one(algo, content, token) # Then to the main table insertion_finalizer() summary = { "content:add": content_add, } if with_data: summary["content:add:bytes"] = content_add_bytes return summary def content_add(self, content: Iterable[Content]) -> Dict: contents = [attr.evolve(c, ctime=now()) for c in content] return self._content_add(list(contents), with_data=True) def content_update(self, content, keys=[]): raise NotImplementedError( "content_update is not supported by the Cassandra backend" ) def content_add_metadata(self, content: Iterable[Content]) -> Dict: return self._content_add(list(content), with_data=False) def content_get(self, content): if len(content) > BULK_BLOCK_CONTENT_LEN_MAX: raise StorageArgumentException( "Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX ) yield from self.objstorage.content_get(content) def content_get_partition( self, partition_id: int, nb_partitions: int, limit: int = 1000, page_token: str = None, ): if limit is None: raise StorageArgumentException("limit should not be None") # Compute start and end of the range of tokens covered by the # requested partition partition_size = (TOKEN_END - TOKEN_BEGIN) // nb_partitions range_start = TOKEN_BEGIN + partition_id * partition_size range_end = TOKEN_BEGIN + (partition_id + 1) * partition_size # offset the range start according to the `page_token`. 
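# For instance, assuming nb_partitions = 4 and the full Murmur3 token space
# (TOKEN_BEGIN = -2**63, TOKEN_END = 2**63 - 1), partition_id = 1 covers
# roughly [-2**62, 0); a page_token inside that interval makes the listing
# resume from that token instead of from the partition's lower bound.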
if page_token is not None: if not (range_start <= int(page_token) <= range_end): raise StorageArgumentException("Invalid page_token.") range_start = int(page_token) # Get the first rows of the range rows = self._cql_runner.content_get_token_range(range_start, range_end, limit) rows = list(rows) if len(rows) == limit: next_page_token: Optional[str] = str(rows[-1].tok + 1) else: next_page_token = None return { "contents": [row._asdict() for row in rows if row.status != "absent"], "next_page_token": next_page_token, } def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]: result: Dict[bytes, List[Dict]] = {sha1: [] for sha1 in contents} for sha1 in contents: # Get all (sha1, sha1_git, sha256, blake2s256) whose sha1 # matches the argument, from the index table ('content_by_sha1') for row in self._content_get_from_hash("sha1", sha1): content_metadata = row._asdict() content_metadata.pop("ctime") result[content_metadata["sha1"]].append(content_metadata) return result def content_find(self, content): # Find an algorithm that is common to all the requested contents. # It will be used to do an initial filtering efficiently. filter_algos = list(set(content).intersection(HASH_ALGORITHMS)) if not filter_algos: raise StorageArgumentException( "content keys must contain at least one of: " "%s" % ", ".join(sorted(HASH_ALGORITHMS)) ) common_algo = filter_algos[0] results = [] rows = self._content_get_from_hash(common_algo, content[common_algo]) for row in rows: # Re-check all the hashes, in case of collisions (either of the # hash of the partition key, or the hashes in it) for algo in HASH_ALGORITHMS: if content.get(algo) and getattr(row, algo) != content[algo]: # This hash didn't match; discard the row. break else: # All hashes match, keep this row. results.append( { **row._asdict(), "ctime": row.ctime.replace(tzinfo=datetime.timezone.utc), } ) return results def content_missing(self, content, key_hash="sha1"): for cont in content: res = self.content_find(cont) if not res: yield cont[key_hash] if any(c["status"] == "missing" for c in res): yield cont[key_hash] def content_missing_per_sha1(self, contents): return self.content_missing([{"sha1": c for c in contents}]) def content_missing_per_sha1_git(self, contents): return self.content_missing( [{"sha1_git": c for c in contents}], key_hash="sha1_git" ) def content_get_random(self): return self._cql_runner.content_get_random().sha1_git def _skipped_content_get_from_hash(self, algo, hash_) -> Iterable: """From the name of a hash algorithm and a value of that hash, looks up the "hash -> token" secondary table (skipped_content_by_{algo}) to get tokens. Then, looks up the main table (content) to get all contents with that token, and filters out contents whose hash doesn't match.""" found_tokens = self._cql_runner.skipped_content_get_tokens_from_single_hash( algo, hash_ ) for token in found_tokens: # Query the main table ('content'). res = self._cql_runner.skipped_content_get_from_token(token) for row in res: # re-check the the hash (in case of murmur3 collision) if getattr(row, algo) == hash_: yield row def _skipped_content_add(self, contents: Iterable[SkippedContent]) -> Dict: # Filter-out content already in the database. 
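# The primary-key lookup below drops rows that are already stored, so
# duplicates are neither re-journaled nor re-inserted.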
contents = [ c for c in contents if not self._cql_runner.skipped_content_get_from_pk(c.to_dict()) ] self.journal_writer.skipped_content_add(contents) for content in contents: # Compute token of the row in the main table (token, insertion_finalizer) = self._cql_runner.skipped_content_add_prepare( content ) # Then add to index tables for algo in HASH_ALGORITHMS: self._cql_runner.skipped_content_index_add_one(algo, content, token) # Then to the main table insertion_finalizer() return {"skipped_content:add": len(contents)} def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict: contents = [attr.evolve(c, ctime=now()) for c in content] return self._skipped_content_add(contents) def skipped_content_missing(self, contents): for content in contents: if not self._cql_runner.skipped_content_get_from_pk(content): yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS} def directory_add(self, directories: Iterable[Directory]) -> Dict: directories = list(directories) # Filter out directories that are already inserted. missing = self.directory_missing([dir_.id for dir_ in directories]) directories = [dir_ for dir_ in directories if dir_.id in missing] self.journal_writer.directory_add(directories) for directory in directories: # Add directory entries to the 'directory_entry' table for entry in directory.entries: self._cql_runner.directory_entry_add_one( {**entry.to_dict(), "directory_id": directory.id} ) # Add the directory *after* adding all the entries, so someone # calling snapshot_get_branch in the meantime won't end up # with half the entries. self._cql_runner.directory_add_one(directory.id) return {"directory:add": len(missing)} def directory_missing(self, directories): return self._cql_runner.directory_missing(directories) def _join_dentry_to_content(self, dentry): keys = ( "status", "sha1", "sha1_git", "sha256", "length", ) ret = dict.fromkeys(keys) ret.update(dentry.to_dict()) if ret["type"] == "file": content = self.content_find({"sha1_git": ret["target"]}) if content: content = content[0] for key in keys: ret[key] = content[key] return ret def _directory_ls(self, directory_id, recursive, prefix=b""): if self.directory_missing([directory_id]): return rows = list(self._cql_runner.directory_entry_get([directory_id])) for row in rows: # Build and yield the directory entry dict entry = row._asdict() del entry["directory_id"] entry = DirectoryEntry.from_dict(entry) ret = self._join_dentry_to_content(entry) ret["name"] = prefix + ret["name"] ret["dir_id"] = directory_id yield ret if recursive and ret["type"] == "dir": yield from self._directory_ls( ret["target"], True, prefix + ret["name"] + b"/" ) def directory_entry_get_by_path(self, directory, paths): return self._directory_entry_get_by_path(directory, paths, b"") def _directory_entry_get_by_path(self, directory, paths, prefix): if not paths: return contents = list(self.directory_ls(directory)) if not contents: return def _get_entry(entries, name): """Finds the entry with the requested name, prepends the prefix (to get its full path), and returns it. 
If no entry has that name, returns None.""" for entry in entries: if entry["name"] == name: entry = entry.copy() entry["name"] = prefix + entry["name"] return entry first_item = _get_entry(contents, paths[0]) if len(paths) == 1: return first_item if not first_item or first_item["type"] != "dir": return return self._directory_entry_get_by_path( first_item["target"], paths[1:], prefix + paths[0] + b"/" ) def directory_ls(self, directory, recursive=False): yield from self._directory_ls(directory, recursive) def directory_get_random(self): return self._cql_runner.directory_get_random().id def revision_add(self, revisions: Iterable[Revision]) -> Dict: revisions = list(revisions) # Filter-out revisions already in the database missing = self.revision_missing([rev.id for rev in revisions]) revisions = [rev for rev in revisions if rev.id in missing] self.journal_writer.revision_add(revisions) for revision in revisions: revobject = revision_to_db(revision) if revobject: # Add parents first for (rank, parent) in enumerate(revobject["parents"]): self._cql_runner.revision_parent_add_one( revobject["id"], rank, parent ) # Then write the main revision row. # Writing this after all parents were written ensures that # read endpoints don't return a partial view while writing # the parents self._cql_runner.revision_add_one(revobject) return {"revision:add": len(revisions)} def revision_missing(self, revisions): return self._cql_runner.revision_missing(revisions) def revision_get(self, revisions): rows = self._cql_runner.revision_get(revisions) revs = {} for row in rows: # TODO: use a single query to get all parents? # (it might have lower latency, but requires more code and more # bandwidth, because revision id would be part of each returned # row) parent_rows = self._cql_runner.revision_parent_get(row.id) # parent_rank is the clustering key, so results are already # sorted by rank. parents = tuple(row.parent_id for row in parent_rows) rev = revision_from_db(row, parents=parents) revs[rev.id] = rev.to_dict() for rev_id in revisions: yield revs.get(rev_id) def _get_parent_revs(self, rev_ids, seen, limit, short): if limit and len(seen) >= limit: return rev_ids = [id_ for id_ in rev_ids if id_ not in seen] if not rev_ids: return seen |= set(rev_ids) # We need this query, even if short=True, to return consistent # results (ie. not return only a subset of a revision's parents # if it is being written) if short: rows = self._cql_runner.revision_get_ids(rev_ids) else: rows = self._cql_runner.revision_get(rev_ids) for row in rows: # TODO: use a single query to get all parents? # (it might have less latency, but requires less code and more # bandwidth (because revision id would be part of each returned # row) parent_rows = self._cql_runner.revision_parent_get(row.id) # parent_rank is the clustering key, so results are already # sorted by rank. 
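# e.g. a merge revision with two parents is stored as two revision_parent
# rows with parent_rank 0 and 1, so the tuple below preserves the original
# parent order.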
parents = tuple(row.parent_id for row in parent_rows) if short: yield (row.id, parents) else: rev = revision_from_db(row, parents=parents) yield rev.to_dict() yield from self._get_parent_revs(parents, seen, limit, short) def revision_log(self, revisions, limit=None): seen = set() yield from self._get_parent_revs(revisions, seen, limit, False) def revision_shortlog(self, revisions, limit=None): seen = set() yield from self._get_parent_revs(revisions, seen, limit, True) def revision_get_random(self): return self._cql_runner.revision_get_random().id def release_add(self, releases: Iterable[Release]) -> Dict: to_add = [] for rel in releases: if rel not in to_add: to_add.append(rel) missing = set(self.release_missing([rel.id for rel in to_add])) to_add = [rel for rel in to_add if rel.id in missing] self.journal_writer.release_add(to_add) for release in to_add: if release: self._cql_runner.release_add_one(release_to_db(release)) return {"release:add": len(to_add)} def release_missing(self, releases): return self._cql_runner.release_missing(releases) def release_get(self, releases): rows = self._cql_runner.release_get(releases) rels = {} for row in rows: release = release_from_db(row) rels[row.id] = release.to_dict() for rel_id in releases: yield rels.get(rel_id) def release_get_random(self): return self._cql_runner.release_get_random().id def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict: missing = self._cql_runner.snapshot_missing([snp.id for snp in snapshots]) snapshots = [snp for snp in snapshots if snp.id in missing] for snapshot in snapshots: self.journal_writer.snapshot_add([snapshot]) # Add branches for (branch_name, branch) in snapshot.branches.items(): if branch is None: target_type = None target = None else: target_type = branch.target_type.value target = branch.target self._cql_runner.snapshot_branch_add_one( { "snapshot_id": snapshot.id, "name": branch_name, "target_type": target_type, "target": target, } ) # Add the snapshot *after* adding all the branches, so someone # calling snapshot_get_branch in the meantime won't end up # with half the branches. self._cql_runner.snapshot_add_one(snapshot.id) return {"snapshot:add": len(snapshots)} def snapshot_missing(self, snapshots): return self._cql_runner.snapshot_missing(snapshots) def snapshot_get(self, snapshot_id): return self.snapshot_get_branches(snapshot_id) def snapshot_get_by_origin_visit(self, origin, visit): try: visit = self.origin_visit_get_by(origin, visit) except IndexError: return None return self.snapshot_get(visit["snapshot"]) def snapshot_count_branches(self, snapshot_id): if self._cql_runner.snapshot_missing([snapshot_id]): # Makes sure we don't fetch branches for a snapshot that is # being added. return None rows = list(self._cql_runner.snapshot_count_branches(snapshot_id)) assert len(rows) == 1 (nb_none, counts) = rows[0].counts counts = dict(counts) if nb_none: counts[None] = nb_none return counts def snapshot_get_branches( self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None ): if self._cql_runner.snapshot_missing([snapshot_id]): # Makes sure we don't fetch branches for a snapshot that is # being added. 
return None branches = [] while len(branches) < branches_count + 1: new_branches = list( self._cql_runner.snapshot_branch_get( snapshot_id, branches_from, branches_count + 1 ) ) if not new_branches: break branches_from = new_branches[-1].name new_branches_filtered = new_branches # Filter by target_type if target_types: new_branches_filtered = [ branch for branch in new_branches_filtered if branch.target is not None and branch.target_type in target_types ] branches.extend(new_branches_filtered) if len(new_branches) < branches_count + 1: break if len(branches) > branches_count: last_branch = branches.pop(-1).name else: last_branch = None branches = { branch.name: {"target": branch.target, "target_type": branch.target_type,} if branch.target else None for branch in branches } return { "id": snapshot_id, "branches": branches, "next_branch": last_branch, } def snapshot_get_random(self): return self._cql_runner.snapshot_get_random().id def object_find_by_sha1_git(self, ids): results = {id_: [] for id_ in ids} missing_ids = set(ids) # Mind the order, revision is the most likely one for a given ID, # so we check revisions first. queries = [ ("revision", self._cql_runner.revision_missing), ("release", self._cql_runner.release_missing), ("content", self._cql_runner.content_missing_by_sha1_git), ("directory", self._cql_runner.directory_missing), ] for (object_type, query_fn) in queries: found_ids = missing_ids - set(query_fn(missing_ids)) for sha1_git in found_ids: results[sha1_git].append( {"sha1_git": sha1_git, "type": object_type,} ) missing_ids.remove(sha1_git) if not missing_ids: # We found everything, skipping the next queries. break return results def origin_get(self, origins): if isinstance(origins, dict): # Old API return_single = True origins = [origins] else: return_single = False if any("id" in origin for origin in origins): raise StorageArgumentException("Origin ids are not supported.") results = [self.origin_get_one(origin) for origin in origins] if return_single: assert len(results) == 1 return results[0] else: return results def origin_get_one(self, origin: Dict[str, Any]) -> Optional[Dict[str, Any]]: if "id" in origin: raise StorageArgumentException("Origin ids are not supported.") if "url" not in origin: raise StorageArgumentException("Missing origin url") rows = self._cql_runner.origin_get_by_url(origin["url"]) rows = list(rows) if rows: assert len(rows) == 1 result = rows[0]._asdict() return { "url": result["url"], } else: return None def origin_get_by_sha1(self, sha1s): results = [] for sha1 in sha1s: rows = self._cql_runner.origin_get_by_sha1(sha1) if rows: results.append({"url": rows.one().url}) else: results.append(None) return results def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict: # Compute what token to begin the listing from start_token = TOKEN_BEGIN if page_token: start_token = int(page_token) if not (TOKEN_BEGIN <= start_token <= TOKEN_END): raise StorageArgumentException("Invalid page_token.") rows = self._cql_runner.origin_list(start_token, limit) rows = list(rows) if len(rows) == limit: next_page_token: Optional[str] = str(rows[-1].tok + 1) else: next_page_token = None return { "origins": [{"url": row.url} for row in rows], "next_page_token": next_page_token, } def origin_search( self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False ): # TODO: remove this endpoint, swh-search should be used instead. 
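# Note that this fetches every origin and filters them client-side
# (substring or regexp match in Python), so its cost grows with the total
# number of origins in storage.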
origins = self._cql_runner.origin_iter_all() if regexp: pat = re.compile(url_pattern) origins = [orig for orig in origins if pat.search(orig.url)] else: origins = [orig for orig in origins if url_pattern in orig.url] if with_visit: origins = [orig for orig in origins if orig.next_visit_id > 1] return [{"url": orig.url,} for orig in origins[offset : offset + limit]] def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]: known_origins = [ Origin.from_dict(d) for d in self.origin_get([origin.to_dict() for origin in origins]) if d is not None ] to_add = [origin for origin in origins if origin not in known_origins] self.journal_writer.origin_add(to_add) for origin in to_add: self._cql_runner.origin_add_one(origin) return {"origin:add": len(to_add)} @deprecated("Use origin_add([origin]) instead") def origin_add_one(self, origin: Origin) -> str: known_origin = self.origin_get_one(origin.to_dict()) if known_origin: origin_url = known_origin["url"] else: self.journal_writer.origin_add([origin]) self._cql_runner.origin_add_one(origin) origin_url = origin.url return origin_url def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]: for visit in visits: origin = self.origin_get({"url": visit.origin}) if not origin: # Cannot add a visit without an origin raise StorageArgumentException("Unknown origin %s", visit.origin) all_visits = [] nb_visits = 0 for visit in visits: nb_visits += 1 if not visit.visit: visit_id = self._cql_runner.origin_generate_unique_visit_id( visit.origin ) visit = attr.evolve(visit, visit=visit_id) self.journal_writer.origin_visit_add([visit]) self._cql_runner.origin_visit_add_one(visit) assert visit.visit is not None all_visits.append(visit) self._origin_visit_status_add( OriginVisitStatus( origin=visit.origin, visit=visit.visit, date=visit.date, status="created", snapshot=None, ) ) return all_visits def _origin_visit_status_add(self, visit_status: OriginVisitStatus) -> None: """Add an origin visit status""" self.journal_writer.origin_visit_status_add([visit_status]) self._cql_runner.origin_visit_status_add_one(visit_status) def origin_visit_status_add( self, visit_statuses: Iterable[OriginVisitStatus] ) -> None: # First round to check existence (fail early if any is ko) for visit_status in visit_statuses: origin_url = self.origin_get({"url": visit_status.origin}) if not origin_url: raise StorageArgumentException(f"Unknown origin {visit_status.origin}") for visit_status in visit_statuses: self._origin_visit_status_add(visit_status) def _origin_visit_merge( self, visit: Dict[str, Any], visit_status: OriginVisitStatus, ) -> Dict[str, Any]: """Merge origin_visit and visit_status together. """ return OriginVisit.from_dict( { # default to the values in visit **visit, # override with the last update **visit_status.to_dict(), # visit['origin'] is the URL (via a join), while # visit_status['origin'] is only an id. "origin": visit["origin"], # but keep the date of the creation of the origin visit "date": visit["date"], } ).to_dict() def _origin_visit_apply_last_status(self, visit: Dict[str, Any]) -> Dict[str, Any]: """Retrieve the latest visit status information for the origin visit. Then merge it with the visit and return it. 
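A toy illustration of the merge rule implemented by _origin_visit_merge above, using plain dicts with made-up values rather than the real model classes: the latest status overlays the visit, but the visit's own origin URL and creation date are kept.

visit = {"origin": "https://example.org/repo", "visit": 3,
         "date": "2020-05-01T00:00:00+00:00", "type": "git",
         "status": "created", "snapshot": None}
latest_status = {"origin": 42,  # status rows reference the origin by id
                 "visit": 3, "date": "2020-05-01T00:10:00+00:00",
                 "status": "full", "snapshot": b"\x01" * 20}
merged = {**visit, **latest_status,
          "origin": visit["origin"],  # keep the URL, not the numeric id
          "date": visit["date"]}      # keep the visit creation date
assert merged["status"] == "full" and merged["origin"] == visit["origin"]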
""" row = self._cql_runner.origin_visit_status_get_latest( visit["origin"], visit["visit"] ) assert row is not None return self._origin_visit_merge(visit, row_to_visit_status(row)) def _origin_visit_get_updated(self, origin: str, visit_id: int) -> Dict[str, Any]: """Retrieve origin visit and latest origin visit status and merge them into an origin visit. """ row_visit = self._cql_runner.origin_visit_get_one(origin, visit_id) assert row_visit is not None visit = self._format_origin_visit_row(row_visit) return self._origin_visit_apply_last_status(visit) @staticmethod def _format_origin_visit_row(visit): return { **visit._asdict(), "origin": visit.origin, "date": visit.date.replace(tzinfo=datetime.timezone.utc), } def origin_visit_get( self, origin: str, last_visit: Optional[int] = None, limit: Optional[int] = None, order: str = "asc", ) -> Iterable[Dict[str, Any]]: rows = self._cql_runner.origin_visit_get(origin, last_visit, limit, order) for row in rows: visit = self._format_origin_visit_row(row) yield self._origin_visit_apply_last_status(visit) def origin_visit_find_by_date( self, origin: str, visit_date: datetime.datetime ) -> Optional[Dict[str, Any]]: # Iterator over all the visits of the origin # This should be ok for now, as there aren't too many visits # per origin. rows = list(self._cql_runner.origin_visit_get_all(origin)) def key(visit): dt = visit.date.replace(tzinfo=datetime.timezone.utc) - visit_date return (abs(dt), -visit.visit) if rows: row = min(rows, key=key) visit = self._format_origin_visit_row(row) return self._origin_visit_apply_last_status(visit) return None def origin_visit_get_by(self, origin: str, visit: int) -> Optional[Dict[str, Any]]: row = self._cql_runner.origin_visit_get_one(origin, visit) if row: visit_ = self._format_origin_visit_row(row) return self._origin_visit_apply_last_status(visit_) return None def origin_visit_get_latest( self, origin: str, type: Optional[str] = None, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[Dict[str, Any]]: # TODO: Do not fetch all visits rows = self._cql_runner.origin_visit_get_all(origin) latest_visit = None for row in rows: visit = self._format_origin_visit_row(row) updated_visit = self._origin_visit_apply_last_status(visit) if type is not None and updated_visit["type"] != type: continue if allowed_statuses and updated_visit["status"] not in allowed_statuses: continue if require_snapshot and updated_visit["snapshot"] is None: continue # updated_visit is a candidate if latest_visit is not None: if updated_visit["date"] < latest_visit["date"]: continue if updated_visit["visit"] < latest_visit["visit"]: continue latest_visit = updated_visit return latest_visit def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[OriginVisitStatus]: rows = self._cql_runner.origin_visit_status_get( origin_url, visit, allowed_statuses, require_snapshot ) # filtering is done python side as we cannot do it server side if allowed_statuses: rows = [row for row in rows if row.status in allowed_statuses] if require_snapshot: rows = [row for row in rows if row.snapshot is not None] if not rows: return None return row_to_visit_status(rows[0]) def origin_visit_get_random(self, type: str) -> Optional[Dict[str, Any]]: back_in_the_day = now() - datetime.timedelta(weeks=12) # 3 months back # Random position to start iteration at start_token = random.randint(TOKEN_BEGIN, TOKEN_END) # Iterator over all visits, 
ordered by token(origins) then visit_id rows = self._cql_runner.origin_visit_iter(start_token) for row in rows: visit = self._format_origin_visit_row(row) visit_status = self._origin_visit_apply_last_status(visit) if ( visit_status["date"] > back_in_the_day and visit_status["status"] == "full" ): return visit_status else: return None def stat_counters(self): rows = self._cql_runner.stat_counters() keys = ( "content", "directory", "origin", "origin_visit", "release", "revision", "skipped_content", "snapshot", ) stats = {key: 0 for key in keys} stats.update({row.object_type: row.count for row in rows}) return stats def refresh_stat_counters(self): pass def origin_metadata_add( self, origin_url: str, discovery_date: datetime.datetime, authority: Dict[str, Any], fetcher: Dict[str, Any], format: str, metadata: bytes, ) -> None: if not isinstance(origin_url, str): raise StorageArgumentException( "origin_url must be str, not %r" % (origin_url,) ) + + context: Dict[str, Union[str, bytes, int]] = {} # origins have no context + self._object_metadata_add( - "origin", origin_url, discovery_date, authority, fetcher, format, metadata, + "origin", + origin_url, + discovery_date, + authority, + fetcher, + format, + metadata, + context, ) def origin_metadata_get( self, origin_url: str, authority: Dict[str, str], after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, ) -> Dict[str, Any]: if not isinstance(origin_url, str): raise TypeError("origin_url must be str, not %r" % (origin_url,)) res = self._object_metadata_get( "origin", origin_url, authority, after, page_token, limit ) for result in res["results"]: result["origin_url"] = result.pop("id") return res def _object_metadata_add( self, object_type: str, id: str, discovery_date: datetime.datetime, authority: Dict[str, Any], fetcher: Dict[str, Any], format: str, metadata: bytes, + context: Dict[str, Union[str, bytes, int]], ) -> None: + check_extrinsic_metadata_context(object_type, context) + if not self._cql_runner.metadata_authority_get(**authority): raise StorageArgumentException(f"Unknown authority {authority}") if not self._cql_runner.metadata_fetcher_get(**fetcher): raise StorageArgumentException(f"Unknown fetcher {fetcher}") try: self._cql_runner.object_metadata_add( object_type, id, authority["type"], authority["url"], discovery_date, fetcher["name"], fetcher["version"], format, metadata, + context, ) except TypeError as e: raise StorageArgumentException(*e.args) def _object_metadata_get( self, object_type: str, id: str, authority: Dict[str, str], after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, ) -> Dict[str, Any]: if page_token is not None: (after_date, after_fetcher_name, after_fetcher_url) = msgpack_loads( page_token ) if after and after_date < after: raise StorageArgumentException( "page_token is inconsistent with the value of 'after'." 
) entries = self._cql_runner.object_metadata_get_after_date_and_fetcher( id, authority["type"], authority["url"], after_date, after_fetcher_name, after_fetcher_url, ) elif after is not None: entries = self._cql_runner.object_metadata_get_after_date( id, authority["type"], authority["url"], after ) else: entries = self._cql_runner.object_metadata_get( id, authority["type"], authority["url"] ) if limit: entries = itertools.islice(entries, 0, limit + 1) results = [] for entry in entries: discovery_date = entry.discovery_date.replace(tzinfo=datetime.timezone.utc) result = { "id": entry.id, "authority": { "type": entry.authority_type, "url": entry.authority_url, }, "fetcher": { "name": entry.fetcher_name, "version": entry.fetcher_version, }, "discovery_date": discovery_date, "format": entry.format, "metadata": entry.metadata, } + if CONTEXT_KEYS[object_type]: + context = {} + for key in CONTEXT_KEYS[object_type]: + value = getattr(entry, key) + if value is not None: + context[key] = value + result["context"] = context + results.append(result) if len(results) > limit: results.pop() assert len(results) == limit last_result = results[-1] next_page_token: Optional[bytes] = msgpack_dumps( ( last_result["discovery_date"], last_result["fetcher"]["name"], last_result["fetcher"]["version"], ) ) else: next_page_token = None return { "next_page_token": next_page_token, "results": results, } def metadata_fetcher_add( self, name: str, version: str, metadata: Dict[str, Any] ) -> None: self._cql_runner.metadata_fetcher_add(name, version, json.dumps(metadata)) def metadata_fetcher_get(self, name: str, version: str) -> Optional[Dict[str, Any]]: fetcher = self._cql_runner.metadata_fetcher_get(name, version) if fetcher: return { "name": fetcher.name, "version": fetcher.version, "metadata": json.loads(fetcher.metadata), } else: return None def metadata_authority_add( self, type: str, url: str, metadata: Dict[str, Any] ) -> None: self._cql_runner.metadata_authority_add(url, type, json.dumps(metadata)) def metadata_authority_get(self, type: str, url: str) -> Optional[Dict[str, Any]]: authority = self._cql_runner.metadata_authority_get(type, url) if authority: return { "type": authority.type, "url": authority.url, "metadata": json.loads(authority.metadata), } else: return None def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None: """Do nothing """ return None def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict: return {} diff --git a/swh/storage/db.py b/swh/storage/db.py index b6c43c37..f4f0db00 100644 --- a/swh/storage/db.py +++ b/swh/storage/db.py @@ -1,1283 +1,1300 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import random import select -from typing import Any, Dict, Iterable, List, Optional, Tuple +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union from swh.core.db import BaseDb from swh.core.db.db_utils import stored_procedure, jsonize from swh.core.db.db_utils import execute_values_generator from swh.model.model import OriginVisit, OriginVisitStatus, SHA1_SIZE class Db(BaseDb): """Proxy to the SWH DB, with wrappers around stored procedures """ def mktemp_dir_entry(self, entry_type, cur=None): self._cursor(cur).execute( "SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),) ) 
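Looking back at _object_metadata_get above: the page token is an opaque msgpack blob packing the resume point. A round-trip sketch with made-up values, assuming swh.core's msgpack helpers serialize datetimes (the surrounding code already relies on this).

from datetime import datetime, timezone

from swh.core.api.serializers import msgpack_dumps, msgpack_loads

last_result = {
    "discovery_date": datetime(2020, 5, 1, tzinfo=timezone.utc),
    "fetcher": {"name": "swh-deposit", "version": "1.0.0"},
}
# Pack the resume point of the last row returned on this page...
page_token = msgpack_dumps(
    (
        last_result["discovery_date"],
        last_result["fetcher"]["name"],
        last_result["fetcher"]["version"],
    )
)
# ...and unpack it on the next call to continue strictly after that row.
after_date, after_fetcher_name, after_fetcher_version = msgpack_loads(page_token)
assert after_date == last_result["discovery_date"]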
@stored_procedure("swh_mktemp_revision") def mktemp_revision(self, cur=None): pass @stored_procedure("swh_mktemp_release") def mktemp_release(self, cur=None): pass @stored_procedure("swh_mktemp_snapshot_branch") def mktemp_snapshot_branch(self, cur=None): pass def register_listener(self, notify_queue, cur=None): """Register a listener for NOTIFY queue `notify_queue`""" self._cursor(cur).execute("LISTEN %s" % notify_queue) def listen_notifies(self, timeout): """Listen to notifications for `timeout` seconds""" if select.select([self.conn], [], [], timeout) == ([], [], []): return else: self.conn.poll() while self.conn.notifies: yield self.conn.notifies.pop(0) @stored_procedure("swh_content_add") def content_add_from_temp(self, cur=None): pass @stored_procedure("swh_directory_add") def directory_add_from_temp(self, cur=None): pass @stored_procedure("swh_skipped_content_add") def skipped_content_add_from_temp(self, cur=None): pass @stored_procedure("swh_revision_add") def revision_add_from_temp(self, cur=None): pass @stored_procedure("swh_release_add") def release_add_from_temp(self, cur=None): pass def content_update_from_temp(self, keys_to_update, cur=None): cur = self._cursor(cur) cur.execute( """select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update ) content_get_metadata_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "status", ] content_add_keys = content_get_metadata_keys + ["ctime"] skipped_content_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "reason", "status", "origin", ] def content_get_metadata_from_sha1s(self, sha1s, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ select t.sha1, %s from (values %%s) as t (sha1) inner join content using (sha1) """ % ", ".join(self.content_get_metadata_keys[1:]), ((sha1,) for sha1 in sha1s), ) def content_get_range(self, start, end, limit=None, cur=None): """Retrieve contents within range [start, end]. 
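The Db queries above mix Python %-formatting (for column lists) with psycopg2 placeholders, which is why the VALUES placeholder is written %%s in content_get_metadata_from_sha1s and similar methods; a self-contained sketch:

keys = ["sha1", "sha1_git", "sha256"]
query = (
    "select t.sha1, %s from (values %%s) as t (sha1) "
    "inner join content using (sha1)"
) % ", ".join(keys[1:])
# The outer %-formatting fills in the column list and collapses %%s to %s,
# leaving exactly one placeholder for execute_values to expand into rows.
assert query == (
    "select t.sha1, sha1_git, sha256 from (values %s) as t (sha1) "
    "inner join content using (sha1)"
)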
""" cur = self._cursor(cur) query = """select %s from content where %%s <= sha1 and sha1 <= %%s order by sha1 limit %%s""" % ", ".join( self.content_get_metadata_keys ) cur.execute(query, (start, end, limit)) yield from cur content_hash_keys = ["sha1", "sha1_git", "sha256", "blake2s256"] def content_missing_from_list(self, contents, cur=None): cur = self._cursor(cur) keys = ", ".join(self.content_hash_keys) equality = " AND ".join( ("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(%s) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE %s ) """ % (keys, keys, equality), (tuple(c[key] for key in self.content_hash_keys) for c in contents), ) def content_missing_per_sha1(self, sha1s, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT t.sha1 FROM (VALUES %s) AS t(sha1) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE c.sha1 = t.sha1 )""", ((sha1,) for sha1 in sha1s), ) def content_missing_per_sha1_git(self, contents, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git )""", ((sha1,) for sha1 in contents), ) def skipped_content_missing(self, contents, cur=None): if not contents: return [] cur = self._cursor(cur) query = """SELECT * FROM (VALUES %s) AS t (%s) WHERE not exists (SELECT 1 FROM skipped_content s WHERE s.sha1 is not distinct from t.sha1::sha1 and s.sha1_git is not distinct from t.sha1_git::sha1 and s.sha256 is not distinct from t.sha256::bytea);""" % ( (", ".join("%s" for _ in contents)), ", ".join(self.content_hash_keys), ) cur.execute( query, [tuple(cont[key] for key in self.content_hash_keys) for cont in contents], ) yield from cur def snapshot_exists(self, snapshot_id, cur=None): """Check whether a snapshot with the given id exists""" cur = self._cursor(cur) cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,)) return bool(cur.fetchone()) def snapshot_missing_from_list(self, snapshots, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM snapshot d WHERE d.id = t.id ) """, ((id,) for id in snapshots), ) def snapshot_add(self, snapshot_id, cur=None): """Add a snapshot from the temporary table""" cur = self._cursor(cur) cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,)) snapshot_count_cols = ["target_type", "count"] def snapshot_count_branches(self, snapshot_id, cur=None): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_count_branches(%%s) """ % ", ".join( self.snapshot_count_cols ) cur.execute(query, (snapshot_id,)) yield from cur snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"] def snapshot_get_by_id( self, snapshot_id, branches_from=b"", branches_count=None, target_types=None, cur=None, ): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[]) """ % ", ".join( self.snapshot_get_cols ) cur.execute(query, (snapshot_id, branches_from, branches_count, target_types)) yield from cur def snapshot_get_by_origin_visit(self, origin_url, visit_id, cur=None): cur = self._cursor(cur) query = """\ SELECT ovs.snapshot FROM origin_visit ov INNER JOIN origin o ON o.id = ov.origin INNER JOIN origin_visit_status ovs ON ov.origin = ovs.origin AND ov.visit = ovs.visit WHERE o.url=%s AND ov.visit=%s ORDER 
BY ovs.date DESC LIMIT 1 """ cur.execute(query, (origin_url, visit_id)) ret = cur.fetchone() if ret: return ret[0] def snapshot_get_random(self, cur=None): return self._get_random_row_from_table("snapshot", ["id"], "id", cur) content_find_cols = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", ] def content_find( self, sha1=None, sha1_git=None, sha256=None, blake2s256=None, cur=None ): """Find the content optionally on a combination of the following checksums sha1, sha1_git, sha256 or blake2s256. Args: sha1: sha1 content git_sha1: the sha1 computed `a la git` sha1 of the content sha256: sha256 content blake2s256: blake2s256 content Returns: The tuple (sha1, sha1_git, sha256, blake2s256) if found or None. """ cur = self._cursor(cur) checksum_dict = { "sha1": sha1, "sha1_git": sha1_git, "sha256": sha256, "blake2s256": blake2s256, } where_parts = [] args = [] # Adds only those keys which have value other than None for algorithm in checksum_dict: if checksum_dict[algorithm] is not None: args.append(checksum_dict[algorithm]) where_parts.append(algorithm + "= %s") query = " AND ".join(where_parts) cur.execute( """SELECT %s FROM content WHERE %s """ % (",".join(self.content_find_cols), query), args, ) content = cur.fetchall() return content def content_get_random(self, cur=None): return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur) def directory_missing_from_list(self, directories, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM directory d WHERE d.id = t.id ) """, ((id,) for id in directories), ) directory_ls_cols = [ "dir_id", "type", "target", "name", "perms", "status", "sha1", "sha1_git", "sha256", "length", ] def directory_walk_one(self, directory, cur=None): cur = self._cursor(cur) cols = ", ".join(self.directory_ls_cols) query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols cur.execute(query, (directory,)) yield from cur def directory_walk(self, directory, cur=None): cur = self._cursor(cur) cols = ", ".join(self.directory_ls_cols) query = "SELECT %s FROM swh_directory_walk(%%s)" % cols cur.execute(query, (directory,)) yield from cur def directory_entry_get_by_path(self, directory, paths, cur=None): """Retrieve a directory entry by path. """ cur = self._cursor(cur) cols = ", ".join(self.directory_ls_cols) query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols cur.execute(query, (directory, paths)) data = cur.fetchone() if set(data) == {None}: return None return data def directory_get_random(self, cur=None): return self._get_random_row_from_table("directory", ["id"], "id", cur) def revision_missing_from_list(self, revisions, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM revision r WHERE r.id = t.id ) """, ((id,) for id in revisions), ) revision_add_cols = [ "id", "date", "date_offset", "date_neg_utc_offset", "committer_date", "committer_date_offset", "committer_date_neg_utc_offset", "type", "directory", "message", "author_fullname", "author_name", "author_email", "committer_fullname", "committer_name", "committer_email", "metadata", "synthetic", ] revision_get_cols = revision_add_cols + ["parents"] def origin_visit_add(self, origin, ts, type, cur=None): """Add a new origin_visit for origin origin at timestamp ts. 
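A standalone sketch of the filter-building pattern used by content_find above; the helper name is hypothetical, and only the checksums actually supplied become predicates.

def build_checksum_filter(sha1=None, sha1_git=None, sha256=None, blake2s256=None):
    # Keep insertion order so the generated clause is deterministic.
    checksums = {"sha1": sha1, "sha1_git": sha1_git,
                 "sha256": sha256, "blake2s256": blake2s256}
    where_parts, args = [], []
    for col, value in checksums.items():
        if value is not None:
            where_parts.append(f"{col} = %s")
            args.append(value)
    return " AND ".join(where_parts), args

clause, args = build_checksum_filter(sha1=b"\x00" * 20, sha256=b"\x11" * 32)
assert clause == "sha1 = %s AND sha256 = %s" and len(args) == 2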
Args: origin: origin concerned by the visit ts: the date of the visit type: type of loader for the visit Returns: The new visit index step for that origin """ cur = self._cursor(cur) self._cursor(cur).execute( "SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type) ) return cur.fetchone()[0] origin_visit_status_cols = [ "origin", "visit", "date", "status", "snapshot", "metadata", ] def origin_visit_status_add( self, visit_status: OriginVisitStatus, cur=None ) -> None: """Add new origin visit status """ assert self.origin_visit_status_cols[0] == "origin" assert self.origin_visit_status_cols[-1] == "metadata" cols = self.origin_visit_status_cols[1:-1] cur = self._cursor(cur) cur.execute( f"WITH origin_id as (select id from origin where url=%s) " f"INSERT INTO origin_visit_status " f"(origin, {', '.join(cols)}, metadata) " f"VALUES ((select id from origin_id), " f"{', '.join(['%s']*len(cols))}, %s) " f"ON CONFLICT (origin, visit, date) do nothing", [visit_status.origin] + [getattr(visit_status, key) for key in cols] + [jsonize(visit_status.metadata)], ) origin_visit_upsert_cols = [ "origin", "visit", "date", "type", ] def origin_visit_upsert(self, origin_visit: OriginVisit, cur=None) -> None: # doing an extra query like this is way simpler than trying to join # the origin id in the query below ov = origin_visit origin_id = next(self.origin_id_get_by_url([ov.origin])) cur = self._cursor(cur) query = """INSERT INTO origin_visit ({cols}) VALUES ({values}) ON CONFLICT ON CONSTRAINT origin_visit_pkey DO UPDATE SET {updates}""".format( cols=", ".join(self.origin_visit_upsert_cols), values=", ".join("%s" for col in self.origin_visit_upsert_cols), updates=", ".join( "{0}=excluded.{0}".format(col) for col in self.origin_visit_upsert_cols ), ) cur.execute( query, (origin_id, ov.visit, ov.date, ov.type), ) origin_visit_get_cols = [ "origin", "visit", "date", "type", "status", "metadata", "snapshot", ] origin_visit_select_cols = [ "o.url AS origin", "ov.visit", "ov.date", "ov.type AS type", "ovs.status", "ovs.metadata", "ovs.snapshot", ] origin_visit_status_select_cols = [ "o.url AS origin", "ovs.visit", "ovs.date", "ovs.status", "ovs.snapshot", "ovs.metadata", ] def _make_origin_visit_status( self, row: Optional[Tuple[Any]] ) -> Optional[Dict[str, Any]]: """Make an origin_visit_status dict out of a row """ if not row: return None return dict(zip(self.origin_visit_status_cols, row)) def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, cur=None, ) -> Optional[Dict[str, Any]]: """Given an origin visit id, return its latest origin_visit_status """ cur = self._cursor(cur) query_parts = [ "SELECT %s" % ", ".join(self.origin_visit_status_select_cols), "FROM origin_visit_status ovs ", "INNER JOIN origin o ON o.id = ovs.origin", ] query_parts.append("WHERE o.url = %s") query_params: List[Any] = [origin_url] query_parts.append("AND ovs.visit = %s") query_params.append(visit) if require_snapshot: query_parts.append("AND ovs.snapshot is not null") if allowed_statuses: query_parts.append("AND ovs.status IN %s") query_params.append(tuple(allowed_statuses)) query_parts.append("ORDER BY ovs.date DESC LIMIT 1") query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) row = cur.fetchone() return self._make_origin_visit_status(row) def origin_visit_get_all( self, origin_id, last_visit=None, order="asc", limit=None, cur=None ): """Retrieve all visits for origin with id origin_id. 
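For _make_origin_visit_status above, a tiny illustration (row values are made up) of how a raw result tuple is zipped with origin_visit_status_cols into a dict:

origin_visit_status_cols = ["origin", "visit", "date", "status", "snapshot", "metadata"]
row = ("https://example.org/repo", 3, "2020-05-01T00:10:00+00:00", "full",
       b"\x01" * 20, None)
status = dict(zip(origin_visit_status_cols, row))
assert status["status"] == "full" and status["visit"] == 3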
Args: origin_id: The occurrence's origin Yields: The visits for that origin """ cur = self._cursor(cur) assert order.lower() in ["asc", "desc"] query_parts = [ "SELECT DISTINCT ON (ov.visit) %s " % ", ".join(self.origin_visit_select_cols), "FROM origin_visit ov", "INNER JOIN origin o ON o.id = ov.origin", "INNER JOIN origin_visit_status ovs", "ON ov.origin = ovs.origin AND ov.visit = ovs.visit", ] query_parts.append("WHERE o.url = %s") query_params: List[Any] = [origin_id] if last_visit is not None: op_comparison = ">" if order == "asc" else "<" query_parts.append(f"and ov.visit {op_comparison} %s") query_params.append(last_visit) if order == "asc": query_parts.append("ORDER BY ov.visit ASC, ovs.date DESC") elif order == "desc": query_parts.append("ORDER BY ov.visit DESC, ovs.date DESC") else: assert False if limit is not None: query_parts.append("LIMIT %s") query_params.append(limit) query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) yield from cur def origin_visit_get(self, origin_id, visit_id, cur=None): """Retrieve information on visit visit_id of origin origin_id. Args: origin_id: the origin concerned visit_id: The visit step for that origin Returns: The origin_visit information """ cur = self._cursor(cur) query = """\ SELECT %s FROM origin_visit ov INNER JOIN origin o ON o.id = ov.origin INNER JOIN origin_visit_status ovs ON ov.origin = ovs.origin AND ov.visit = ovs.visit WHERE o.url = %%s AND ov.visit = %%s ORDER BY ovs.date DESC LIMIT 1 """ % ( ", ".join(self.origin_visit_select_cols) ) cur.execute(query, (origin_id, visit_id)) r = cur.fetchall() if not r: return None return r[0] def origin_visit_find_by_date(self, origin, visit_date, cur=None): cur = self._cursor(cur) cur.execute( "SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date) ) rows = cur.fetchall() if rows: visit = dict(zip(self.origin_visit_get_cols, rows[0])) visit["origin"] = origin return visit def origin_visit_exists(self, origin_id, visit_id, cur=None): """Check whether an origin visit with the given ids exists""" cur = self._cursor(cur) query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s" cur.execute(query, (origin_id, visit_id)) return bool(cur.fetchone()) def origin_visit_get_latest( self, origin_id: str, type: Optional[str], allowed_statuses: Optional[Iterable[str]], require_snapshot: bool, cur=None, ): """Retrieve the most recent origin_visit of the given origin, with optional filters. Args: origin_id: the origin concerned type: Optional visit type to filter on allowed_statuses: the visit statuses allowed for the returned visit require_snapshot (bool): If True, only a visit with a known snapshot will be returned. Returns: The origin_visit information, or None if no visit matches. 
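A minimal, self-contained sketch of the query_parts / query_params pattern that origin_visit_get_latest assembles below; the function name is made up and the SELECT list is elided.

def build_latest_visit_query(origin_url, type=None, allowed_statuses=None,
                             require_snapshot=False):
    parts = ["SELECT ... FROM origin_visit ov ...", "WHERE o.url = %s"]
    params = [origin_url]
    if type is not None:
        parts.append("AND ov.type = %s")
        params.append(type)
    if require_snapshot:
        parts.append("AND ovs.snapshot is not null")
    if allowed_statuses:
        parts.append("AND ovs.status IN %s")
        params.append(tuple(allowed_statuses))
    parts.append("ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1")
    return "\n".join(parts), tuple(params)

query, params = build_latest_visit_query("https://example.org/repo",
                                          allowed_statuses=["full"])
assert params == ("https://example.org/repo", ("full",))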
""" cur = self._cursor(cur) query_parts = [ "SELECT %s" % ", ".join(self.origin_visit_select_cols), "FROM origin_visit ov ", "INNER JOIN origin o ON o.id = ov.origin", "INNER JOIN origin_visit_status ovs ", "ON o.id = ovs.origin AND ov.visit = ovs.visit ", ] query_parts.append("WHERE o.url = %s") query_params: List[Any] = [origin_id] if type is not None: query_parts.append("AND ov.type = %s") query_params.append(type) if require_snapshot: query_parts.append("AND ovs.snapshot is not null") if allowed_statuses: query_parts.append("AND ovs.status IN %s") query_params.append(tuple(allowed_statuses)) query_parts.append( "ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1" ) query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) r = cur.fetchone() if not r: return None return r def origin_visit_get_random(self, type, cur=None): """Randomly select one origin visit that was full and in the last 3 months """ cur = self._cursor(cur) columns = ",".join(self.origin_visit_select_cols) query = f"""select {columns} from origin_visit ov inner join origin o on ov.origin=o.id inner join origin_visit_status ovs on ov.origin = ovs.origin and ov.visit = ovs.visit where ovs.status='full' and ov.type=%s and ov.date > now() - '3 months'::interval and random() < 0.1 limit 1 """ cur.execute(query, (type,)) return cur.fetchone() @staticmethod def mangle_query_key(key, main_table): if key == "id": return "t.id" if key == "parents": return """ ARRAY( SELECT rh.parent_id::bytea FROM revision_history rh WHERE rh.id = t.id ORDER BY rh.parent_rank )""" if "_" not in key: return "%s.%s" % (main_table, key) head, tail = key.split("_", 1) if head in ("author", "committer") and tail in ( "name", "email", "id", "fullname", ): return "%s.%s" % (head, tail) return "%s.%s" % (main_table, key) def revision_get_from_list(self, revisions, cur=None): cur = self._cursor(cur) query_keys = ", ".join( self.mangle_query_key(k, "revision") for k in self.revision_get_cols ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(sortkey, id) LEFT JOIN revision ON t.id = revision.id LEFT JOIN person author ON revision.author = author.id LEFT JOIN person committer ON revision.committer = committer.id ORDER BY sortkey """ % query_keys, ((sortkey, id) for sortkey, id in enumerate(revisions)), ) def revision_log(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_log(%%s, %%s) """ % ", ".join( self.revision_get_cols ) cur.execute(query, (root_revisions, limit)) yield from cur revision_shortlog_cols = ["id", "parents"] def revision_shortlog(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_list(%%s, %%s) """ % ", ".join( self.revision_shortlog_cols ) cur.execute(query, (root_revisions, limit)) yield from cur def revision_get_random(self, cur=None): return self._get_random_row_from_table("revision", ["id"], "id", cur) def release_missing_from_list(self, releases, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM release r WHERE r.id = t.id ) """, ((id,) for id in releases), ) object_find_by_sha1_git_cols = ["sha1_git", "type"] def object_find_by_sha1_git(self, ids, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ WITH t (sha1_git) AS (VALUES %s), known_objects as (( select id as sha1_git, 'release'::object_type as type, object_id from release r where exists 
(select 1 from t where t.sha1_git = r.id) ) union all ( select id as sha1_git, 'revision'::object_type as type, object_id from revision r where exists (select 1 from t where t.sha1_git = r.id) ) union all ( select id as sha1_git, 'directory'::object_type as type, object_id from directory d where exists (select 1 from t where t.sha1_git = d.id) ) union all ( select sha1_git as sha1_git, 'content'::object_type as type, object_id from content c where exists (select 1 from t where t.sha1_git = c.sha1_git) )) select t.sha1_git as sha1_git, k.type from t left join known_objects k on t.sha1_git = k.sha1_git """, ((id,) for id in ids), ) def stat_counters(self, cur=None): cur = self._cursor(cur) cur.execute("SELECT * FROM swh_stat_counters()") yield from cur def origin_add(self, url, cur=None): """Insert a new origin and return the new identifier.""" insert = """INSERT INTO origin (url) values (%s) RETURNING url""" cur.execute(insert, (url,)) return cur.fetchone()[0] origin_cols = ["url"] def origin_get_by_url(self, origins, cur=None): """Retrieve origin `(type, url)` from urls if found.""" cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(url) LEFT JOIN origin ON t.url = origin.url """ % ",".join( "origin." + col for col in self.origin_cols ) yield from execute_values_generator(cur, query, ((url,) for url in origins)) def origin_get_by_sha1(self, sha1s, cur=None): """Retrieve origin urls from sha1s if found.""" cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(sha1) LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1') """ % ",".join( "origin." + col for col in self.origin_cols ) yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s)) def origin_id_get_by_url(self, origins, cur=None): """Retrieve origin `(type, url)` from urls if found.""" cur = self._cursor(cur) query = """SELECT id FROM (VALUES %s) as t(url) LEFT JOIN origin ON t.url = origin.url """ for row in execute_values_generator(cur, query, ((url,) for url in origins)): yield row[0] origin_get_range_cols = ["id", "url"] def origin_get_range(self, origin_from=1, origin_count=100, cur=None): """Retrieve ``origin_count`` origins whose ids are greater or equal than ``origin_from``. Origins are sorted by id before retrieving them. Args: origin_from (int): the minimum id of origins to retrieve origin_count (int): the maximum number of origins to retrieve """ cur = self._cursor(cur) query = """SELECT %s FROM origin WHERE id >= %%s ORDER BY id LIMIT %%s """ % ",".join( self.origin_get_range_cols ) cur.execute(query, (origin_from, origin_count)) yield from cur def _origin_query( self, url_pattern, count=False, offset=0, limit=50, regexp=False, with_visit=False, cur=None, ): """ Method factorizing query creation for searching and counting origins. 
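A sketch (hypothetical helper) of the operator and parameter choice made by _origin_query below: substring search wraps the pattern for ILIKE, while regexp search feeds it to ~* unchanged.

def make_url_predicate(url_pattern, regexp=False):
    if regexp:
        return "url ~* %s", (url_pattern,)
    return "url ILIKE %s", ("%" + url_pattern + "%",)

assert make_url_predicate("github.com") == ("url ILIKE %s", ("%github.com%",))
assert make_url_predicate("^https://", regexp=True) == ("url ~* %s", ("^https://",))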
""" cur = self._cursor(cur) if count: origin_cols = "COUNT(*)" else: origin_cols = ",".join(self.origin_cols) query = """SELECT %s FROM origin o WHERE """ if with_visit: query += """ EXISTS ( SELECT 1 FROM origin_visit ov INNER JOIN origin_visit_status ovs ON ov.origin = ovs.origin AND ov.visit = ovs.visit INNER JOIN snapshot ON ovs.snapshot=snapshot.id WHERE ov.origin=o.id ) AND """ query += "url %s %%s " if not count: query += "ORDER BY id OFFSET %%s LIMIT %%s" if not regexp: query = query % (origin_cols, "ILIKE") query_params = ("%" + url_pattern + "%", offset, limit) else: query = query % (origin_cols, "~*") query_params = (url_pattern, offset, limit) if count: query_params = (query_params[0],) cur.execute(query, query_params) def origin_search( self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False, cur=None ): """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls offset (int): number of found origins to skip before returning results limit (int): the maximum number of found origins to return regexp (bool): if True, consider the provided pattern as a regular expression and returns origins whose urls match it with_visit (bool): if True, filter out origins with no visit """ self._origin_query( url_pattern, offset=offset, limit=limit, regexp=regexp, with_visit=with_visit, cur=cur, ) yield from cur def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None): """Count origins whose urls contain a provided string pattern or match a provided regular expression. The pattern search in origin urls is performed in a case insensitive way. Args: url_pattern (str): the string pattern to search for in origin urls regexp (bool): if True, consider the provided pattern as a regular expression and returns origins whose urls match it with_visit (bool): if True, filter out origins with no visit """ self._origin_query( url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur ) return cur.fetchone()[0] release_add_cols = [ "id", "target", "target_type", "date", "date_offset", "date_neg_utc_offset", "name", "comment", "synthetic", "author_fullname", "author_name", "author_email", ] release_get_cols = release_add_cols def release_get_from_list(self, releases, cur=None): cur = self._cursor(cur) query_keys = ", ".join( self.mangle_query_key(k, "release") for k in self.release_get_cols ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(sortkey, id) LEFT JOIN release ON t.id = release.id LEFT JOIN person author ON release.author = author.id ORDER BY sortkey """ % query_keys, ((sortkey, id) for sortkey, id in enumerate(releases)), ) def release_get_random(self, cur=None): return self._get_random_row_from_table("release", ["id"], "id", cur) + _object_metadata_context_cols = [ + "origin", + "visit", + "snapshot", + "release", + "revision", + "path", + "directory", + ] + """The list of context columns for all artifact types.""" + _object_metadata_insert_cols = [ "type", "id", "authority_id", "fetcher_id", "discovery_date", "format", "metadata", + *_object_metadata_context_cols, ] """List of columns of the object_metadata table, used when writing metadata.""" _object_metadata_insert_query = f""" INSERT INTO object_metadata ({', '.join(_object_metadata_insert_cols)}) VALUES ({', '.join('%s' for _ in _object_metadata_insert_cols)}) ON CONFLICT (id, authority_id, 
discovery_date, fetcher_id) DO UPDATE SET format=EXCLUDED.format, metadata=EXCLUDED.metadata """ object_metadata_get_cols = [ "id", "discovery_date", "metadata_authority.type", "metadata_authority.url", "metadata_fetcher.id", "metadata_fetcher.name", "metadata_fetcher.version", + *_object_metadata_context_cols, "format", "metadata", ] """List of columns of the object_metadata, metadata_authority, and metadata_fetcher tables, used when reading object metadata.""" _object_metadata_select_query = f""" SELECT object_metadata.id AS id, {', '.join(object_metadata_get_cols[1:-1])}, object_metadata.metadata AS metadata FROM object_metadata INNER JOIN metadata_authority ON (metadata_authority.id=authority_id) INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id) WHERE object_metadata.id=%s AND authority_id=%s """ def object_metadata_add( self, object_type: str, id: str, + context: Dict[str, Union[str, bytes, int]], discovery_date: datetime.datetime, authority_id: int, fetcher_id: int, format: str, metadata: bytes, cur, ): query = self._object_metadata_insert_query args: Dict[str, Any] = dict( type=object_type, id=id, authority_id=authority_id, fetcher_id=fetcher_id, discovery_date=discovery_date, format=format, metadata=metadata, ) + for col in self._object_metadata_context_cols: + args[col] = context.get(col) + params = [args[col] for col in self._object_metadata_insert_cols] cur.execute(query, params) def object_metadata_get( self, object_type: str, id: str, authority_id: int, after_time: Optional[datetime.datetime], after_fetcher: Optional[int], limit: int, cur, ): query_parts = [self._object_metadata_select_query] args = [id, authority_id] if after_fetcher is not None: assert after_time query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)") args.extend([after_time, after_fetcher]) elif after_time is not None: query_parts.append("AND discovery_date > %s") args.append(after_time) query_parts.append("ORDER BY discovery_date, fetcher_id") if limit: query_parts.append("LIMIT %s") args.append(limit) cur.execute(" ".join(query_parts), args) yield from cur metadata_fetcher_cols = ["name", "version", "metadata"] def metadata_fetcher_add( self, name: str, version: str, metadata: bytes, cur=None ) -> None: cur = self._cursor(cur) cur.execute( "INSERT INTO metadata_fetcher (name, version, metadata) " "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING", (name, version, jsonize(metadata)), ) def metadata_fetcher_get(self, name: str, version: str, cur=None): cur = self._cursor(cur) cur.execute( f"SELECT {', '.join(self.metadata_fetcher_cols)} " f"FROM metadata_fetcher " f"WHERE name=%s AND version=%s", (name, version), ) return cur.fetchone() def metadata_fetcher_get_id( self, name: str, version: str, cur=None ) -> Optional[int]: cur = self._cursor(cur) cur.execute( "SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s", (name, version), ) row = cur.fetchone() if row: return row[0] else: return None metadata_authority_cols = ["type", "url", "metadata"] def metadata_authority_add( self, type: str, url: str, metadata: bytes, cur=None ) -> None: cur = self._cursor(cur) cur.execute( "INSERT INTO metadata_authority (type, url, metadata) " "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING", (type, url, jsonize(metadata)), ) def metadata_authority_get(self, type: str, url: str, cur=None): cur = self._cursor(cur) cur.execute( f"SELECT {', '.join(self.metadata_authority_cols)} " f"FROM metadata_authority " f"WHERE type=%s AND url=%s", (type, url), ) return cur.fetchone() def 
metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]: cur = self._cursor(cur) cur.execute( "SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url) ) row = cur.fetchone() if row: return row[0] else: return None def _get_random_row_from_table(self, table_name, cols, id_col, cur=None): random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE)) cur = self._cursor(cur) query = """ (SELECT {cols} FROM {table} WHERE {id_col} >= %s ORDER BY {id_col} LIMIT 1) UNION (SELECT {cols} FROM {table} WHERE {id_col} < %s ORDER BY {id_col} DESC LIMIT 1) LIMIT 1 """.format( cols=", ".join(cols), table=table_name, id_col=id_col ) cur.execute(query, (random_sha1, random_sha1)) row = cur.fetchone() if row: return row[0] diff --git a/swh/storage/extrinsic_metadata.py b/swh/storage/extrinsic_metadata.py new file mode 100644 index 00000000..3078a5aa --- /dev/null +++ b/swh/storage/extrinsic_metadata.py @@ -0,0 +1,55 @@ +# Copyright (C) 2020 The Software Heritage developers +# See the AUTHORS file at the top-level directory of this distribution +# License: GNU General Public License version 3, or any later version +# See top-level LICENSE file for more information + +from typing import Any, cast, Dict + +from swh.model.identifiers import PersistentId, parse_persistent_identifier + +from .exc import StorageArgumentException + +CONTEXT_KEYS: Dict[str, Dict[str, type]] = {} +CONTEXT_KEYS["origin"] = {} +CONTEXT_KEYS["snapshot"] = {"origin": str, "visit": int} +CONTEXT_KEYS["release"] = {**CONTEXT_KEYS["snapshot"], "snapshot": PersistentId} +CONTEXT_KEYS["revision"] = {**CONTEXT_KEYS["release"], "release": PersistentId} +CONTEXT_KEYS["directory"] = { + **CONTEXT_KEYS["revision"], + "revision": PersistentId, + "path": bytes, +} +CONTEXT_KEYS["content"] = {**CONTEXT_KEYS["directory"], "directory": PersistentId} + + +def check_extrinsic_metadata_context(object_type: str, context: Dict[str, Any]): + key_types = CONTEXT_KEYS[object_type] + + extra_keys = set(context) - set(key_types) + if extra_keys: + raise StorageArgumentException(f"Unknown context keys: {', '.join(extra_keys)}") + + for (key, value) in context.items(): + expected_type = key_types[key] + expected_type_str = str(expected_type) # for display + + # If an SWHID is expected and a string is given, parse it + if expected_type is PersistentId and isinstance(value, str): + value = parse_persistent_identifier(value) + expected_type_str = "PersistentId or str" + + # Check the type of the context value + if not isinstance(value, expected_type): + raise StorageArgumentException( + f"Context key {key} must have type {expected_type_str}, " + f"but is {value!r}" + ) + + # If it is an SWHID, check it is also a core SWHID. + if expected_type is PersistentId: + value = cast(PersistentId, value) + if value.metadata != {}: + raise StorageArgumentException( + f"Context key {key} must be a core SWHID, " + f"but it has qualifiers {', '.join(value.metadata)}." 
+ ) diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py index 91a30f28..5e6fed16 100644 --- a/swh/storage/in_memory.py +++ b/swh/storage/in_memory.py @@ -1,1254 +1,1278 @@ # Copyright (C) 2015-2020 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import re import bisect import collections import copy import datetime import itertools import random from collections import defaultdict from datetime import timedelta from typing import ( Any, Callable, Dict, Generic, Hashable, Iterable, Iterator, List, Optional, Tuple, TypeVar, + Union, ) import attr from deprecated import deprecated from swh.core.api.serializers import msgpack_loads, msgpack_dumps from swh.model.model import ( BaseContent, Content, SkippedContent, Directory, Revision, Release, Snapshot, OriginVisit, OriginVisitStatus, Origin, SHA1_SIZE, ) from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex from swh.storage.objstorage import ObjStorage from swh.storage.utils import now -from .exc import StorageArgumentException, HashCollision - from .converters import origin_url_to_sha1 +from .exc import StorageArgumentException, HashCollision +from .extrinsic_metadata import check_extrinsic_metadata_context, CONTEXT_KEYS from .utils import get_partition_bounds_bytes from .writer import JournalWriter # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 SortedListItem = TypeVar("SortedListItem") SortedListKey = TypeVar("SortedListKey") FetcherKey = Tuple[str, str] class SortedList(collections.UserList, Generic[SortedListKey, SortedListItem]): data: List[Tuple[SortedListKey, SortedListItem]] # https://github.com/python/mypy/issues/708 # key: Callable[[SortedListItem], SortedListKey] def __init__( self, data: List[SortedListItem] = None, key: Optional[Callable[[SortedListItem], SortedListKey]] = None, ): if key is None: def key(item): return item assert key is not None # for mypy super().__init__(sorted((key(x), x) for x in data or [])) self.key: Callable[[SortedListItem], SortedListKey] = key def add(self, item: SortedListItem): k = self.key(item) bisect.insort(self.data, (k, item)) def __iter__(self) -> Iterator[SortedListItem]: for (k, item) in self.data: yield item def iter_from(self, start_key: Any) -> Iterator[SortedListItem]: """Returns an iterator over all the elements whose key is greater or equal to `start_key`. 
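Relating back to the new swh/storage/extrinsic_metadata.py module above: an illustration, with made-up identifiers, of a context dict that check_extrinsic_metadata_context would accept for a content object; SWHID-valued keys may be given as strings and are parsed on the fly.

from swh.storage.extrinsic_metadata import check_extrinsic_metadata_context

content_context = {
    "origin": "https://example.org/repo",
    "visit": 3,
    "snapshot": "swh:1:snp:" + "00" * 20,
    "revision": "swh:1:rev:" + "11" * 20,
    "directory": "swh:1:dir:" + "22" * 20,
    "path": b"src/main.c",
}
# Accepted: every key is in CONTEXT_KEYS["content"] and has the expected type.
check_extrinsic_metadata_context("content", content_context)
# Rejected examples: an unknown key such as "branch", or a string "3" for
# "visit", would raise StorageArgumentException.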
(This is an efficient equivalent to: `(x for x in L if key(x) >= start_key)`) """ from_index = bisect.bisect_left(self.data, (start_key,)) for (k, item) in itertools.islice(self.data, from_index, None): yield item def iter_after(self, start_key: Any) -> Iterator[SortedListItem]: """Same as iter_from, but using a strict inequality.""" it = self.iter_from(start_key) for item in it: if self.key(item) > start_key: # type: ignore yield item break yield from it class InMemoryStorage: def __init__(self, journal_writer=None): self.reset() self.journal_writer = JournalWriter(journal_writer) def reset(self): self._contents = {} self._content_indexes = defaultdict(lambda: defaultdict(set)) self._skipped_contents = {} self._skipped_content_indexes = defaultdict(lambda: defaultdict(set)) self._directories = {} self._revisions = {} self._releases = {} self._snapshots = {} self._origins = {} self._origins_by_id = [] self._origins_by_sha1 = {} self._origin_visits = {} self._origin_visit_statuses: Dict[Tuple[str, int], List[OriginVisitStatus]] = {} self._persons = {} # {origin_url: {authority: [metadata]}} self._object_metadata: Dict[ str, Dict[ Hashable, SortedList[Tuple[datetime.datetime, FetcherKey], Dict[str, Any]], ], ] = defaultdict( lambda: defaultdict( lambda: SortedList(key=lambda x: (x["discovery_date"], x["fetcher"])) ) ) # noqa self._metadata_fetchers: Dict[FetcherKey, Dict[str, Any]] = {} self._metadata_authorities: Dict[Hashable, Dict[str, Any]] = {} self._objects = defaultdict(list) self._sorted_sha1s = SortedList[bytes, bytes]() self.objstorage = ObjStorage({"cls": "memory", "args": {}}) def check_config(self, *, check_write): return True def _content_add(self, contents: Iterable[Content], with_data: bool) -> Dict: self.journal_writer.content_add(contents) content_add = 0 if with_data: summary = self.objstorage.content_add( c for c in contents if c.status != "absent" ) content_add_bytes = summary["content:add:bytes"] for content in contents: key = self._content_key(content) if key in self._contents: continue for algorithm in DEFAULT_ALGORITHMS: hash_ = content.get_hash(algorithm) if hash_ in self._content_indexes[algorithm] and ( algorithm not in {"blake2s256", "sha256"} ): colliding_content_hashes = [] # Add the already stored contents for content_hashes_set in self._content_indexes[algorithm][hash_]: hashes = dict(content_hashes_set) colliding_content_hashes.append(hashes) # Add the new colliding content colliding_content_hashes.append(content.hashes()) raise HashCollision(algorithm, hash_, colliding_content_hashes) for algorithm in DEFAULT_ALGORITHMS: hash_ = content.get_hash(algorithm) self._content_indexes[algorithm][hash_].add(key) self._objects[content.sha1_git].append(("content", content.sha1)) self._contents[key] = content self._sorted_sha1s.add(content.sha1) self._contents[key] = attr.evolve(self._contents[key], data=None) content_add += 1 summary = { "content:add": content_add, } if with_data: summary["content:add:bytes"] = content_add_bytes return summary def content_add(self, content: Iterable[Content]) -> Dict: content = [attr.evolve(c, ctime=now()) for c in content] return self._content_add(content, with_data=True) def content_update(self, content, keys=[]): self.journal_writer.content_update(content) for cont_update in content: cont_update = cont_update.copy() sha1 = cont_update.pop("sha1") for old_key in self._content_indexes["sha1"][sha1]: old_cont = self._contents.pop(old_key) for algorithm in DEFAULT_ALGORITHMS: hash_ = old_cont.get_hash(algorithm) 
self._content_indexes[algorithm][hash_].remove(old_key) new_cont = attr.evolve(old_cont, **cont_update) new_key = self._content_key(new_cont) self._contents[new_key] = new_cont for algorithm in DEFAULT_ALGORITHMS: hash_ = new_cont.get_hash(algorithm) self._content_indexes[algorithm][hash_].add(new_key) def content_add_metadata(self, content: Iterable[Content]) -> Dict: return self._content_add(content, with_data=False) def content_get(self, content): # FIXME: Make this method support slicing the `data`. if len(content) > BULK_BLOCK_CONTENT_LEN_MAX: raise StorageArgumentException( "Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX ) yield from self.objstorage.content_get(content) def content_get_range(self, start, end, limit=1000): if limit is None: raise StorageArgumentException("limit should not be None") sha1s = ( (sha1, content_key) for sha1 in self._sorted_sha1s.iter_from(start) for content_key in self._content_indexes["sha1"][sha1] ) matched = [] next_content = None for sha1, key in sha1s: if sha1 > end: break if len(matched) >= limit: next_content = sha1 break matched.append(self._contents[key].to_dict()) return { "contents": matched, "next": next_content, } def content_get_partition( self, partition_id: int, nb_partitions: int, limit: int = 1000, page_token: str = None, ): if limit is None: raise StorageArgumentException("limit should not be None") (start, end) = get_partition_bounds_bytes( partition_id, nb_partitions, SHA1_SIZE ) if page_token: start = hash_to_bytes(page_token) if end is None: end = b"\xff" * SHA1_SIZE result = self.content_get_range(start, end, limit) result2 = { "contents": result["contents"], "next_page_token": None, } if result["next"]: result2["next_page_token"] = hash_to_hex(result["next"]) return result2 def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]: result: Dict = {sha1: [] for sha1 in contents} for sha1 in contents: if sha1 in self._content_indexes["sha1"]: objs = self._content_indexes["sha1"][sha1] # only 1 element as content_add_metadata would have raised a # hash collision otherwise for key in objs: d = self._contents[key].to_dict() del d["ctime"] if "data" in d: del d["data"] result[sha1].append(d) return result def content_find(self, content): if not set(content).intersection(DEFAULT_ALGORITHMS): raise StorageArgumentException( "content keys must contain at least one of: %s" % ", ".join(sorted(DEFAULT_ALGORITHMS)) ) found = [] for algo in DEFAULT_ALGORITHMS: hash = content.get(algo) if hash and hash in self._content_indexes[algo]: found.append(self._content_indexes[algo][hash]) if not found: return [] keys = list(set.intersection(*found)) return [self._contents[key].to_dict() for key in keys] def content_missing(self, content, key_hash="sha1"): for cont in content: for (algo, hash_) in cont.items(): if algo not in DEFAULT_ALGORITHMS: continue if hash_ not in self._content_indexes.get(algo, []): yield cont[key_hash] break else: for result in self.content_find(cont): if result["status"] == "missing": yield cont[key_hash] def content_missing_per_sha1(self, contents): for content in contents: if content not in self._content_indexes["sha1"]: yield content def content_missing_per_sha1_git(self, contents): for content in contents: if content not in self._content_indexes["sha1_git"]: yield content def content_get_random(self): return random.choice(list(self._content_indexes["sha1_git"])) def _skipped_content_add(self, contents: List[SkippedContent]) -> Dict: self.journal_writer.skipped_content_add(contents) summary 
= {"skipped_content:add": 0} missing_contents = self.skipped_content_missing([c.hashes() for c in contents]) missing = {self._content_key(c) for c in missing_contents} contents = [c for c in contents if self._content_key(c) in missing] for content in contents: key = self._content_key(content) for algo in DEFAULT_ALGORITHMS: if content.get_hash(algo): self._skipped_content_indexes[algo][content.get_hash(algo)].add(key) self._skipped_contents[key] = content summary["skipped_content:add"] += 1 return summary def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict: content = [attr.evolve(c, ctime=now()) for c in content] return self._skipped_content_add(content) def skipped_content_missing(self, contents): for content in contents: matches = list(self._skipped_contents.values()) for (algorithm, key) in self._content_key(content): if algorithm == "blake2s256": continue # Filter out skipped contents with the same hash matches = [ match for match in matches if match.get_hash(algorithm) == key ] # if none of the contents match if not matches: yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS} def directory_add(self, directories: Iterable[Directory]) -> Dict: directories = [dir_ for dir_ in directories if dir_.id not in self._directories] self.journal_writer.directory_add(directories) count = 0 for directory in directories: count += 1 self._directories[directory.id] = directory self._objects[directory.id].append(("directory", directory.id)) return {"directory:add": count} def directory_missing(self, directories): for id in directories: if id not in self._directories: yield id def _join_dentry_to_content(self, dentry): keys = ( "status", "sha1", "sha1_git", "sha256", "length", ) ret = dict.fromkeys(keys) ret.update(dentry) if ret["type"] == "file": # TODO: Make it able to handle more than one content content = self.content_find({"sha1_git": ret["target"]}) if content: content = content[0] for key in keys: ret[key] = content[key] return ret def _directory_ls(self, directory_id, recursive, prefix=b""): if directory_id in self._directories: for entry in self._directories[directory_id].entries: ret = self._join_dentry_to_content(entry.to_dict()) ret["name"] = prefix + ret["name"] ret["dir_id"] = directory_id yield ret if recursive and ret["type"] == "dir": yield from self._directory_ls( ret["target"], True, prefix + ret["name"] + b"/" ) def directory_ls(self, directory, recursive=False): yield from self._directory_ls(directory, recursive) def directory_entry_get_by_path(self, directory, paths): return self._directory_entry_get_by_path(directory, paths, b"") def directory_get_random(self): if not self._directories: return None return random.choice(list(self._directories)) def _directory_entry_get_by_path(self, directory, paths, prefix): if not paths: return contents = list(self.directory_ls(directory)) if not contents: return def _get_entry(entries, name): for entry in entries: if entry["name"] == name: entry = entry.copy() entry["name"] = prefix + entry["name"] return entry first_item = _get_entry(contents, paths[0]) if len(paths) == 1: return first_item if not first_item or first_item["type"] != "dir": return return self._directory_entry_get_by_path( first_item["target"], paths[1:], prefix + paths[0] + b"/" ) def revision_add(self, revisions: Iterable[Revision]) -> Dict: revisions = [rev for rev in revisions if rev.id not in self._revisions] self.journal_writer.revision_add(revisions) count = 0 for revision in revisions: revision = attr.evolve( revision, 
committer=self._person_add(revision.committer), author=self._person_add(revision.author), ) self._revisions[revision.id] = revision self._objects[revision.id].append(("revision", revision.id)) count += 1 return {"revision:add": count} def revision_missing(self, revisions): for id in revisions: if id not in self._revisions: yield id def revision_get(self, revisions): for id in revisions: if id in self._revisions: yield self._revisions.get(id).to_dict() else: yield None def _get_parent_revs(self, rev_id, seen, limit): if limit and len(seen) >= limit: return if rev_id in seen or rev_id not in self._revisions: return seen.add(rev_id) yield self._revisions[rev_id].to_dict() for parent in self._revisions[rev_id].parents: yield from self._get_parent_revs(parent, seen, limit) def revision_log(self, revisions, limit=None): seen = set() for rev_id in revisions: yield from self._get_parent_revs(rev_id, seen, limit) def revision_shortlog(self, revisions, limit=None): yield from ( (rev["id"], rev["parents"]) for rev in self.revision_log(revisions, limit) ) def revision_get_random(self): return random.choice(list(self._revisions)) def release_add(self, releases: Iterable[Release]) -> Dict: to_add = [] for rel in releases: if rel.id not in self._releases and rel not in to_add: to_add.append(rel) self.journal_writer.release_add(to_add) for rel in to_add: if rel.author: self._person_add(rel.author) self._objects[rel.id].append(("release", rel.id)) self._releases[rel.id] = rel return {"release:add": len(to_add)} def release_missing(self, releases): yield from (rel for rel in releases if rel not in self._releases) def release_get(self, releases): for rel_id in releases: if rel_id in self._releases: yield self._releases[rel_id].to_dict() else: yield None def release_get_random(self): return random.choice(list(self._releases)) def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict: count = 0 snapshots = (snap for snap in snapshots if snap.id not in self._snapshots) for snapshot in snapshots: self.journal_writer.snapshot_add([snapshot]) self._snapshots[snapshot.id] = snapshot self._objects[snapshot.id].append(("snapshot", snapshot.id)) count += 1 return {"snapshot:add": count} def snapshot_missing(self, snapshots): for id in snapshots: if id not in self._snapshots: yield id def snapshot_get(self, snapshot_id): return self.snapshot_get_branches(snapshot_id) def snapshot_get_by_origin_visit(self, origin, visit): origin_url = self._get_origin_url(origin) if not origin_url: return if origin_url not in self._origins or visit > len( self._origin_visits[origin_url] ): return None visit = self._origin_visit_get_updated(origin_url, visit) snapshot_id = visit.snapshot if snapshot_id: return self.snapshot_get(snapshot_id) else: return None def snapshot_count_branches(self, snapshot_id): snapshot = self._snapshots[snapshot_id] return collections.Counter( branch.target_type.value if branch else None for branch in snapshot.branches.values() ) def snapshot_get_branches( self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None ): snapshot = self._snapshots.get(snapshot_id) if snapshot is None: return None sorted_branch_names = sorted(snapshot.branches) from_index = bisect.bisect_left(sorted_branch_names, branches_from) if target_types: next_branch = None branches = {} for branch_name in sorted_branch_names[from_index:]: branch = snapshot.branches[branch_name] if branch and branch.target_type.value in target_types: if len(branches) < branches_count: branches[branch_name] = branch else: next_branch = 
branch_name break else: # As there is no 'target_types', we can do that much faster to_index = from_index + branches_count returned_branch_names = sorted_branch_names[from_index:to_index] branches = { branch_name: snapshot.branches[branch_name] for branch_name in returned_branch_names } if to_index >= len(sorted_branch_names): next_branch = None else: next_branch = sorted_branch_names[to_index] branches = { name: branch.to_dict() if branch else None for (name, branch) in branches.items() } return { "id": snapshot_id, "branches": branches, "next_branch": next_branch, } def snapshot_get_random(self): return random.choice(list(self._snapshots)) def object_find_by_sha1_git(self, ids): ret = {} for id_ in ids: objs = self._objects.get(id_, []) ret[id_] = [{"sha1_git": id_, "type": obj[0],} for obj in objs] return ret def _convert_origin(self, t): if t is None: return None return t.to_dict() def origin_get(self, origins): if isinstance(origins, dict): # Old API return_single = True origins = [origins] else: return_single = False # Sanity check to be error-compatible with the pgsql backend if any("id" in origin for origin in origins) and not all( "id" in origin for origin in origins ): raise StorageArgumentException( 'Either all origins or none at all should have an "id".' ) if any("url" in origin for origin in origins) and not all( "url" in origin for origin in origins ): raise StorageArgumentException( "Either all origins or none at all should have " 'an "url" key.' ) results = [] for origin in origins: result = None if "url" in origin: if origin["url"] in self._origins: result = self._origins[origin["url"]] else: raise StorageArgumentException("Origin must have an url.") results.append(self._convert_origin(result)) if return_single: assert len(results) == 1 return results[0] else: return results def origin_get_by_sha1(self, sha1s): return [self._convert_origin(self._origins_by_sha1.get(sha1)) for sha1 in sha1s] def origin_get_range(self, origin_from=1, origin_count=100): origin_from = max(origin_from, 1) if origin_from <= len(self._origins_by_id): max_idx = origin_from + origin_count - 1 if max_idx > len(self._origins_by_id): max_idx = len(self._origins_by_id) for idx in range(origin_from - 1, max_idx): origin = self._convert_origin(self._origins[self._origins_by_id[idx]]) yield {"id": idx + 1, **origin} def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict: origin_urls = sorted(self._origins) if page_token: from_ = bisect.bisect_left(origin_urls, page_token) else: from_ = 0 result = { "origins": [ {"url": origin_url} for origin_url in origin_urls[from_ : from_ + limit] ] } if from_ + limit < len(origin_urls): result["next_page_token"] = origin_urls[from_ + limit] return result def origin_search( self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False ): origins = map(self._convert_origin, self._origins.values()) if regexp: pat = re.compile(url_pattern) origins = [orig for orig in origins if pat.search(orig["url"])] else: origins = [orig for orig in origins if url_pattern in orig["url"]] if with_visit: filtered_origins = [] for orig in origins: visits = ( self._origin_visit_get_updated(ov.origin, ov.visit) for ov in self._origin_visits[orig["url"]] ) for ov in visits: if ov.snapshot and ov.snapshot in self._snapshots: filtered_origins.append(orig) break else: filtered_origins = origins return filtered_origins[offset : offset + limit] def origin_count(self, url_pattern, regexp=False, with_visit=False): return len( self.origin_search( url_pattern, 
regexp=regexp, with_visit=with_visit, limit=len(self._origins), ) ) def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]: origins = list(origins) added = 0 for origin in origins: if origin.url not in self._origins: self.origin_add_one(origin) added += 1 return {"origin:add": added} @deprecated("Use origin_add([origin]) instead") def origin_add_one(self, origin: Origin) -> str: if origin.url not in self._origins: self.journal_writer.origin_add([origin]) # generate an origin_id because it is needed by origin_get_range. # TODO: remove this when we remove origin_get_range origin_id = len(self._origins) + 1 self._origins_by_id.append(origin.url) assert len(self._origins_by_id) == origin_id self._origins[origin.url] = origin self._origins_by_sha1[origin_url_to_sha1(origin.url)] = origin self._origin_visits[origin.url] = [] self._objects[origin.url].append(("origin", origin.url)) return origin.url def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]: for visit in visits: origin = self.origin_get({"url": visit.origin}) if not origin: # Cannot add a visit without an origin raise StorageArgumentException("Unknown origin %s", visit.origin) all_visits = [] for visit in visits: origin_url = visit.origin if origin_url in self._origins: origin = self._origins[origin_url] if visit.visit: self.journal_writer.origin_visit_add([visit]) while len(self._origin_visits[origin_url]) < visit.visit: self._origin_visits[origin_url].append(None) self._origin_visits[origin_url][visit.visit - 1] = visit else: # visit ids are in the range [1, +inf[ visit_id = len(self._origin_visits[origin_url]) + 1 visit = attr.evolve(visit, visit=visit_id) self.journal_writer.origin_visit_add([visit]) self._origin_visits[origin_url].append(visit) visit_key = (origin_url, visit.visit) self._objects[visit_key].append(("origin_visit", None)) assert visit.visit is not None self._origin_visit_status_add_one( OriginVisitStatus( origin=visit.origin, visit=visit.visit, date=visit.date, status="created", snapshot=None, ) ) all_visits.append(visit) return all_visits def _origin_visit_status_add_one(self, visit_status: OriginVisitStatus) -> None: """Add an origin visit status without checks. If already present, do nothing. 
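        Note that the status is still forwarded to the journal writer
        unconditionally; only the in-memory list of statuses is deduplicated.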
""" self.journal_writer.origin_visit_status_add([visit_status]) visit_key = (visit_status.origin, visit_status.visit) self._origin_visit_statuses.setdefault(visit_key, []) visit_statuses = self._origin_visit_statuses[visit_key] if visit_status not in visit_statuses: visit_statuses.append(visit_status) def origin_visit_status_add( self, visit_statuses: Iterable[OriginVisitStatus], ) -> None: # First round to check existence (fail early if any is ko) for visit_status in visit_statuses: origin_url = self.origin_get({"url": visit_status.origin}) if not origin_url: raise StorageArgumentException(f"Unknown origin {visit_status.origin}") for visit_status in visit_statuses: self._origin_visit_status_add_one(visit_status) def _origin_visit_get_updated(self, origin: str, visit_id: int) -> OriginVisit: """Merge origin visit and latest origin visit status """ assert visit_id >= 1 visit = self._origin_visits[origin][visit_id - 1] assert visit is not None visit_key = (origin, visit_id) visit_update = max(self._origin_visit_statuses[visit_key], key=lambda v: v.date) return OriginVisit.from_dict( { # default to the values in visit **visit.to_dict(), # override with the last update **visit_update.to_dict(), # but keep the date of the creation of the origin visit "date": visit.date, } ) def origin_visit_get( self, origin: str, last_visit: Optional[int] = None, limit: Optional[int] = None, order: str = "asc", ) -> Iterable[Dict[str, Any]]: order = order.lower() assert order in ["asc", "desc"] origin_url = self._get_origin_url(origin) if origin_url in self._origin_visits: visits = self._origin_visits[origin_url] visits = sorted(visits, key=lambda v: v.visit, reverse=(order == "desc")) if last_visit is not None: if order == "asc": visits = [v for v in visits if v.visit > last_visit] else: visits = [v for v in visits if v.visit < last_visit] if limit is not None: visits = visits[:limit] for visit in visits: if not visit: continue visit_id = visit.visit visit_update = self._origin_visit_get_updated(origin_url, visit_id) assert visit_update is not None yield visit_update.to_dict() def origin_visit_find_by_date( self, origin: str, visit_date: datetime.datetime ) -> Optional[Dict[str, Any]]: origin_url = self._get_origin_url(origin) if origin_url in self._origin_visits: visits = self._origin_visits[origin_url] visit = min(visits, key=lambda v: (abs(v.date - visit_date), -v.visit)) visit_update = self._origin_visit_get_updated(origin, visit.visit) assert visit_update is not None return visit_update.to_dict() return None def origin_visit_get_by(self, origin: str, visit: int) -> Optional[Dict[str, Any]]: origin_url = self._get_origin_url(origin) if origin_url in self._origin_visits and visit <= len( self._origin_visits[origin_url] ): visit_update = self._origin_visit_get_updated(origin_url, visit) assert visit_update is not None return visit_update.to_dict() return None def origin_visit_get_latest( self, origin: str, type: Optional[str] = None, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[Dict[str, Any]]: ori = self._origins.get(origin) if not ori: return None visits = self._origin_visits[ori.url] visits = [ self._origin_visit_get_updated(visit.origin, visit.visit) for visit in visits if visit is not None ] if type is not None: visits = [visit for visit in visits if visit.type == type] if allowed_statuses is not None: visits = [visit for visit in visits if visit.status in allowed_statuses] if require_snapshot: visits = [visit for visit in visits if visit.snapshot] visit = 
max(visits, key=lambda v: (v.date, v.visit), default=None) if visit is None: return None return visit.to_dict() def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, ) -> Optional[OriginVisitStatus]: ori = self._origins.get(origin_url) if not ori: return None visit_key = (origin_url, visit) visits = self._origin_visit_statuses.get(visit_key) if not visits: return None if allowed_statuses is not None: visits = [visit for visit in visits if visit.status in allowed_statuses] if require_snapshot: visits = [visit for visit in visits if visit.snapshot] visit_status = max(visits, key=lambda v: (v.date, v.visit), default=None) return visit_status def _select_random_origin_visit_by_type(self, type: str) -> str: while True: url = random.choice(list(self._origin_visits.keys())) random_origin_visits = self._origin_visits[url] if random_origin_visits[0].type == type: return url def origin_visit_get_random(self, type: str) -> Optional[Dict[str, Any]]: url = self._select_random_origin_visit_by_type(type) random_origin_visits = copy.deepcopy(self._origin_visits[url]) random_origin_visits.reverse() back_in_the_day = now() - timedelta(weeks=12) # 3 months back # This should be enough for tests for visit in random_origin_visits: updated_visit = self._origin_visit_get_updated(url, visit.visit) assert updated_visit is not None if updated_visit.date > back_in_the_day and updated_visit.status == "full": return updated_visit.to_dict() else: return None def stat_counters(self): keys = ( "content", "directory", "origin", "origin_visit", "person", "release", "revision", "skipped_content", "snapshot", ) stats = {key: 0 for key in keys} stats.update( collections.Counter( obj_type for (obj_type, obj_id) in itertools.chain(*self._objects.values()) ) ) return stats def refresh_stat_counters(self): pass def content_metadata_add( self, id: str, + context: Dict[str, Union[str, bytes, int]], discovery_date: datetime.datetime, authority: Dict[str, Any], fetcher: Dict[str, Any], format: str, metadata: bytes, ) -> None: self._object_metadata_add( - "content", id, discovery_date, authority, fetcher, format, metadata, + "content", + id, + discovery_date, + authority, + fetcher, + format, + metadata, + context, ) def origin_metadata_add( self, origin_url: str, discovery_date: datetime.datetime, authority: Dict[str, Any], fetcher: Dict[str, Any], format: str, metadata: bytes, ) -> None: if not isinstance(origin_url, str): raise StorageArgumentException( "origin_url must be str, not %r" % (origin_url,) ) + + context: Dict[str, Union[str, bytes, int]] = {} # origins have no context + self._object_metadata_add( - "origin", origin_url, discovery_date, authority, fetcher, format, metadata, + "origin", + origin_url, + discovery_date, + authority, + fetcher, + format, + metadata, + context, ) def _object_metadata_add( self, object_type: str, id: str, discovery_date: datetime.datetime, authority: Dict[str, Any], fetcher: Dict[str, Any], format: str, metadata: bytes, + context: Dict[str, Union[str, bytes, int]], ) -> None: + check_extrinsic_metadata_context(object_type, context) if not isinstance(metadata, bytes): raise StorageArgumentException( "metadata must be bytes, not %r" % (metadata,) ) authority_key = self._metadata_authority_key(authority) if authority_key not in self._metadata_authorities: raise StorageArgumentException(f"Unknown authority {authority}") fetcher_key = self._metadata_fetcher_key(fetcher) if fetcher_key not in 
self._metadata_fetchers: raise StorageArgumentException(f"Unknown fetcher {fetcher}") object_metadata_list = self._object_metadata[id][authority_key] object_metadata: Dict[str, Any] = { "id": id, "discovery_date": discovery_date, "authority": authority_key, "fetcher": fetcher_key, "format": format, "metadata": metadata, } + if CONTEXT_KEYS[object_type]: + object_metadata["context"] = context + for existing_object_metadata in object_metadata_list: if ( existing_object_metadata["fetcher"] == fetcher_key and existing_object_metadata["discovery_date"] == discovery_date ): # Duplicate of an existing one; replace it. existing_object_metadata.update(object_metadata) break else: object_metadata_list.add(object_metadata) def origin_metadata_get( self, origin_url: str, authority: Dict[str, str], after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, ) -> Dict[str, Any]: if not isinstance(origin_url, str): raise TypeError("origin_url must be str, not %r" % (origin_url,)) res = self._object_metadata_get( "origin", origin_url, authority, after, page_token, limit ) res["results"] = copy.deepcopy(res["results"]) for result in res["results"]: result["origin_url"] = result.pop("id") return res def _object_metadata_get( self, object_type: str, id: str, authority: Dict[str, str], after: Optional[datetime.datetime] = None, page_token: Optional[bytes] = None, limit: int = 1000, ) -> Dict[str, Any]: authority_key = self._metadata_authority_key(authority) if page_token is not None: (after_time, after_fetcher) = msgpack_loads(page_token) after_fetcher = tuple(after_fetcher) if after is not None and after > after_time: raise StorageArgumentException( "page_token is inconsistent with the value of 'after'." ) entries = self._object_metadata[id][authority_key].iter_after( (after_time, after_fetcher) ) elif after is not None: entries = self._object_metadata[id][authority_key].iter_from((after,)) entries = (entry for entry in entries if entry["discovery_date"] > after) else: entries = iter(self._object_metadata[id][authority_key]) if limit: entries = itertools.islice(entries, 0, limit + 1) results = [] for entry in entries: authority = self._metadata_authorities[entry["authority"]] fetcher = self._metadata_fetchers[entry["fetcher"]] if after: assert entry["discovery_date"] > after results.append( { **entry, "authority": {"type": authority["type"], "url": authority["url"],}, "fetcher": { "name": fetcher["name"], "version": fetcher["version"], }, } ) if len(results) > limit: results.pop() assert len(results) == limit last_result = results[-1] next_page_token: Optional[bytes] = msgpack_dumps( ( last_result["discovery_date"], self._metadata_fetcher_key(last_result["fetcher"]), ) ) else: next_page_token = None return { "next_page_token": next_page_token, "results": results, } def metadata_fetcher_add( self, name: str, version: str, metadata: Dict[str, Any] ) -> None: fetcher = { "name": name, "version": version, "metadata": metadata, } key = self._metadata_fetcher_key(fetcher) if key not in self._metadata_fetchers: self._metadata_fetchers[key] = fetcher def metadata_fetcher_get(self, name: str, version: str) -> Optional[Dict[str, Any]]: return self._metadata_fetchers.get( self._metadata_fetcher_key({"name": name, "version": version}) ) def metadata_authority_add( self, type: str, url: str, metadata: Dict[str, Any] ) -> None: authority = { "type": type, "url": url, "metadata": metadata, } key = self._metadata_authority_key(authority) self._metadata_authorities[key] = authority def 
metadata_authority_get(self, type: str, url: str) -> Optional[Dict[str, Any]]: return self._metadata_authorities.get( self._metadata_authority_key({"type": type, "url": url}) ) def _get_origin_url(self, origin): if isinstance(origin, str): return origin else: raise TypeError("origin must be a string.") def _person_add(self, person): key = ("person", person.fullname) if key not in self._objects: self._persons[person.fullname] = person self._objects[key].append(key) return self._persons[person.fullname] @staticmethod def _content_key(content): """ A stable key and the algorithm for a content""" if isinstance(content, BaseContent): content = content.to_dict() return tuple((key, content.get(key)) for key in sorted(DEFAULT_ALGORITHMS)) @staticmethod def _metadata_fetcher_key(fetcher: Dict) -> FetcherKey: return (fetcher["name"], fetcher["version"]) @staticmethod def _metadata_authority_key(authority: Dict) -> Hashable: return (authority["type"], authority["url"]) def diff_directories(self, from_dir, to_dir, track_renaming=False): raise NotImplementedError("InMemoryStorage.diff_directories") def diff_revisions(self, from_rev, to_rev, track_renaming=False): raise NotImplementedError("InMemoryStorage.diff_revisions") def diff_revision(self, revision, track_renaming=False): raise NotImplementedError("InMemoryStorage.diff_revision") def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None: """Do nothing """ return None def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict: return {} diff --git a/swh/storage/sql/30-swh-schema.sql b/swh/storage/sql/30-swh-schema.sql index 28bae81a..02b3b1ab 100644 --- a/swh/storage/sql/30-swh-schema.sql +++ b/swh/storage/sql/30-swh-schema.sql @@ -1,488 +1,497 @@ --- --- SQL implementation of the Software Heritage data model --- -- schema versions create table dbversion ( version int primary key, release timestamptz, description text ); comment on table dbversion is 'Details of current db version'; comment on column dbversion.version is 'SQL schema version'; comment on column dbversion.release is 'Version deployment timestamp'; comment on column dbversion.description is 'Release description'; -- latest schema version insert into dbversion(version, release, description) values(157, now(), 'Work In Progress'); -- a SHA1 checksum create domain sha1 as bytea check (length(value) = 20); -- a Git object ID, i.e., a Git-style salted SHA1 checksum create domain sha1_git as bytea check (length(value) = 20); -- a SHA256 checksum create domain sha256 as bytea check (length(value) = 32); -- a blake2 checksum create domain blake2s256 as bytea check (length(value) = 32); -- UNIX path (absolute, relative, individual path component, etc.) create domain unix_path as bytea; -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod create domain file_perms as int; -- an SWHID create domain swhid as text check (value ~ '^swh:[0-9]+:.*'); -- Checksums about actual file content. Note that the content itself is not -- stored in the DB, but on external (key-value) storage. A single checksum is -- used as key there, but the other can be used to verify that we do not inject -- content collisions not knowingly. create table content ( sha1 sha1 not null, sha1_git sha1_git not null, sha256 sha256 not null, blake2s256 blake2s256 not null, length bigint not null, ctime timestamptz not null default now(), -- creation time, i.e. 
time of (first) injection into the storage status content_status not null default 'visible', object_id bigserial ); comment on table content is 'Checksums of file content which is actually stored externally'; comment on column content.sha1 is 'Content sha1 hash'; comment on column content.sha1_git is 'Git object sha1 hash'; comment on column content.sha256 is 'Content Sha256 hash'; comment on column content.blake2s256 is 'Content blake2s hash'; comment on column content.length is 'Content length'; comment on column content.ctime is 'First seen time'; comment on column content.status is 'Content status (absent, visible, hidden)'; comment on column content.object_id is 'Content identifier'; -- An origin is a place, identified by an URL, where software source code -- artifacts can be found. We support different kinds of origins, e.g., git and -- other VCS repositories, web pages that list tarballs URLs (e.g., -- http://www.kernel.org), indirect tarball URLs (e.g., -- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is -- that it can be *fetched* from (wget, git clone, svn checkout, etc.) to -- retrieve all the contained software. create table origin ( id bigserial not null, url text not null ); comment on column origin.id is 'Artifact origin id'; comment on column origin.url is 'URL of origin'; -- Content blobs observed somewhere, but not ingested into the archive for -- whatever reason. This table is separate from the content table as we might -- not have the sha1 checksum of skipped contents (for instance when we inject -- git repositories, objects that are too big will be skipped here, and we will -- only know their sha1_git). 'reason' contains the reason the content was -- skipped. origin is a nullable column allowing to find out which origin -- contains that skipped content. create table skipped_content ( sha1 sha1, sha1_git sha1_git, sha256 sha256, blake2s256 blake2s256, length bigint not null, ctime timestamptz not null default now(), status content_status not null default 'absent', reason text not null, origin bigint, object_id bigserial ); comment on table skipped_content is 'Content blobs observed, but not ingested in the archive'; comment on column skipped_content.sha1 is 'Skipped content sha1 hash'; comment on column skipped_content.sha1_git is 'Git object sha1 hash'; comment on column skipped_content.sha256 is 'Skipped content sha256 hash'; comment on column skipped_content.blake2s256 is 'Skipped content blake2s hash'; comment on column skipped_content.length is 'Skipped content length'; comment on column skipped_content.ctime is 'First seen time'; comment on column skipped_content.status is 'Skipped content status (absent, visible, hidden)'; comment on column skipped_content.reason is 'Reason for skipping'; comment on column skipped_content.origin is 'Origin table identifier'; comment on column skipped_content.object_id is 'Skipped content identifier'; -- A file-system directory. A directory is a list of directory entries (see -- tables: directory_entry_{dir,file}). -- -- To list the contents of a directory: -- 1. list the contained directory_entry_dir using array dir_entries -- 2. list the contained directory_entry_file using array file_entries -- 3. list the contained directory_entry_rev using array rev_entries -- 4. 
UNION -- -- Synonyms/mappings: -- * git: tree create table directory ( id sha1_git not null, dir_entries bigint[], -- sub-directories, reference directory_entry_dir file_entries bigint[], -- contained files, reference directory_entry_file rev_entries bigint[], -- mounted revisions, reference directory_entry_rev object_id bigserial -- short object identifier ); comment on table directory is 'Contents of a directory, synonymous to tree (git)'; comment on column directory.id is 'Git object sha1 hash'; comment on column directory.dir_entries is 'Sub-directories, reference directory_entry_dir'; comment on column directory.file_entries is 'Contained files, reference directory_entry_file'; comment on column directory.rev_entries is 'Mounted revisions, reference directory_entry_rev'; comment on column directory.object_id is 'Short object identifier'; -- A directory entry pointing to a (sub-)directory. create table directory_entry_dir ( id bigserial, target sha1_git not null, -- id of target directory name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_dir is 'Directory entry for directory'; comment on column directory_entry_dir.id is 'Directory identifier'; comment on column directory_entry_dir.target is 'Target directory identifier'; comment on column directory_entry_dir.name is 'Path name, relative to containing directory'; comment on column directory_entry_dir.perms is 'Unix-like permissions'; -- A directory entry pointing to a file content. create table directory_entry_file ( id bigserial, target sha1_git not null, -- id of target file name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_file is 'Directory entry for file'; comment on column directory_entry_file.id is 'File identifier'; comment on column directory_entry_file.target is 'Target file identifier'; comment on column directory_entry_file.name is 'Path name, relative to containing directory'; comment on column directory_entry_file.perms is 'Unix-like permissions'; -- A directory entry pointing to a revision. create table directory_entry_rev ( id bigserial, target sha1_git not null, -- id of target revision name unix_path not null, -- path name, relative to containing dir perms file_perms not null -- unix-like permissions ); comment on table directory_entry_rev is 'Directory entry for revision'; comment on column directory_entry_dir.id is 'Revision identifier'; comment on column directory_entry_dir.target is 'Target revision in identifier'; comment on column directory_entry_dir.name is 'Path name, relative to containing directory'; comment on column directory_entry_dir.perms is 'Unix-like permissions'; -- A person referenced by some source code artifacts, e.g., a VCS revision or -- release metadata. create table person ( id bigserial, name bytea, -- advisory: not null if we managed to parse a name email bytea, -- advisory: not null if we managed to parse an email fullname bytea not null -- freeform specification; what is actually used in the checksums -- will usually be of the form 'name ' ); comment on table person is 'Person referenced in code artifact release metadata'; comment on column person.id is 'Person identifier'; comment on column person.name is 'Name'; comment on column person.email is 'Email'; comment on column person.fullname is 'Full name (raw name)'; -- The state of a source code tree at a specific point in time. 
-- -- Synonyms/mappings: -- * git / subversion / etc: commit -- * tarball: a specific tarball -- -- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in -- case of merges) parent revisions. Each revision points to a directory, i.e., -- a file-system tree containing files and directories. create table revision ( id sha1_git not null, date timestamptz, date_offset smallint, committer_date timestamptz, committer_date_offset smallint, type revision_type not null, directory sha1_git, -- source code 'root' directory message bytea, author bigint, committer bigint, synthetic boolean not null default false, -- true iff revision has been created by Software Heritage metadata jsonb, -- extra metadata (tarball checksums, extra commit information, etc...) object_id bigserial, date_neg_utc_offset boolean, committer_date_neg_utc_offset boolean ); comment on table revision is 'A revision represents the state of a source code tree at a specific point in time'; comment on column revision.id is 'Git-style SHA1 commit identifier'; comment on column revision.date is 'Author timestamp as UNIX epoch'; comment on column revision.date_offset is 'Author timestamp timezone, as minute offsets from UTC'; comment on column revision.date_neg_utc_offset is 'True indicates a -0 UTC offset on author timestamp'; comment on column revision.committer_date is 'Committer timestamp as UNIX epoch'; comment on column revision.committer_date_offset is 'Committer timestamp timezone, as minute offsets from UTC'; comment on column revision.committer_date_neg_utc_offset is 'True indicates a -0 UTC offset on committer timestamp'; comment on column revision.type is 'Type of revision'; comment on column revision.directory is 'Directory identifier'; comment on column revision.message is 'Commit message'; comment on column revision.author is 'Author identity'; comment on column revision.committer is 'Committer identity'; comment on column revision.synthetic is 'True iff revision has been synthesized by Software Heritage'; comment on column revision.metadata is 'Extra revision metadata'; comment on column revision.object_id is 'Non-intrinsic, sequential object identifier'; -- either this table or the sha1_git[] column on the revision table create table revision_history ( id sha1_git not null, parent_id sha1_git not null, parent_rank int not null default 0 -- parent position in merge commits, 0-based ); comment on table revision_history is 'Sequence of revision history with parent and position in history'; comment on column revision_history.id is 'Revision history git object sha1 checksum'; comment on column revision_history.parent_id is 'Parent revision git object identifier'; comment on column revision_history.parent_rank is 'Parent position in merge commits, 0-based'; -- Crawling history of software origins visited by Software Heritage. Each -- visit is a 3-way mapping between a software origin, a timestamp, and a -- snapshot object capturing the full-state of the origin at visit time. create table origin_visit ( origin bigint not null, visit bigint not null, date timestamptz not null, type text not null ); comment on column origin_visit.origin is 'Visited origin'; comment on column origin_visit.visit is 'Sequential visit number for the origin'; comment on column origin_visit.date is 'Visit timestamp'; comment on column origin_visit.type is 'Type of loader that did the visit (hg, git, ...)'; -- Crawling history of software origin visits by Software Heritage. 
Each -- visit see its history change through new origin visit status updates create table origin_visit_status ( origin bigint not null, visit bigint not null, date timestamptz not null, status origin_visit_state not null, metadata jsonb, snapshot sha1_git ); comment on column origin_visit_status.origin is 'Origin concerned by the visit update'; comment on column origin_visit_status.visit is 'Visit concerned by the visit update'; comment on column origin_visit_status.date is 'Visit update timestamp'; comment on column origin_visit_status.status is 'Visit status (ongoing, failed, full)'; comment on column origin_visit_status.metadata is 'Optional origin visit metadata'; comment on column origin_visit_status.snapshot is 'Optional, possibly partial, snapshot of the origin visit. It can be partial.'; -- A snapshot represents the entire state of a software origin as crawled by -- Software Heritage. This table is a simple mapping between (public) intrinsic -- snapshot identifiers and (private) numeric sequential identifiers. create table snapshot ( object_id bigserial not null, -- PK internal object identifier id sha1_git not null -- snapshot intrinsic identifier ); comment on table snapshot is 'State of a software origin as crawled by Software Heritage'; comment on column snapshot.object_id is 'Internal object identifier'; comment on column snapshot.id is 'Intrinsic snapshot identifier'; -- Each snapshot associate "branch" names to other objects in the Software -- Heritage Merkle DAG. This table describes branches as mappings between names -- and target typed objects. create table snapshot_branch ( object_id bigserial not null, -- PK internal object identifier name bytea not null, -- branch name, e.g., "master" or "feature/drag-n-drop" target bytea, -- target object identifier, e.g., a revision identifier target_type snapshot_target -- target object type, e.g., "revision" ); comment on table snapshot_branch is 'Associates branches with objects in Heritage Merkle DAG'; comment on column snapshot_branch.object_id is 'Internal object identifier'; comment on column snapshot_branch.name is 'Branch name'; comment on column snapshot_branch.target is 'Target object identifier'; comment on column snapshot_branch.target_type is 'Target object type'; -- Mapping between snapshots and their branches. create table snapshot_branches ( snapshot_id bigint not null, -- snapshot identifier, ref. snapshot.object_id branch_id bigint not null -- branch identifier, ref. snapshot_branch.object_id ); comment on table snapshot_branches is 'Mapping between snapshot and their branches'; comment on column snapshot_branches.snapshot_id is 'Snapshot identifier'; comment on column snapshot_branches.branch_id is 'Branch identifier'; -- A "memorable" point in time in the development history of a software -- project. 
--
-- Synonyms/mappings:
-- * git: tag (of the annotated kind, otherwise they are just references)
-- * tarball: the release version number
create table release (
  id sha1_git not null,
  target sha1_git,
  date timestamptz,
  date_offset smallint,
  name bytea,
  comment bytea,
  author bigint,
  synthetic boolean not null default false, -- true iff release has been created by Software Heritage
  object_id bigserial,
  target_type object_type not null,
  date_neg_utc_offset boolean
);

comment on table release is 'Details of a software release, synonymous with a tag (git) or version number (tarball)';
comment on column release.id is 'Release git identifier';
comment on column release.target is 'Target git identifier';
comment on column release.date is 'Release timestamp';
comment on column release.date_offset is 'Timestamp offset from UTC';
comment on column release.name is 'Name';
comment on column release.comment is 'Comment';
comment on column release.author is 'Author';
comment on column release.synthetic is 'Indicates if created by Software Heritage';
comment on column release.object_id is 'Object identifier';
comment on column release.target_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')';
comment on column release.date_neg_utc_offset is 'True indicates -0 UTC offset for release timestamp';

-- Tools
create table metadata_fetcher (
  id serial not null,
  name text not null,
  version text not null,
  metadata jsonb not null
);

comment on table metadata_fetcher is 'Tools used to retrieve metadata';
comment on column metadata_fetcher.id is 'Internal identifier of the fetcher';
comment on column metadata_fetcher.name is 'Fetcher name';
comment on column metadata_fetcher.version is 'Fetcher version';
comment on column metadata_fetcher.metadata is 'Extra information about the fetcher';

create table metadata_authority (
  id serial not null,
  type text not null,
  url text not null,
  metadata jsonb not null
);

comment on table metadata_authority is 'Metadata authority information';
comment on column metadata_authority.id is 'Internal identifier of the authority';
comment on column metadata_authority.type is 'Type of authority (deposit/forge/registry)';
comment on column metadata_authority.url is 'Authority''s URI';
comment on column metadata_authority.metadata is 'Other metadata about the authority';

-- Extrinsic metadata on DAG objects and origins.
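-- Illustrative note (not part of the upstream comments): the context columns
-- added to object_metadata below record where a piece of metadata was found.
-- They are only meaningful for non-origin objects; metadata on origins leaves
-- them all null. A hypothetical row for metadata on a content found while
-- browsing an origin might look like (all values below are made up):
--
--   insert into object_metadata
--     (type, id, authority_id, fetcher_id, discovery_date, format, metadata,
--      origin, visit, snapshot, revision, path, directory)
--   values
--     ('content', 'swh:1:cnt:...', 1, 2, now(), 'application/json', '{}',
--      'https://example.org/repo.git', 3,
--      'swh:1:snp:...', 'swh:1:rev:...', 'src/main.c', 'swh:1:dir:...');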
create table object_metadata (
  type text not null,
  id text not null,
  -- metadata source
  authority_id bigint not null,
  fetcher_id bigint not null,
  discovery_date timestamptz not null,
  -- metadata itself
  format text not null,
-  metadata bytea not null
+  metadata bytea not null,
+
+  -- context
+  origin text,
+  visit bigint,
+  snapshot swhid,
+  release swhid,
+  revision swhid,
+  path bytea,
+  directory swhid
);

comment on table object_metadata is 'keeps all metadata found concerning an object';
comment on column object_metadata.type is 'the type of object (content/directory/revision/release/snapshot/origin) the metadata is on';
comment on column object_metadata.id is 'the SWHID or origin URL for which the metadata was found';
comment on column object_metadata.discovery_date is 'the date of retrieval';
comment on column object_metadata.authority_id is 'the metadata provider: github, openhub, deposit, etc.';
comment on column object_metadata.fetcher_id is 'the tool used for extracting metadata: loaders, crawlers, etc.';
comment on column object_metadata.format is 'name of the format of metadata, used by readers to interpret it.';
comment on column object_metadata.metadata is 'original metadata in opaque format';

-- Keep a cache of object counts
create table object_counts (
  object_type text,        -- table for which we're counting objects (PK)
  value bigint,            -- count of objects in the table
  last_update timestamptz, -- last update for the object count in this table
  single_update boolean    -- whether we update this table standalone (true) or through bucketed counts (false)
);

comment on table object_counts is 'Cache of object counts';
comment on column object_counts.object_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')';
comment on column object_counts.value is 'Count of objects in the table';
comment on column object_counts.last_update is 'Last update for object count';
comment on column object_counts.single_update is 'standalone (true) or bucketed counts (false)';

create table object_counts_bucketed (
  line serial not null,      -- PK
  object_type text not null, -- table for which we're counting objects
  identifier text not null,  -- identifier across which we're bucketing objects
  bucket_start bytea,        -- lower bound (inclusive) for the bucket
  bucket_end bytea,          -- upper bound (exclusive) for the bucket
  value bigint,              -- count of objects in the bucket
  last_update timestamptz    -- last update for the object count in this bucket
);

comment on table object_counts_bucketed is 'Bucketed count for objects ordered by type';
comment on column object_counts_bucketed.line is 'Auto incremented identifier value';
comment on column object_counts_bucketed.object_type is 'Object type (''content'', ''directory'', ''revision'', ''release'', ''snapshot'')';
comment on column object_counts_bucketed.identifier is 'Common identifier for bucketed objects';
comment on column object_counts_bucketed.bucket_start is 'Lower bound (inclusive) for the bucket';
comment on column object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket';
comment on column object_counts_bucketed.value is 'Count of objects in the bucket';
comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket';

diff --git a/swh/storage/storage.py b/swh/storage/storage.py
index 31d47745..70967ab8 100644
--- a/swh/storage/storage.py
+++ b/swh/storage/storage.py
@@ -1,1376 +1,1399 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the
top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import contextlib import datetime import itertools from collections import defaultdict from contextlib import contextmanager from deprecated import deprecated -from typing import Any, Dict, Iterable, List, Optional +from typing import Any, Dict, Iterable, List, Optional, Union import attr import psycopg2 import psycopg2.pool import psycopg2.errors from swh.core.api.serializers import msgpack_loads, msgpack_dumps from swh.model.model import ( Content, Directory, Origin, OriginVisit, OriginVisitStatus, Revision, Release, SkippedContent, Snapshot, SHA1_SIZE, ) from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex from swh.storage.objstorage import ObjStorage from swh.storage.validate import VALIDATION_EXCEPTIONS from swh.storage.utils import now from . import converters +from .extrinsic_metadata import ( + check_extrinsic_metadata_context, + CONTEXT_KEYS, +) from .common import db_transaction_generator, db_transaction from .db import Db from .exc import StorageArgumentException, StorageDBError, HashCollision from .algos import diff from .metrics import timed, send_metric, process_metrics from .utils import get_partition_bounds_bytes, extract_collision_hash from .writer import JournalWriter # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 EMPTY_SNAPSHOT_ID = hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e") """Identifier for the empty snapshot""" VALIDATION_EXCEPTIONS = VALIDATION_EXCEPTIONS + [ psycopg2.errors.CheckViolation, psycopg2.errors.IntegrityError, psycopg2.errors.InvalidTextRepresentation, psycopg2.errors.NotNullViolation, psycopg2.errors.NumericValueOutOfRange, psycopg2.errors.UndefinedFunction, # (raised on wrong argument typs) ] """Exceptions raised by postgresql when validation of the arguments failed.""" @contextlib.contextmanager def convert_validation_exceptions(): """Catches postgresql errors related to invalid arguments, and re-raises a StorageArgumentException.""" try: yield except tuple(VALIDATION_EXCEPTIONS) as e: raise StorageArgumentException(str(e)) class Storage: """SWH storage proxy, encompassing DB and object storage """ def __init__( self, db, objstorage, min_pool_conns=1, max_pool_conns=10, journal_writer=None ): """ Args: db_conn: either a libpq connection string, or a psycopg2 connection obj_root: path to the root of the object storage """ try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) self.journal_writer = JournalWriter(journal_writer) self.objstorage = ObjStorage(objstorage) def get_db(self): if self._db: return self._db else: return Db.from_pool(self._pool) def put_db(self, db): if db is not self._db: db.put_conn() @contextmanager def db(self): db = None try: db = self.get_db() yield db finally: if db: self.put_db(db) @timed @db_transaction() def check_config(self, *, check_write, db=None, cur=None): if not self.objstorage.check_config(check_write=check_write): return False # Check permissions on one of the tables if check_write: check = "INSERT" else: check = "SELECT" cur.execute("select has_table_privilege(current_user, 'content', %s)", (check,)) return cur.fetchone()[0] def _content_unique_key(self, hash, db): """Given a hash 
(tuple or dict), return a unique key from the aggregation of keys. """ keys = db.content_hash_keys if isinstance(hash, tuple): return hash return tuple([hash[k] for k in keys]) def _content_add_metadata(self, db, cur, content): """Add content to the postgresql database but not the object storage. """ # create temporary table for metadata injection db.mktemp("content", cur) db.copy_to( (c.to_dict() for c in content), "tmp_content", db.content_add_keys, cur ) # move metadata in place try: db.content_add_from_temp(cur) except psycopg2.IntegrityError as e: if e.diag.sqlstate == "23505" and e.diag.table_name == "content": message_detail = e.diag.message_detail if message_detail: hash_name, hash_id = extract_collision_hash(message_detail) collision_contents_hashes = [ c.hashes() for c in content if c.get_hash(hash_name) == hash_id ] else: constraint_to_hash_name = { "content_pkey": "sha1", "content_sha1_git_idx": "sha1_git", "content_sha256_idx": "sha256", } hash_name = constraint_to_hash_name.get(e.diag.constraint_name) hash_id = None collision_contents_hashes = None raise HashCollision( hash_name, hash_id, collision_contents_hashes ) from None else: raise @timed @process_metrics def content_add(self, content: Iterable[Content]) -> Dict: ctime = now() contents = [attr.evolve(c, ctime=ctime) for c in content] objstorage_summary = self.objstorage.content_add(contents) with self.db() as db: with db.transaction() as cur: missing = list( self.content_missing( map(Content.to_dict, contents), key_hash="sha1_git", db=db, cur=cur, ) ) contents = [c for c in contents if c.sha1_git in missing] self.journal_writer.content_add(contents) self._content_add_metadata(db, cur, contents) return { "content:add": len(contents), "content:add:bytes": objstorage_summary["content:add:bytes"], } @timed @db_transaction() def content_update(self, content, keys=[], db=None, cur=None): # TODO: Add a check on input keys. How to properly implement # this? We don't know yet the new columns. self.journal_writer.content_update(content) db.mktemp("content", cur) select_keys = list(set(db.content_get_metadata_keys).union(set(keys))) with convert_validation_exceptions(): db.copy_to(content, "tmp_content", select_keys, cur) db.content_update_from_temp(keys_to_update=keys, cur=cur) @timed @process_metrics @db_transaction() def content_add_metadata( self, content: Iterable[Content], db=None, cur=None ) -> Dict: contents = list(content) missing = self.content_missing( (c.to_dict() for c in contents), key_hash="sha1_git", db=db, cur=cur, ) contents = [c for c in contents if c.sha1_git in missing] self.journal_writer.content_add_metadata(contents) self._content_add_metadata(db, cur, contents) return { "content:add": len(contents), } @timed def content_get(self, content): # FIXME: Make this method support slicing the `data`. if len(content) > BULK_BLOCK_CONTENT_LEN_MAX: raise StorageArgumentException( "Send at maximum %s contents." 
% BULK_BLOCK_CONTENT_LEN_MAX ) yield from self.objstorage.content_get(content) @timed @db_transaction() def content_get_range(self, start, end, limit=1000, db=None, cur=None): if limit is None: raise StorageArgumentException("limit should not be None") contents = [] next_content = None for counter, content_row in enumerate( db.content_get_range(start, end, limit + 1, cur) ): content = dict(zip(db.content_get_metadata_keys, content_row)) if counter >= limit: # take the last commit for the next page starting from this next_content = content["sha1"] break contents.append(content) return { "contents": contents, "next": next_content, } @timed def content_get_partition( self, partition_id: int, nb_partitions: int, limit: int = 1000, page_token: str = None, ): if limit is None: raise StorageArgumentException("limit should not be None") (start, end) = get_partition_bounds_bytes( partition_id, nb_partitions, SHA1_SIZE ) if page_token: start = hash_to_bytes(page_token) if end is None: end = b"\xff" * SHA1_SIZE result = self.content_get_range(start, end, limit) result2 = { "contents": result["contents"], "next_page_token": None, } if result["next"]: result2["next_page_token"] = hash_to_hex(result["next"]) return result2 @timed @db_transaction(statement_timeout=500) def content_get_metadata( self, contents: List[bytes], db=None, cur=None ) -> Dict[bytes, List[Dict]]: result: Dict[bytes, List[Dict]] = {sha1: [] for sha1 in contents} for row in db.content_get_metadata_from_sha1s(contents, cur): content_meta = dict(zip(db.content_get_metadata_keys, row)) result[content_meta["sha1"]].append(content_meta) return result @timed @db_transaction_generator() def content_missing(self, content, key_hash="sha1", db=None, cur=None): keys = db.content_hash_keys if key_hash not in keys: raise StorageArgumentException("key_hash should be one of %s" % keys) key_hash_idx = keys.index(key_hash) if not content: return for obj in db.content_missing_from_list(content, cur): yield obj[key_hash_idx] @timed @db_transaction_generator() def content_missing_per_sha1(self, contents, db=None, cur=None): for obj in db.content_missing_per_sha1(contents, cur): yield obj[0] @timed @db_transaction_generator() def content_missing_per_sha1_git(self, contents, db=None, cur=None): for obj in db.content_missing_per_sha1_git(contents, cur): yield obj[0] @timed @db_transaction() def content_find(self, content, db=None, cur=None): if not set(content).intersection(DEFAULT_ALGORITHMS): raise StorageArgumentException( "content keys must contain at least one of: " "sha1, sha1_git, sha256, blake2s256" ) contents = db.content_find( sha1=content.get("sha1"), sha1_git=content.get("sha1_git"), sha256=content.get("sha256"), blake2s256=content.get("blake2s256"), cur=cur, ) return [dict(zip(db.content_find_cols, content)) for content in contents] @timed @db_transaction() def content_get_random(self, db=None, cur=None): return db.content_get_random(cur) @staticmethod def _skipped_content_normalize(d): d = d.copy() if d.get("status") is None: d["status"] = "absent" if d.get("length") is None: d["length"] = -1 return d @staticmethod def _skipped_content_validate(d): """Sanity checks on status / reason / length, that postgresql doesn't enforce.""" if d["status"] != "absent": raise StorageArgumentException( "Invalid content status: {}".format(d["status"]) ) if d.get("reason") is None: raise StorageArgumentException( "Must provide a reason if content is absent." 
) if d["length"] < -1: raise StorageArgumentException("Content length must be positive or -1.") def _skipped_content_add_metadata(self, db, cur, content: Iterable[SkippedContent]): origin_ids = db.origin_id_get_by_url([cont.origin for cont in content], cur=cur) content = [ attr.evolve(c, origin=origin_id) for (c, origin_id) in zip(content, origin_ids) ] db.mktemp("skipped_content", cur) db.copy_to( [c.to_dict() for c in content], "tmp_skipped_content", db.skipped_content_keys, cur, ) # move metadata in place db.skipped_content_add_from_temp(cur) @timed @process_metrics @db_transaction() def skipped_content_add( self, content: Iterable[SkippedContent], db=None, cur=None ) -> Dict: ctime = now() content = [attr.evolve(c, ctime=ctime) for c in content] missing_contents = self.skipped_content_missing( (c.to_dict() for c in content), db=db, cur=cur, ) content = [ c for c in content if any( all( c.get_hash(algo) == missing_content.get(algo) for algo in DEFAULT_ALGORITHMS ) for missing_content in missing_contents ) ] self.journal_writer.skipped_content_add(content) self._skipped_content_add_metadata(db, cur, content) return { "skipped_content:add": len(content), } @timed @db_transaction_generator() def skipped_content_missing(self, contents, db=None, cur=None): contents = list(contents) for content in db.skipped_content_missing(contents, cur): yield dict(zip(db.content_hash_keys, content)) @timed @process_metrics @db_transaction() def directory_add( self, directories: Iterable[Directory], db=None, cur=None ) -> Dict: directories = list(directories) summary = {"directory:add": 0} dirs = set() dir_entries: Dict[str, defaultdict] = { "file": defaultdict(list), "dir": defaultdict(list), "rev": defaultdict(list), } for cur_dir in directories: dir_id = cur_dir.id dirs.add(dir_id) for src_entry in cur_dir.entries: entry = src_entry.to_dict() entry["dir_id"] = dir_id dir_entries[entry["type"]][dir_id].append(entry) dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur)) if not dirs_missing: return summary self.journal_writer.directory_add( dir_ for dir_ in directories if dir_.id in dirs_missing ) # Copy directory ids dirs_missing_dict = ({"id": dir} for dir in dirs_missing) db.mktemp("directory", cur) db.copy_to(dirs_missing_dict, "tmp_directory", ["id"], cur) # Copy entries for entry_type, entry_list in dir_entries.items(): entries = itertools.chain.from_iterable( entries_for_dir for dir_id, entries_for_dir in entry_list.items() if dir_id in dirs_missing ) db.mktemp_dir_entry(entry_type) db.copy_to( entries, "tmp_directory_entry_%s" % entry_type, ["target", "name", "perms", "dir_id"], cur, ) # Do the final copy db.directory_add_from_temp(cur) summary["directory:add"] = len(dirs_missing) return summary @timed @db_transaction_generator() def directory_missing(self, directories, db=None, cur=None): for obj in db.directory_missing_from_list(directories, cur): yield obj[0] @timed @db_transaction_generator(statement_timeout=20000) def directory_ls(self, directory, recursive=False, db=None, cur=None): if recursive: res_gen = db.directory_walk(directory, cur=cur) else: res_gen = db.directory_walk_one(directory, cur=cur) for line in res_gen: yield dict(zip(db.directory_ls_cols, line)) @timed @db_transaction(statement_timeout=2000) def directory_entry_get_by_path(self, directory, paths, db=None, cur=None): res = db.directory_entry_get_by_path(directory, paths, cur) if res: return dict(zip(db.directory_ls_cols, res)) @timed @db_transaction() def directory_get_random(self, db=None, cur=None): return 
db.directory_get_random(cur) @timed @process_metrics @db_transaction() def revision_add(self, revisions: Iterable[Revision], db=None, cur=None) -> Dict: revisions = list(revisions) summary = {"revision:add": 0} revisions_missing = set( self.revision_missing( set(revision.id for revision in revisions), db=db, cur=cur ) ) if not revisions_missing: return summary db.mktemp_revision(cur) revisions_filtered = [ revision for revision in revisions if revision.id in revisions_missing ] self.journal_writer.revision_add(revisions_filtered) revisions_filtered = list(map(converters.revision_to_db, revisions_filtered)) parents_filtered: List[bytes] = [] with convert_validation_exceptions(): db.copy_to( revisions_filtered, "tmp_revision", db.revision_add_cols, cur, lambda rev: parents_filtered.extend(rev["parents"]), ) db.revision_add_from_temp(cur) db.copy_to( parents_filtered, "revision_history", ["id", "parent_id", "parent_rank"], cur, ) return {"revision:add": len(revisions_missing)} @timed @db_transaction_generator() def revision_missing(self, revisions, db=None, cur=None): if not revisions: return for obj in db.revision_missing_from_list(revisions, cur): yield obj[0] @timed @db_transaction_generator(statement_timeout=1000) def revision_get(self, revisions, db=None, cur=None): for line in db.revision_get_from_list(revisions, cur): data = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) if not data["type"]: yield None continue yield data @timed @db_transaction_generator(statement_timeout=2000) def revision_log(self, revisions, limit=None, db=None, cur=None): for line in db.revision_log(revisions, limit, cur): data = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) if not data["type"]: yield None continue yield data @timed @db_transaction_generator(statement_timeout=2000) def revision_shortlog(self, revisions, limit=None, db=None, cur=None): yield from db.revision_shortlog(revisions, limit, cur) @timed @db_transaction() def revision_get_random(self, db=None, cur=None): return db.revision_get_random(cur) @timed @process_metrics @db_transaction() def release_add(self, releases: Iterable[Release], db=None, cur=None) -> Dict: releases = list(releases) summary = {"release:add": 0} release_ids = set(release.id for release in releases) releases_missing = set(self.release_missing(release_ids, db=db, cur=cur)) if not releases_missing: return summary db.mktemp_release(cur) releases_filtered = [ release for release in releases if release.id in releases_missing ] self.journal_writer.release_add(releases_filtered) releases_filtered = list(map(converters.release_to_db, releases_filtered)) with convert_validation_exceptions(): db.copy_to(releases_filtered, "tmp_release", db.release_add_cols, cur) db.release_add_from_temp(cur) return {"release:add": len(releases_missing)} @timed @db_transaction_generator() def release_missing(self, releases, db=None, cur=None): if not releases: return for obj in db.release_missing_from_list(releases, cur): yield obj[0] @timed @db_transaction_generator(statement_timeout=500) def release_get(self, releases, db=None, cur=None): for release in db.release_get_from_list(releases, cur): data = converters.db_to_release(dict(zip(db.release_get_cols, release))) yield data if data["target_type"] else None @timed @db_transaction() def release_get_random(self, db=None, cur=None): return db.release_get_random(cur) @timed @process_metrics @db_transaction() def snapshot_add(self, snapshots: Iterable[Snapshot], db=None, cur=None) -> Dict: created_temp_table = False 
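        # The temporary branch table is created lazily: it is only needed once
        # the first snapshot missing from the archive is encountered, so a
        # batch where every snapshot already exists skips that round-trip.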
count = 0 for snapshot in snapshots: if not db.snapshot_exists(snapshot.id, cur): if not created_temp_table: db.mktemp_snapshot_branch(cur) created_temp_table = True with convert_validation_exceptions(): db.copy_to( ( { "name": name, "target": info.target if info else None, "target_type": ( info.target_type.value if info else None ), } for name, info in snapshot.branches.items() ), "tmp_snapshot_branch", ["name", "target", "target_type"], cur, ) self.journal_writer.snapshot_add([snapshot]) db.snapshot_add(snapshot.id, cur) count += 1 return {"snapshot:add": count} @timed @db_transaction_generator() def snapshot_missing(self, snapshots, db=None, cur=None): for obj in db.snapshot_missing_from_list(snapshots, cur): yield obj[0] @timed @db_transaction(statement_timeout=2000) def snapshot_get(self, snapshot_id, db=None, cur=None): return self.snapshot_get_branches(snapshot_id, db=db, cur=cur) @timed @db_transaction(statement_timeout=2000) def snapshot_get_by_origin_visit(self, origin, visit, db=None, cur=None): snapshot_id = db.snapshot_get_by_origin_visit(origin, visit, cur) if snapshot_id: return self.snapshot_get(snapshot_id, db=db, cur=cur) return None @timed @db_transaction(statement_timeout=2000) def snapshot_count_branches(self, snapshot_id, db=None, cur=None): return dict([bc for bc in db.snapshot_count_branches(snapshot_id, cur)]) @timed @db_transaction(statement_timeout=2000) def snapshot_get_branches( self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None, db=None, cur=None, ): if snapshot_id == EMPTY_SNAPSHOT_ID: return { "id": snapshot_id, "branches": {}, "next_branch": None, } branches = {} next_branch = None fetched_branches = list( db.snapshot_get_by_id( snapshot_id, branches_from=branches_from, branches_count=branches_count + 1, target_types=target_types, cur=cur, ) ) for branch in fetched_branches[:branches_count]: branch = dict(zip(db.snapshot_get_cols, branch)) del branch["snapshot_id"] name = branch.pop("name") if branch == {"target": None, "target_type": None}: branch = None branches[name] = branch if len(fetched_branches) > branches_count: branch = dict(zip(db.snapshot_get_cols, fetched_branches[-1])) next_branch = branch["name"] if branches: return { "id": snapshot_id, "branches": branches, "next_branch": next_branch, } return None @timed @db_transaction() def snapshot_get_random(self, db=None, cur=None): return db.snapshot_get_random(cur) @timed @db_transaction() def origin_visit_add( self, visits: Iterable[OriginVisit], db=None, cur=None ) -> Iterable[OriginVisit]: for visit in visits: origin = self.origin_get({"url": visit.origin}, db=db, cur=cur) if not origin: # Cannot add a visit without an origin raise StorageArgumentException("Unknown origin %s", visit.origin) all_visits = [] nb_visits = 0 for visit in visits: nb_visits += 1 if not visit.visit: with convert_validation_exceptions(): visit_id = db.origin_visit_add( visit.origin, visit.date, visit.type, cur=cur ) visit = attr.evolve(visit, visit=visit_id) else: db.origin_visit_upsert(visit) assert visit.visit is not None all_visits.append(visit) # Forced to write after for the case when the visit has no id self.journal_writer.origin_visit_add([visit]) visit_status = OriginVisitStatus( origin=visit.origin, visit=visit.visit, date=visit.date, status="created", snapshot=None, ) self._origin_visit_status_add(visit_status, db=db, cur=cur) send_metric("origin_visit:add", count=nb_visits, method_name="origin_visit") return all_visits def _origin_visit_status_add( self, visit_status: OriginVisitStatus, 
db, cur ) -> None: """Add an origin visit status""" self.journal_writer.origin_visit_status_add([visit_status]) db.origin_visit_status_add(visit_status, cur=cur) send_metric( "origin_visit_status:add", count=1, method_name="origin_visit_status" ) @timed @db_transaction() def origin_visit_status_add( self, visit_statuses: Iterable[OriginVisitStatus], db=None, cur=None, ) -> None: # First round to check existence (fail early if any is ko) for visit_status in visit_statuses: origin_url = self.origin_get({"url": visit_status.origin}, db=db, cur=cur) if not origin_url: raise StorageArgumentException(f"Unknown origin {visit_status.origin}") for visit_status in visit_statuses: self._origin_visit_status_add(visit_status, db, cur) @timed @db_transaction() def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, db=None, cur=None, ) -> Optional[OriginVisitStatus]: row = db.origin_visit_status_get_latest( origin_url, visit, allowed_statuses, require_snapshot, cur=cur ) if not row: return None return OriginVisitStatus.from_dict(row) def _origin_visit_get_updated( self, origin: str, visit_id: int, db, cur ) -> Optional[Dict[str, Any]]: """Retrieve origin visit and latest origin visit status and merge them into an origin visit. """ row_visit = db.origin_visit_get(origin, visit_id) if row_visit is None: return None visit = dict(zip(db.origin_visit_get_cols, row_visit)) return self._origin_visit_apply_update(visit, db=db, cur=cur) def _origin_visit_apply_update( self, visit: Dict[str, Any], db, cur=None ) -> Dict[str, Any]: """Retrieve the latest visit status information for the origin visit. Then merge it with the visit and return it. """ visit_status = db.origin_visit_status_get_latest( visit["origin"], visit["visit"], cur=cur ) return self._origin_visit_merge(visit, visit_status) def _origin_visit_merge( self, visit: Dict[str, Any], visit_status: Dict[str, Any] ) -> Dict[str, Any]: """Merge origin_visit and origin_visit_status together. """ return OriginVisit.from_dict( { # default to the values in visit **visit, # override with the last update **visit_status, # visit['origin'] is the URL (via a join), while # visit_status['origin'] is only an id. 
"origin": visit["origin"], # but keep the date of the creation of the origin visit "date": visit["date"], } ).to_dict() @timed @db_transaction_generator(statement_timeout=500) def origin_visit_get( self, origin: str, last_visit: Optional[int] = None, limit: Optional[int] = None, order: str = "asc", db=None, cur=None, ) -> Iterable[Dict[str, Any]]: assert order in ["asc", "desc"] lines = db.origin_visit_get_all( origin, last_visit=last_visit, limit=limit, order=order, cur=cur ) for line in lines: visit = dict(zip(db.origin_visit_get_cols, line)) yield self._origin_visit_apply_update(visit, db) @timed @db_transaction(statement_timeout=500) def origin_visit_find_by_date( self, origin: str, visit_date: datetime.datetime, db=None, cur=None ) -> Optional[Dict[str, Any]]: visit = db.origin_visit_find_by_date(origin, visit_date, cur=cur) if visit: return self._origin_visit_apply_update(visit, db) return None @timed @db_transaction(statement_timeout=500) def origin_visit_get_by( self, origin: str, visit: int, db=None, cur=None ) -> Optional[Dict[str, Any]]: row = db.origin_visit_get(origin, visit, cur) if row: visit_dict = dict(zip(db.origin_visit_get_cols, row)) return self._origin_visit_apply_update(visit_dict, db) return None @timed @db_transaction(statement_timeout=4000) def origin_visit_get_latest( self, origin: str, type: Optional[str] = None, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, db=None, cur=None, ) -> Optional[Dict[str, Any]]: row = db.origin_visit_get_latest( origin, type=type, allowed_statuses=allowed_statuses, require_snapshot=require_snapshot, cur=cur, ) if row: visit = dict(zip(db.origin_visit_get_cols, row)) return self._origin_visit_apply_update(visit, db) return None @timed @db_transaction() def origin_visit_get_random( self, type: str, db=None, cur=None ) -> Optional[Dict[str, Any]]: row = db.origin_visit_get_random(type, cur) if row: visit = dict(zip(db.origin_visit_get_cols, row)) return self._origin_visit_apply_update(visit, db) return None @timed @db_transaction(statement_timeout=2000) def object_find_by_sha1_git(self, ids, db=None, cur=None): ret = {id: [] for id in ids} for retval in db.object_find_by_sha1_git(ids, cur=cur): if retval[1]: ret[retval[0]].append( dict(zip(db.object_find_by_sha1_git_cols, retval)) ) return ret @timed @db_transaction(statement_timeout=500) def origin_get(self, origins, db=None, cur=None): if isinstance(origins, dict): # Old API return_single = True origins = [origins] elif len(origins) == 0: return [] else: return_single = False origin_urls = [origin["url"] for origin in origins] results = db.origin_get_by_url(origin_urls, cur) results = [dict(zip(db.origin_cols, result)) for result in results] if return_single: assert len(results) == 1 if results[0]["url"] is not None: return results[0] else: return None else: return [None if res["url"] is None else res for res in results] @timed @db_transaction_generator(statement_timeout=500) def origin_get_by_sha1(self, sha1s, db=None, cur=None): for line in db.origin_get_by_sha1(sha1s, cur): if line[0] is not None: yield dict(zip(db.origin_cols, line)) else: yield None @timed @db_transaction_generator() def origin_get_range(self, origin_from=1, origin_count=100, db=None, cur=None): for origin in db.origin_get_range(origin_from, origin_count, cur): yield dict(zip(db.origin_get_range_cols, origin)) @timed @db_transaction() def origin_list( self, page_token: Optional[str] = None, limit: int = 100, *, db=None, cur=None ) -> dict: page_token = page_token or "0" if not 
isinstance(page_token, str):
            raise StorageArgumentException("page_token must be a string.")
        origin_from = int(page_token)
        result: Dict[str, Any] = {
            "origins": [
                dict(zip(db.origin_get_range_cols, origin))
                for origin in db.origin_get_range(origin_from, limit, cur)
            ],
        }
        assert len(result["origins"]) <= limit
        if len(result["origins"]) == limit:
            result["next_page_token"] = str(result["origins"][limit - 1]["id"] + 1)
        for origin in result["origins"]:
            del origin["id"]
        return result

    @timed
    @db_transaction_generator()
    def origin_search(
        self,
        url_pattern,
        offset=0,
        limit=50,
        regexp=False,
        with_visit=False,
        db=None,
        cur=None,
    ):
        for origin in db.origin_search(
            url_pattern, offset, limit, regexp, with_visit, cur
        ):
            yield dict(zip(db.origin_cols, origin))

    @timed
    @db_transaction()
    def origin_count(
        self, url_pattern, regexp=False, with_visit=False, db=None, cur=None
    ):
        return db.origin_count(url_pattern, regexp, with_visit, cur)

    @timed
    @db_transaction()
    def origin_add(
        self, origins: Iterable[Origin], db=None, cur=None
    ) -> Dict[str, int]:
        urls = [o.url for o in origins]
        known_origins = set(url for (url,) in db.origin_get_by_url(urls, cur))
        # use lists here to keep origins sorted; some tests depend on this
        to_add = [url for url in urls if url not in known_origins]
        self.journal_writer.origin_add([Origin(url=url) for url in to_add])
        added = 0
        for url in to_add:
            if db.origin_add(url, cur):
                added += 1
        return {"origin:add": added}

    @deprecated("Use origin_add([origin]) instead")
    @timed
    @db_transaction()
    def origin_add_one(self, origin: Origin, db=None, cur=None) -> str:
        stats = self.origin_add([origin])
        if stats.get("origin:add", 0):
            send_metric("origin:add", count=1, method_name="origin_add_one")
        return origin.url

    @db_transaction(statement_timeout=500)
    def stat_counters(self, db=None, cur=None):
        return {k: v for (k, v) in db.stat_counters()}

    @db_transaction()
    def refresh_stat_counters(self, db=None, cur=None):
        keys = [
            "content",
            "directory",
            "directory_entry_dir",
            "directory_entry_file",
            "directory_entry_rev",
            "origin",
            "origin_visit",
            "person",
            "release",
            "revision",
            "revision_history",
            "skipped_content",
            "snapshot",
        ]
        for key in keys:
            cur.execute("select * from swh_update_counter(%s)", (key,))

    @timed
    @db_transaction()
    def origin_metadata_add(
        self,
        origin_url: str,
        discovery_date: datetime.datetime,
        authority: Dict[str, Any],
        fetcher: Dict[str, Any],
        format: str,
        metadata: bytes,
        db=None,
        cur=None,
    ) -> None:
+        context: Dict[str, Union[str, bytes, int]] = {}  # origins have no context
+
        self._object_metadata_add(
            "origin",
            origin_url,
+            context,
            discovery_date,
            authority,
            fetcher,
            format,
            metadata,
            db,
            cur,
        )

    def _object_metadata_add(
        self,
        object_type: str,
        id: str,
+        context: Dict[str, Union[str, bytes, int]],
        discovery_date: datetime.datetime,
        authority: Dict[str, Any],
        fetcher: Dict[str, Any],
        format: str,
        metadata: bytes,
        db,
        cur,
    ) -> None:
+        check_extrinsic_metadata_context(object_type, context)
+
        authority_id = self._get_authority_id(authority, db, cur)
        fetcher_id = self._get_fetcher_id(fetcher, db, cur)
        if not isinstance(metadata, bytes):
            raise StorageArgumentException(
                "metadata must be bytes, not %r" % (metadata,)
            )
        db.object_metadata_add(
            object_type,
            id,
+            context,
            discovery_date,
            authority_id,
            fetcher_id,
            format,
            metadata,
            cur,
        )
        send_metric(
            f"{object_type}_metadata:add",
            count=1,
            method_name=f"{object_type}_metadata_add",
        )

    @timed
    @db_transaction(statement_timeout=500)
    def origin_metadata_get(
        self,
        origin_url: str,
        authority: Dict[str, str],
        after: Optional[datetime.datetime] = None,
        page_token: Optional[bytes] = None,
        limit: int = 1000,
        db=None,
        cur=None,
    ) -> Dict[str, Any]:
        result = self._object_metadata_get(
            "origin", origin_url, authority, after, page_token, limit, db, cur
        )
        for res in result["results"]:
            res.pop("id")
            res["origin_url"] = origin_url
        return result

    def _object_metadata_get(
        self,
        object_type: str,
        id: str,
        authority: Dict[str, str],
        after: Optional[datetime.datetime],
        page_token: Optional[bytes],
        limit: int,
        db,
        cur,
    ) -> Dict[str, Any]:
        if page_token:
            (after_time, after_fetcher) = msgpack_loads(page_token)
            if after and after_time < after:
                raise StorageArgumentException(
                    "page_token is inconsistent with the value of 'after'."
                )
        else:
            after_time = after
            after_fetcher = None

        authority_id = db.metadata_authority_get_id(
            authority["type"], authority["url"], cur
        )
        if not authority_id:
            return {
                "next_page_token": None,
                "results": [],
            }
        rows = db.object_metadata_get(
            object_type, id, authority_id, after_time, after_fetcher, limit + 1, cur
        )
        rows = [dict(zip(db.object_metadata_get_cols, row)) for row in rows]
        results = []
        for row in rows:
            row = row.copy()
            row.pop("metadata_fetcher.id")
+            context = {}
+            for key in CONTEXT_KEYS[object_type]:
+                value = row[key]
+                if value is not None:
+                    context[key] = value
+
            result = {
+                "id": row["id"],
                "authority": {
                    "type": row.pop("metadata_authority.type"),
                    "url": row.pop("metadata_authority.url"),
                },
                "fetcher": {
                    "name": row.pop("metadata_fetcher.name"),
                    "version": row.pop("metadata_fetcher.version"),
                },
-                **row,
+                "discovery_date": row["discovery_date"],
+                "format": row["format"],
+                "metadata": row["metadata"],
            }
+
+            if CONTEXT_KEYS[object_type]:
+                result["context"] = context
+
            results.append(result)

        if len(results) > limit:
            results.pop()
            assert len(results) == limit
            last_returned_row = rows[-2]  # rows[-1] corresponds to the popped result
            next_page_token: Optional[bytes] = msgpack_dumps(
                (
                    last_returned_row["discovery_date"],
                    last_returned_row["metadata_fetcher.id"],
                )
            )
        else:
            next_page_token = None

        return {
            "next_page_token": next_page_token,
            "results": results,
        }

    @timed
    @db_transaction()
    def metadata_fetcher_add(
        self, name: str, version: str, metadata: Dict[str, Any], db=None, cur=None
    ) -> None:
        db.metadata_fetcher_add(name, version, metadata)
        send_metric("metadata_fetcher:add", count=1, method_name="metadata_fetcher")

    @timed
    @db_transaction(statement_timeout=500)
    def metadata_fetcher_get(
        self, name: str, version: str, db=None, cur=None
    ) -> Optional[Dict[str, Any]]:
        row = db.metadata_fetcher_get(name, version, cur=cur)
        if not row:
            return None
        return dict(zip(db.metadata_fetcher_cols, row))

    @timed
    @db_transaction()
    def metadata_authority_add(
        self, type: str, url: str, metadata: Dict[str, Any], db=None, cur=None
    ) -> None:
        db.metadata_authority_add(type, url, metadata, cur)
        send_metric("metadata_authority:add", count=1, method_name="metadata_authority")

    @timed
    @db_transaction()
    def metadata_authority_get(
        self, type: str, url: str, db=None, cur=None
    ) -> Optional[Dict[str, Any]]:
        row = db.metadata_authority_get(type, url, cur=cur)
        if not row:
            return None
        return dict(zip(db.metadata_authority_cols, row))

    @timed
    def diff_directories(self, from_dir, to_dir, track_renaming=False):
        return diff.diff_directories(self, from_dir, to_dir, track_renaming)

    @timed
    def diff_revisions(self, from_rev, to_rev, track_renaming=False):
        return diff.diff_revisions(self, from_rev, to_rev, track_renaming)

    @timed
    def diff_revision(self, revision, track_renaming=False):
        return diff.diff_revision(self, revision, track_renaming)

    def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
        """Do nothing """
        return None

    def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
        return {}

    def _get_authority_id(self, authority: Dict[str, Any], db, cur):
        authority_id = db.metadata_authority_get_id(
            authority["type"], authority["url"], cur
        )
        if not authority_id:
            raise StorageArgumentException(f"Unknown authority {authority}")
        return authority_id

    def _get_fetcher_id(self, fetcher: Dict[str, Any], db, cur):
        fetcher_id = db.metadata_fetcher_get_id(
            fetcher["name"], fetcher["version"], cur
        )
        if not fetcher_id:
            raise StorageArgumentException(f"Unknown fetcher {fetcher}")
        return fetcher_id
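
Note on the new helpers: the _object_metadata_add() path above validates its context argument with check_extrinsic_metadata_context() and CONTEXT_KEYS, which are defined outside this hunk. The following is only a hedged sketch of what such helpers might look like, assuming the allowed context keys mirror the new object_metadata columns (origin, visit, snapshot, release, revision, path, directory) and that StorageArgumentException lives in swh.storage.exc; the actual definitions in swh.storage may differ.

# Hypothetical sketch -- not part of this diff.
from typing import Any, Dict

from swh.storage.exc import StorageArgumentException

# Assumed mapping of object type -> allowed context keys and their types.
# Origins carry no context; other object types may record which origin,
# visit, snapshot, release, revision, path or directory they were found in.
CONTEXT_KEYS: Dict[str, Dict[str, type]] = {
    "origin": {},
    "snapshot": {"origin": str, "visit": int},
    "release": {"origin": str, "visit": int, "snapshot": str},
    "revision": {"origin": str, "visit": int, "snapshot": str, "release": str},
    "directory": {
        "origin": str,
        "visit": int,
        "snapshot": str,
        "release": str,
        "revision": str,
    },
    "content": {
        "origin": str,
        "visit": int,
        "snapshot": str,
        "release": str,
        "revision": str,
        "path": bytes,
        "directory": str,
    },
}


def check_extrinsic_metadata_context(object_type: str, context: Dict[str, Any]) -> None:
    """Reject context keys that are unknown for this object type, or whose
    value has the wrong type."""
    key_types = CONTEXT_KEYS[object_type]
    for key, value in context.items():
        if key not in key_types:
            raise StorageArgumentException(
                f"Unknown context key {key!r} for {object_type} metadata"
            )
        if not isinstance(value, key_types[key]):
            raise StorageArgumentException(
                f"Context key {key!r} must be of type {key_types[key].__name__}"
            )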
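
For reference, a minimal, hypothetical usage sketch of the origin metadata API touched by this change. It assumes an already-configured swh.storage instance named storage; the authority, fetcher, origin URL, format and metadata values are illustrative only. The pagination loop relies on origin_metadata_get() returning an opaque next_page_token (the msgpack-encoded cursor built above) whenever more than limit rows match.

# Hypothetical usage sketch -- names and values are illustrative only.
import datetime

# `storage` is assumed to be an already-configured swh.storage instance.
authority = {"type": "forge", "url": "https://forge.example.org"}   # hypothetical
fetcher = {"name": "example-metadata-loader", "version": "1.0.0"}   # hypothetical

# The authority and fetcher must be registered before metadata can be added.
storage.metadata_authority_add(authority["type"], authority["url"], metadata={})
storage.metadata_fetcher_add(fetcher["name"], fetcher["version"], metadata={})

storage.origin_metadata_add(
    "https://example.org/repo.git",                   # origin URL (hypothetical)
    datetime.datetime.now(tz=datetime.timezone.utc),  # discovery date
    authority,
    fetcher,
    "application/json",                               # format (hypothetical)
    b'{"description": "example"}',                    # metadata, must be bytes
)

# Page through everything this authority knows about the origin.
page_token = None
while True:
    result = storage.origin_metadata_get(
        "https://example.org/repo.git",
        authority,
        page_token=page_token,
        limit=10,
    )
    for entry in result["results"]:
        print(entry["discovery_date"], entry["format"], entry["metadata"])
    page_token = result["next_page_token"]
    if page_token is None:
        break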