diff --git a/requirements-swh.txt b/requirements-swh.txt index 336e8cf5..b9282ae0 100644 --- a/requirements-swh.txt +++ b/requirements-swh.txt @@ -1,4 +1,4 @@ -swh.core[db,http] >= 2 +swh.core[db,http] >= 2.9 swh.counters >= v0.8.0 swh.model >= 6.0.0 swh.objstorage >= 0.2.2 diff --git a/swh/storage/postgresql/db.py b/swh/storage/postgresql/db.py index 969fd8e9..9edf8b94 100644 --- a/swh/storage/postgresql/db.py +++ b/swh/storage/postgresql/db.py @@ -1,1570 +1,1543 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import datetime import logging import random from typing import Any, Dict, Iterable, List, Optional, Tuple from swh.core.db import BaseDb from swh.core.db.db_utils import execute_values_generator from swh.core.db.db_utils import jsonize as _jsonize from swh.core.db.db_utils import stored_procedure from swh.model.hashutil import DEFAULT_ALGORITHMS from swh.model.model import SHA1_SIZE, OriginVisit, OriginVisitStatus, Sha1Git from swh.model.swhids import ObjectType from swh.storage.interface import ListOrder logger = logging.getLogger(__name__) def jsonize(d): return _jsonize(dict(d) if d is not None else None) class Db(BaseDb): """Proxy to the SWH DB, with wrappers around stored procedures""" - current_version = 182 - def mktemp_dir_entry(self, entry_type, cur=None): self._cursor(cur).execute( "SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),) ) @stored_procedure("swh_mktemp_revision") def mktemp_revision(self, cur=None): pass @stored_procedure("swh_mktemp_release") def mktemp_release(self, cur=None): pass @stored_procedure("swh_mktemp_snapshot_branch") def mktemp_snapshot_branch(self, cur=None): pass @stored_procedure("swh_content_add") def content_add_from_temp(self, cur=None): pass @stored_procedure("swh_directory_add") def directory_add_from_temp(self, cur=None): pass @stored_procedure("swh_skipped_content_add") def skipped_content_add_from_temp(self, cur=None): pass @stored_procedure("swh_revision_add") def revision_add_from_temp(self, cur=None): pass @stored_procedure("swh_extid_add") def extid_add_from_temp(self, cur=None): pass @stored_procedure("swh_release_add") def release_add_from_temp(self, cur=None): pass def content_update_from_temp(self, keys_to_update, cur=None): cur = self._cursor(cur) cur.execute( """select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update ) content_get_metadata_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "status", ] content_add_keys = content_get_metadata_keys + ["ctime"] skipped_content_keys = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "reason", "status", "origin", ] def content_get_metadata_from_hashes( self, hashes: List[bytes], algo: str, cur=None ): cur = self._cursor(cur) assert algo in DEFAULT_ALGORITHMS query = f""" select {", ".join(self.content_get_metadata_keys)} from (values %s) as t (hash) inner join content on (content.{algo}=hash) """ yield from execute_values_generator( cur, query, ((hash_,) for hash_ in hashes), ) def content_get_range(self, start, end, limit=None, cur=None): """Retrieve contents within range [start, end].""" cur = self._cursor(cur) query = """select %s from content where %%s <= sha1 and sha1 <= %%s order by sha1 limit %%s""" % ", ".join( self.content_get_metadata_keys ) cur.execute(query, (start, end, limit)) yield from cur content_hash_keys = 
["sha1", "sha1_git", "sha256", "blake2s256"] def content_missing_from_list(self, contents, cur=None): cur = self._cursor(cur) keys = ", ".join(self.content_hash_keys) equality = " AND ".join( ("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(%s) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE %s ) """ % (keys, keys, equality), (tuple(c[key] for key in self.content_hash_keys) for c in contents), ) def content_missing_per_sha1(self, sha1s, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT t.sha1 FROM (VALUES %s) AS t(sha1) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE c.sha1 = t.sha1 )""", ((sha1,) for sha1 in sha1s), ) def content_missing_per_sha1_git(self, contents, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git) WHERE NOT EXISTS ( SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git )""", ((sha1,) for sha1 in contents), ) def skipped_content_missing(self, contents, cur=None): if not contents: return [] cur = self._cursor(cur) query = """SELECT * FROM (VALUES %s) AS t (%s) WHERE not exists (SELECT 1 FROM skipped_content s WHERE s.sha1 is not distinct from t.sha1::sha1 and s.sha1_git is not distinct from t.sha1_git::sha1 and s.sha256 is not distinct from t.sha256::bytea);""" % ( (", ".join("%s" for _ in contents)), ", ".join(self.content_hash_keys), ) cur.execute( query, [tuple(cont[key] for key in self.content_hash_keys) for cont in contents], ) yield from cur def snapshot_exists(self, snapshot_id, cur=None): """Check whether a snapshot with the given id exists""" cur = self._cursor(cur) cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,)) return bool(cur.fetchone()) def snapshot_missing_from_list(self, snapshots, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM snapshot d WHERE d.id = t.id ) """, ((id,) for id in snapshots), ) def snapshot_add(self, snapshot_id, cur=None): """Add a snapshot from the temporary table""" cur = self._cursor(cur) cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,)) snapshot_count_cols = ["target_type", "count"] def snapshot_count_branches( self, snapshot_id, branch_name_exclude_prefix=None, cur=None, ): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_count_branches(%%s, %%s) """ % ", ".join( self.snapshot_count_cols ) cur.execute(query, (snapshot_id, branch_name_exclude_prefix)) yield from cur snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"] def snapshot_get_by_id( self, snapshot_id, branches_from=b"", branches_count=None, target_types=None, branch_name_include_substring=None, branch_name_exclude_prefix=None, cur=None, ): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[], %%s, %%s) """ % ", ".join( self.snapshot_get_cols ) cur.execute( query, ( snapshot_id, branches_from, branches_count, target_types, branch_name_include_substring, branch_name_exclude_prefix, ), ) yield from cur def snapshot_get_random(self, cur=None): return self._get_random_row_from_table("snapshot", ["id"], "id", cur) content_find_cols = [ "sha1", "sha1_git", "sha256", "blake2s256", "length", "ctime", "status", ] def content_find( self, sha1: Optional[bytes] = None, sha1_git: Optional[bytes] = None, sha256: Optional[bytes] = None, blake2s256: 
Optional[bytes] = None,
        cur=None,
    ):
        """Find contents by one or a combination of the checksums sha1,
        sha1_git, sha256 or blake2s256.

        Args:
            sha1: sha1 of the content
            sha1_git: the sha1 of the content, computed `a la git`
            sha256: sha256 of the content
            blake2s256: blake2s256 of the content

        Returns:
            The list of rows (one per content_find_cols: sha1, sha1_git,
            sha256, blake2s256, length, ctime, status) matching all the
            provided checksums, possibly empty.

        """
        cur = self._cursor(cur)
        checksum_dict = {
            "sha1": sha1,
            "sha1_git": sha1_git,
            "sha256": sha256,
            "blake2s256": blake2s256,
        }

        query_parts = [f"SELECT {','.join(self.content_find_cols)} FROM content WHERE "]
        query_params = []
        where_parts = []
        # Filter on only the checksums that were actually provided
        for algorithm in checksum_dict:
            if checksum_dict[algorithm] is not None:
                where_parts.append(f"{algorithm} = %s")
                query_params.append(checksum_dict[algorithm])
        query_parts.append(" AND ".join(where_parts))
        query = "\n".join(query_parts)
        cur.execute(query, query_params)
        content = cur.fetchall()
        return content

    def content_get_random(self, cur=None):
        return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur)

    def directory_missing_from_list(self, directories, cur=None):
        cur = self._cursor(cur)
        yield from execute_values_generator(
            cur,
            """
            SELECT id FROM (VALUES %s) as t(id)
            WHERE NOT EXISTS (
                SELECT 1 FROM directory d WHERE d.id = t.id
            )
            """,
            ((id,) for id in directories),
        )

    directory_ls_cols = [
        "dir_id",
        "type",
        "target",
        "name",
        "perms",
        "status",
        "sha1",
        "sha1_git",
        "sha256",
        "length",
    ]

    def directory_walk_one(self, directory, cur=None):
        cur = self._cursor(cur)
        cols = ", ".join(self.directory_ls_cols)
        query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols
        cur.execute(query, (directory,))
        yield from cur

    def directory_walk(self, directory, cur=None):
        cur = self._cursor(cur)
        cols = ", ".join(self.directory_ls_cols)
        query = "SELECT %s FROM swh_directory_walk(%%s)" % cols
        cur.execute(query, (directory,))
        yield from cur

    def directory_entry_get_by_path(self, directory, paths, cur=None):
        """Retrieve a directory entry by path."""
        cur = self._cursor(cur)

        cols = ", ".join(self.directory_ls_cols)
        query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols
        cur.execute(query, (directory, paths))

        data = cur.fetchone()
        if set(data) == {None}:
            return None
        return data

    directory_get_entries_cols = ["type", "target", "name", "perms"]

    def directory_get_entries(self, directory: Sha1Git, cur=None) -> List[Tuple]:
        cur = self._cursor(cur)
        cur.execute(
            "SELECT * FROM swh_directory_get_entries(%s::sha1_git)", (directory,)
        )
        return list(cur)

    def directory_get_raw_manifest(
        self, directory_ids: List[Sha1Git], cur=None
    ) -> Iterable[Tuple[Sha1Git, bytes]]:
        cur = self._cursor(cur)
        yield from execute_values_generator(
            cur,
            """
            SELECT t.id, raw_manifest FROM (VALUES %s) as t(id)
            INNER JOIN directory ON (t.id=directory.id)
            """,
            ((id_,) for id_ in directory_ids),
        )

    def directory_get_random(self, cur=None):
        return self._get_random_row_from_table("directory", ["id"], "id", cur)

    def revision_missing_from_list(self, revisions, cur=None):
        cur = self._cursor(cur)
        yield from execute_values_generator(
            cur,
            """
            SELECT id FROM (VALUES %s) as t(id)
            WHERE NOT EXISTS (
                SELECT 1 FROM revision r WHERE r.id = t.id
            )
            """,
            ((id,) for id in revisions),
        )

    revision_add_cols = [
        "id",
        "date",
        "date_offset",
        "date_neg_utc_offset",
        "date_offset_bytes",
        "committer_date",
        "committer_date_offset",
        "committer_date_neg_utc_offset",
        "committer_date_offset_bytes",
        "type",
        "directory",
        "message",
        "author_fullname",
        "author_name",
        "author_email",
        "committer_fullname",
"committer_name", "committer_email", "metadata", "synthetic", "extra_headers", "raw_manifest", ] revision_get_cols = revision_add_cols + ["parents"] def origin_visit_add(self, origin, ts, type, cur=None): """Add a new origin_visit for origin origin at timestamp ts. Args: origin: origin concerned by the visit ts: the date of the visit type: type of loader for the visit Returns: The new visit index step for that origin """ cur = self._cursor(cur) self._cursor(cur).execute( "SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type) ) return cur.fetchone()[0] origin_visit_status_cols = [ "origin", "visit", "date", "type", "status", "snapshot", "metadata", ] def origin_visit_status_add( self, visit_status: OriginVisitStatus, cur=None ) -> None: """Add new origin visit status""" assert self.origin_visit_status_cols[0] == "origin" assert self.origin_visit_status_cols[-1] == "metadata" cols = self.origin_visit_status_cols[1:-1] cur = self._cursor(cur) cur.execute( f"WITH origin_id as (select id from origin where url=%s) " f"INSERT INTO origin_visit_status " f"(origin, {', '.join(cols)}, metadata) " f"VALUES ((select id from origin_id), " f"{', '.join(['%s']*len(cols))}, %s) " f"ON CONFLICT (origin, visit, date) do nothing", [visit_status.origin] + [getattr(visit_status, key) for key in cols] + [jsonize(visit_status.metadata)], ) origin_visit_cols = ["origin", "visit", "date", "type"] def origin_visit_add_with_id(self, origin_visit: OriginVisit, cur=None) -> None: """Insert origin visit when id are already set""" ov = origin_visit assert ov.visit is not None cur = self._cursor(cur) query = """INSERT INTO origin_visit ({cols}) VALUES ((select id from origin where url=%s), {values}) ON CONFLICT (origin, visit) DO NOTHING""".format( cols=", ".join(self.origin_visit_cols), values=", ".join("%s" for col in self.origin_visit_cols[1:]), ) cur.execute(query, (ov.origin, ov.visit, ov.date, ov.type)) origin_visit_get_cols = [ "origin", "visit", "date", "type", "status", "metadata", "snapshot", ] origin_visit_select_cols = [ "o.url AS origin", "ov.visit", "ov.date", "ov.type AS type", "ovs.status", "ovs.snapshot", "ovs.metadata", ] origin_visit_status_select_cols = [ "o.url AS origin", "ovs.visit", "ovs.date", "ovs.type AS type", "ovs.status", "ovs.snapshot", "ovs.metadata", ] def _make_origin_visit_status( self, row: Optional[Tuple[Any]] ) -> Optional[Dict[str, Any]]: """Make an origin_visit_status dict out of a row""" if not row: return None return dict(zip(self.origin_visit_status_cols, row)) def origin_visit_status_get_latest( self, origin_url: str, visit: int, allowed_statuses: Optional[List[str]] = None, require_snapshot: bool = False, cur=None, ) -> Optional[Dict[str, Any]]: """Given an origin visit id, return its latest origin_visit_status""" cur = self._cursor(cur) query_parts = [ "SELECT %s" % ", ".join(self.origin_visit_status_select_cols), "FROM origin_visit_status ovs ", "INNER JOIN origin o ON o.id = ovs.origin", ] query_parts.append("WHERE o.url = %s") query_params: List[Any] = [origin_url] query_parts.append("AND ovs.visit = %s") query_params.append(visit) if require_snapshot: query_parts.append("AND ovs.snapshot is not null") if allowed_statuses: query_parts.append("AND ovs.status IN %s") query_params.append(tuple(allowed_statuses)) query_parts.append("ORDER BY ovs.date DESC LIMIT 1") query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) row = cur.fetchone() return self._make_origin_visit_status(row) def origin_visit_status_get_range( self, origin: str, visit: int, 
date_from: Optional[datetime.datetime], order: ListOrder, limit: int, cur=None, ): """Retrieve visit_status rows for visit (origin, visit) in a paginated way.""" cur = self._cursor(cur) query_parts = [ f"SELECT {', '.join(self.origin_visit_status_select_cols)} " "FROM origin_visit_status ovs ", "INNER JOIN origin o ON o.id = ovs.origin ", ] query_parts.append("WHERE o.url = %s AND ovs.visit = %s ") query_params: List[Any] = [origin, visit] if date_from is not None: op_comparison = ">=" if order == ListOrder.ASC else "<=" query_parts.append(f"and ovs.date {op_comparison} %s ") query_params.append(date_from) if order == ListOrder.ASC: query_parts.append("ORDER BY ovs.date ASC ") elif order == ListOrder.DESC: query_parts.append("ORDER BY ovs.date DESC ") else: assert False query_parts.append("LIMIT %s") query_params.append(limit) query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) yield from cur def origin_visit_get_range( self, origin: str, visit_from: int, order: ListOrder, limit: int, cur=None, ): cur = self._cursor(cur) origin_visit_cols = ["o.url as origin", "ov.visit", "ov.date", "ov.type"] query_parts = [ f"SELECT {', '.join(origin_visit_cols)} FROM origin_visit ov ", "INNER JOIN origin o ON o.id = ov.origin ", ] query_parts.append("WHERE ov.origin = (select id from origin where url = %s)") query_params: List[Any] = [origin] if visit_from > 0: op_comparison = ">" if order == ListOrder.ASC else "<" query_parts.append(f"and ov.visit {op_comparison} %s") query_params.append(visit_from) if order == ListOrder.ASC: query_parts.append("ORDER BY ov.visit ASC") elif order == ListOrder.DESC: query_parts.append("ORDER BY ov.visit DESC") query_parts.append("LIMIT %s") query_params.append(limit) query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) yield from cur def origin_visit_status_get_all_in_range( self, origin: str, allowed_statuses: Optional[List[str]], require_snapshot: bool, visit_from: int, visit_to: int, cur=None, ): cur = self._cursor(cur) query_parts = [ f"SELECT {', '.join(self.origin_visit_status_select_cols)}", " FROM origin_visit_status ovs", " INNER JOIN origin o ON o.id = ovs.origin", ] query_parts.append("WHERE ovs.origin = (select id from origin where url = %s)") query_params: List[Any] = [origin] assert visit_from <= visit_to query_parts.append("AND ovs.visit >= %s") query_params.append(visit_from) query_parts.append("AND ovs.visit <= %s") query_params.append(visit_to) if require_snapshot: query_parts.append("AND ovs.snapshot is not null") if allowed_statuses: query_parts.append("AND ovs.status IN %s") query_params.append(tuple(allowed_statuses)) query_parts.append("ORDER BY ovs.visit ASC, ovs.date ASC") query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) yield from cur def origin_visit_get(self, origin_id, visit_id, cur=None): """Retrieve information on visit visit_id of origin origin_id. 
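The range methods above all use the same keyset-pagination idiom: filter on the cursor column with a direction-dependent comparison operator, order by that column, and fetch one extra row to detect whether a next page exists. A minimal standalone sketch of the idiom (the `visits_page` helper and its column list are illustrative, not part of this diff; an open psycopg2 connection is assumed):

```python
from swh.storage.interface import ListOrder

def visits_page(conn, origin_url, visit_from, order, limit):
    # Keyset pagination: compare on the cursor column instead of OFFSET.
    op = ">" if order == ListOrder.ASC else "<"
    direction = "ASC" if order == ListOrder.ASC else "DESC"
    with conn.cursor() as cur:
        cur.execute(
            f"""
            SELECT ov.visit, ov.date, ov.type
            FROM origin_visit ov
            INNER JOIN origin o ON o.id = ov.origin
            WHERE o.url = %s AND ov.visit {op} %s
            ORDER BY ov.visit {direction}
            LIMIT %s
            """,
            (origin_url, visit_from, limit + 1),  # +1 row probes for a next page
        )
        rows = cur.fetchall()
    next_cursor = rows[limit][0] if len(rows) > limit else None
    return rows[:limit], next_cursor
```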
Args: origin_id: the origin concerned visit_id: The visit step for that origin Returns: The origin_visit information """ cur = self._cursor(cur) query = """\ SELECT %s FROM origin_visit ov INNER JOIN origin o ON o.id = ov.origin INNER JOIN origin_visit_status ovs USING (origin, visit) WHERE ov.origin = (select id from origin where url = %%s) AND ov.visit = %%s ORDER BY ovs.date DESC LIMIT 1 """ % ( ", ".join(self.origin_visit_select_cols) ) cur.execute(query, (origin_id, visit_id)) r = cur.fetchall() if not r: return None return r[0] def origin_visit_find_by_date(self, origin, visit_date, cur=None): cur = self._cursor(cur) cur.execute( "SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date) ) rows = cur.fetchall() if rows: visit = dict(zip(self.origin_visit_get_cols, rows[0])) visit["origin"] = origin return visit def origin_visit_exists(self, origin_id, visit_id, cur=None): """Check whether an origin visit with the given ids exists""" cur = self._cursor(cur) query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s" cur.execute(query, (origin_id, visit_id)) return bool(cur.fetchone()) def origin_visit_get_latest( self, origin_id: str, type: Optional[str], allowed_statuses: Optional[Iterable[str]], require_snapshot: bool, cur=None, ): """Retrieve the most recent origin_visit of the given origin, with optional filters. Args: origin_id: the origin concerned type: Optional visit type to filter on allowed_statuses: the visit statuses allowed for the returned visit require_snapshot (bool): If True, only a visit with a known snapshot will be returned. Returns: The origin_visit information, or None if no visit matches. """ cur = self._cursor(cur) query_parts = [ "SELECT %s" % ", ".join(self.origin_visit_select_cols), "FROM origin_visit ov ", "INNER JOIN origin o ON o.id = ov.origin", "INNER JOIN origin_visit_status ovs USING (origin, visit)", ] query_parts.append( "WHERE ov.origin = (SELECT id FROM origin o WHERE o.url = %s)" ) query_params: List[Any] = [origin_id] if type is not None: query_parts.append("AND ov.type = %s") query_params.append(type) if require_snapshot: query_parts.append("AND ovs.snapshot is not null") if allowed_statuses: query_parts.append("AND ovs.status IN %s") query_params.append(tuple(allowed_statuses)) query_parts.append("ORDER BY ov.visit DESC, ovs.date DESC LIMIT 1") query = "\n".join(query_parts) cur.execute(query, tuple(query_params)) r = cur.fetchone() if not r: return None return r def origin_visit_get_random(self, type, cur=None): """Randomly select one origin visit that was full and in the last 3 months """ cur = self._cursor(cur) columns = ",".join(self.origin_visit_select_cols) query = f"""select {columns} from origin_visit ov inner join origin o on ov.origin=o.id inner join origin_visit_status ovs using (origin, visit) where ovs.status='full' and ov.type=%s and ov.date > now() - '3 months'::interval and random() < 0.1 limit 1 """ cur.execute(query, (type,)) return cur.fetchone() @staticmethod def mangle_query_key(key, main_table, ignore_displayname=False): if key == "id": return "t.id" if key == "parents": return """ ARRAY( SELECT rh.parent_id::bytea FROM revision_history rh WHERE rh.id = t.id ORDER BY rh.parent_rank )""" if "_" not in key: return f"{main_table}.{key}" head, tail = key.split("_", 1) if head not in ("author", "committer") or tail not in ( "name", "email", "id", "fullname", ): return f"{main_table}.{key}" if ignore_displayname: return f"{head}.{tail}" else: if tail == "id": return f"{head}.{tail}" elif tail in ("name", 
"email"): # These fields get populated again from fullname by # converters.db_to_author if they're None, so we can just NULLify them # when displayname is set. return ( f"CASE" f" WHEN {head}.displayname IS NULL THEN {head}.{tail} " f" ELSE NULL " f"END AS {key}" ) elif tail == "fullname": return f"COALESCE({head}.displayname, {head}.fullname) AS {key}" assert False, "All cases should have been handled here" def revision_get_from_list(self, revisions, ignore_displayname=False, cur=None): cur = self._cursor(cur) query_keys = ", ".join( self.mangle_query_key(k, "revision", ignore_displayname) for k in self.revision_get_cols ) yield from execute_values_generator( cur, """ SELECT %s FROM (VALUES %%s) as t(sortkey, id) LEFT JOIN revision ON t.id = revision.id LEFT JOIN person author ON revision.author = author.id LEFT JOIN person committer ON revision.committer = committer.id ORDER BY sortkey """ % query_keys, ((sortkey, id) for sortkey, id in enumerate(revisions)), ) extid_cols = ["extid", "extid_version", "extid_type", "target", "target_type"] def extid_get_from_extid_list( self, extid_type: str, ids: List[bytes], version: Optional[int] = None, cur=None ): cur = self._cursor(cur) query_keys = ", ".join( self.mangle_query_key(k, "extid") for k in self.extid_cols ) filter_query = "" if version is not None: filter_query = cur.mogrify( f"WHERE extid_version={version}", (version,) ).decode() sql = f""" SELECT {query_keys} FROM (VALUES %s) as t(sortkey, extid, extid_type) LEFT JOIN extid USING (extid, extid_type) {filter_query} ORDER BY sortkey """ yield from execute_values_generator( cur, sql, (((sortkey, extid, extid_type) for sortkey, extid in enumerate(ids))), ) def extid_get_from_swhid_list( self, target_type: str, ids: List[bytes], extid_version: Optional[int] = None, extid_type: Optional[str] = None, cur=None, ): cur = self._cursor(cur) target_type = ObjectType( target_type ).name.lower() # aka "rev" -> "revision", ... 
query_keys = ", ".join( self.mangle_query_key(k, "extid") for k in self.extid_cols ) filter_query = "" if extid_version is not None and extid_type is not None: filter_query = cur.mogrify( "WHERE extid_version=%s AND extid_type=%s", ( extid_version, extid_type, ), ).decode() sql = f""" SELECT {query_keys} FROM (VALUES %s) as t(sortkey, target, target_type) LEFT JOIN extid USING (target, target_type) {filter_query} ORDER BY sortkey """ yield from execute_values_generator( cur, sql, (((sortkey, target, target_type) for sortkey, target in enumerate(ids))), template=b"(%s,%s,%s::object_type)", ) def revision_log( self, root_revisions, ignore_displayname=False, limit=None, cur=None ): cur = self._cursor(cur) query = """\ SELECT %s FROM swh_revision_log( "root_revisions" := %%s, num_revs := %%s, "ignore_displayname" := %%s )""" % ", ".join( self.revision_get_cols ) cur.execute(query, (root_revisions, limit, ignore_displayname)) yield from cur revision_shortlog_cols = ["id", "parents"] def revision_shortlog(self, root_revisions, limit=None, cur=None): cur = self._cursor(cur) query = """SELECT %s FROM swh_revision_list(%%s, %%s) """ % ", ".join( self.revision_shortlog_cols ) cur.execute(query, (root_revisions, limit)) yield from cur def revision_get_random(self, cur=None): return self._get_random_row_from_table("revision", ["id"], "id", cur) def release_missing_from_list(self, releases, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ SELECT id FROM (VALUES %s) as t(id) WHERE NOT EXISTS ( SELECT 1 FROM release r WHERE r.id = t.id ) """, ((id,) for id in releases), ) object_find_by_sha1_git_cols = ["sha1_git", "type"] def object_find_by_sha1_git(self, ids, cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, """ WITH t (sha1_git) AS (VALUES %s), known_objects as (( select id as sha1_git, 'release'::object_type as type, object_id from release r where exists (select 1 from t where t.sha1_git = r.id) ) union all ( select id as sha1_git, 'revision'::object_type as type, object_id from revision r where exists (select 1 from t where t.sha1_git = r.id) ) union all ( select id as sha1_git, 'directory'::object_type as type, object_id from directory d where exists (select 1 from t where t.sha1_git = d.id) ) union all ( select sha1_git as sha1_git, 'content'::object_type as type, object_id from content c where exists (select 1 from t where t.sha1_git = c.sha1_git) )) select t.sha1_git as sha1_git, k.type from t left join known_objects k on t.sha1_git = k.sha1_git """, ((id,) for id in ids), ) def stat_counters(self, cur=None): cur = self._cursor(cur) cur.execute("SELECT * FROM swh_stat_counters()") yield from cur def origin_add(self, url, cur=None): """Insert a new origin and return the new identifier.""" insert = """INSERT INTO origin (url) values (%s) ON CONFLICT DO NOTHING """ cur.execute(insert, (url,)) return cur.rowcount origin_cols = ["url"] def origin_get_by_url(self, origins, cur=None): """Retrieve origin `(type, url)` from urls if found.""" cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(url) LEFT JOIN origin ON t.url = origin.url """ % ",".join( "origin." + col for col in self.origin_cols ) yield from execute_values_generator(cur, query, ((url,) for url in origins)) def origin_get_by_sha1(self, sha1s, cur=None): """Retrieve origin urls from sha1s if found.""" cur = self._cursor(cur) query = """SELECT %s FROM (VALUES %%s) as t(sha1) LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1') """ % ",".join( "origin." 
+ col for col in self.origin_cols ) yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s)) def origin_id_get_by_url(self, origins, cur=None): """Retrieve origin `(type, url)` from urls if found.""" cur = self._cursor(cur) query = """SELECT id FROM (VALUES %s) as t(url) LEFT JOIN origin ON t.url = origin.url """ for row in execute_values_generator(cur, query, ((url,) for url in origins)): yield row[0] origin_get_range_cols = ["id", "url"] def origin_get_range(self, origin_from: int = 1, origin_count: int = 100, cur=None): """Retrieve ``origin_count`` origins whose ids are greater or equal than ``origin_from``. Origins are sorted by id before retrieving them. Args: origin_from: the minimum id of origins to retrieve origin_count: the maximum number of origins to retrieve """ cur = self._cursor(cur) query = """SELECT %s FROM origin WHERE id >= %%s ORDER BY id LIMIT %%s """ % ",".join( self.origin_get_range_cols ) cur.execute(query, (origin_from, origin_count)) yield from cur def _origin_query( self, url_pattern, count=False, offset=0, limit=50, regexp=False, with_visit=False, visit_types=None, cur=None, ): """ Method factorizing query creation for searching and counting origins. """ cur = self._cursor(cur) if count: origin_cols = "COUNT(*)" order_clause = "" else: origin_cols = ",".join(self.origin_cols) order_clause = "ORDER BY id" if not regexp: operator = "ILIKE" query_params = [f"%{url_pattern}%"] else: operator = "~*" query_params = [url_pattern] query = f""" WITH filtered_origins AS ( SELECT * FROM origin WHERE url {operator} %s {order_clause} ) SELECT {origin_cols} FROM filtered_origins AS o """ if with_visit or visit_types: visit_predicat = ( """ INNER JOIN origin_visit_status ovs USING (origin, visit) INNER JOIN snapshot ON ovs.snapshot=snapshot.id """ if with_visit else "" ) type_predicat = ( f"AND ov.type=any(ARRAY{visit_types})" if visit_types else "" ) query += f""" WHERE EXISTS ( SELECT 1 FROM origin_visit ov {visit_predicat} WHERE ov.origin=o.id {type_predicat} ) """ if not count: query += "OFFSET %s LIMIT %s" query_params.extend([offset, limit]) cur.execute(query, query_params) def origin_search( self, url_pattern: str, offset: int = 0, limit: int = 50, regexp: bool = False, with_visit: bool = False, visit_types: Optional[List[str]] = None, cur=None, ): """Search for origins whose urls contain a provided string pattern or match a provided regular expression. The search is performed in a case insensitive way. Args: url_pattern: the string pattern to search for in origin urls offset: number of found origins to skip before returning results limit: the maximum number of found origins to return regexp: if True, consider the provided pattern as a regular expression and returns origins whose urls match it with_visit: if True, filter out origins with no visit """ self._origin_query( url_pattern, offset=offset, limit=limit, regexp=regexp, with_visit=with_visit, visit_types=visit_types, cur=cur, ) yield from cur def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None): """Count origins whose urls contain a provided string pattern or match a provided regular expression. The pattern search in origin urls is performed in a case insensitive way. 
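For reference, the operator switch in `_origin_query` above condenses to the following (hypothetical standalone helper, not part of this diff; `cur` is an open psycopg2 cursor). Plain searches wrap the pattern in `%...%` and use `ILIKE`, while regexp searches hand the pattern to PostgreSQL's case-insensitive match operator `~*`; in both cases the pattern itself is bound as a parameter:

```python
def search_origins(cur, url_pattern, regexp=False, limit=50):
    if regexp:
        operator, param = "~*", url_pattern          # case-insensitive regexp match
    else:
        operator, param = "ILIKE", f"%{url_pattern}%"  # case-insensitive substring
    cur.execute(
        f"SELECT url FROM origin WHERE url {operator} %s LIMIT %s",
        (param, limit),
    )
    return [row[0] for row in cur.fetchall()]
```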
        Args:
            url_pattern (str): the string pattern to search for in origin urls
            regexp (bool): if True, consider the provided pattern as a regular
                expression and return origins whose urls match it
            with_visit (bool): if True, filter out origins with no visit

        """
        self._origin_query(
            url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur
        )
        return cur.fetchone()[0]

    release_add_cols = [
        "id",
        "target",
        "target_type",
        "date",
        "date_offset",
        "date_neg_utc_offset",
        "date_offset_bytes",
        "name",
        "comment",
        "synthetic",
        "raw_manifest",
        "author_fullname",
        "author_name",
        "author_email",
    ]
    release_get_cols = release_add_cols

    def origin_snapshot_get_all(self, origin_url: str, cur=None) -> Iterable[Sha1Git]:
        cur = self._cursor(cur)
        # Bind origin_url as a query parameter rather than interpolating it
        # into the SQL string, which would be an injection vector.
        query = """\
        SELECT DISTINCT snapshot FROM origin_visit_status ovs
        INNER JOIN origin o ON o.id = ovs.origin
        WHERE o.url = %s and snapshot IS NOT NULL;
        """
        cur.execute(query, (origin_url,))
        yield from (row[0] for row in cur)

    def release_get_from_list(self, releases, ignore_displayname=False, cur=None):
        cur = self._cursor(cur)
        query_keys = ", ".join(
            self.mangle_query_key(k, "release", ignore_displayname)
            for k in self.release_get_cols
        )
        yield from execute_values_generator(
            cur,
            """
            SELECT %s
            FROM (VALUES %%s) as t(sortkey, id)
            LEFT JOIN release ON t.id = release.id
            LEFT JOIN person author ON release.author = author.id
            ORDER BY sortkey
            """
            % query_keys,
            ((sortkey, id) for sortkey, id in enumerate(releases)),
        )

    def release_get_random(self, cur=None):
        return self._get_random_row_from_table("release", ["id"], "id", cur)

    _raw_extrinsic_metadata_context_cols = [
        "origin",
        "visit",
        "snapshot",
        "release",
        "revision",
        "path",
        "directory",
    ]
    """The list of context columns for all artifact types."""

    _raw_extrinsic_metadata_insert_cols = [
        "id",
        "type",
        "target",
        "authority_id",
        "fetcher_id",
        "discovery_date",
        "format",
        "metadata",
        *_raw_extrinsic_metadata_context_cols,
    ]
    """List of columns of the raw_extrinsic_metadata table, used when writing
    metadata."""

    _raw_extrinsic_metadata_insert_query = f"""
        INSERT INTO raw_extrinsic_metadata
            ({', '.join(_raw_extrinsic_metadata_insert_cols)})
        VALUES ({', '.join('%s' for _ in _raw_extrinsic_metadata_insert_cols)})
        ON CONFLICT (id)
        DO NOTHING
    """

    raw_extrinsic_metadata_get_cols = [
        "raw_extrinsic_metadata.target",
        "raw_extrinsic_metadata.type",
        "discovery_date",
        "metadata_authority.type",
        "metadata_authority.url",
        "metadata_fetcher.id",
        "metadata_fetcher.name",
        "metadata_fetcher.version",
        *_raw_extrinsic_metadata_context_cols,
        "format",
        "raw_extrinsic_metadata.metadata",
    ]
    """List of columns of the raw_extrinsic_metadata, metadata_authority,
    and metadata_fetcher tables, used when reading object metadata."""

    _raw_extrinsic_metadata_select_query = f"""
        SELECT
            {', '.join(raw_extrinsic_metadata_get_cols)}
        FROM raw_extrinsic_metadata
        INNER JOIN metadata_authority
            ON (metadata_authority.id=authority_id)
        INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id)
    """

    def raw_extrinsic_metadata_add(
        self,
        id: bytes,
        type: str,
        target: str,
        discovery_date: datetime.datetime,
        authority_id: int,
        fetcher_id: int,
        format: str,
        metadata: bytes,
        origin: Optional[str],
        visit: Optional[int],
        snapshot: Optional[str],
        release: Optional[str],
        revision: Optional[str],
        path: Optional[bytes],
        directory: Optional[str],
        cur,
    ):
        query = self._raw_extrinsic_metadata_insert_query
        args: Dict[str, Any] = dict(
            id=id,
            type=type,
            target=target,
            authority_id=authority_id,
            fetcher_id=fetcher_id,
            discovery_date=discovery_date,
            format=format,
            metadata=metadata,
origin=origin, visit=visit, snapshot=snapshot, release=release, revision=revision, path=path, directory=directory, ) params = [args[col] for col in self._raw_extrinsic_metadata_insert_cols] cur.execute(query, params) def raw_extrinsic_metadata_get( self, target: str, authority_id: int, after_time: Optional[datetime.datetime], after_fetcher: Optional[int], limit: int, cur, ): query_parts = [self._raw_extrinsic_metadata_select_query] query_parts.append("WHERE raw_extrinsic_metadata.target=%s AND authority_id=%s") args = [target, authority_id] if after_fetcher is not None: assert after_time query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)") args.extend([after_time, after_fetcher]) elif after_time is not None: query_parts.append("AND discovery_date > %s") args.append(after_time) query_parts.append("ORDER BY discovery_date, fetcher_id") if limit: query_parts.append("LIMIT %s") args.append(limit) cur.execute(" ".join(query_parts), args) yield from cur def raw_extrinsic_metadata_get_by_ids(self, ids: List[Sha1Git], cur=None): cur = self._cursor(cur) yield from execute_values_generator( cur, self._raw_extrinsic_metadata_select_query + "INNER JOIN (VALUES %s) AS t(id) ON t.id = raw_extrinsic_metadata.id", [(id_,) for id_ in ids], ) def raw_extrinsic_metadata_get_authorities(self, id: str, cur=None): cur = self._cursor(cur) cur.execute( """ SELECT DISTINCT metadata_authority.type, metadata_authority.url FROM raw_extrinsic_metadata INNER JOIN metadata_authority ON (metadata_authority.id=authority_id) WHERE raw_extrinsic_metadata.target = %s """, (id,), ) yield from cur metadata_fetcher_cols = ["name", "version"] def metadata_fetcher_add(self, name: str, version: str, cur=None) -> None: cur = self._cursor(cur) cur.execute( "INSERT INTO metadata_fetcher (name, version) " "VALUES (%s, %s) ON CONFLICT DO NOTHING", (name, version), ) def metadata_fetcher_get(self, name: str, version: str, cur=None): cur = self._cursor(cur) cur.execute( f"SELECT {', '.join(self.metadata_fetcher_cols)} " f"FROM metadata_fetcher " f"WHERE name=%s AND version=%s", (name, version), ) return cur.fetchone() def metadata_fetcher_get_id( self, name: str, version: str, cur=None ) -> Optional[int]: cur = self._cursor(cur) cur.execute( "SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s", (name, version), ) row = cur.fetchone() if row: return row[0] else: return None metadata_authority_cols = ["type", "url"] def metadata_authority_add(self, type: str, url: str, cur=None) -> None: cur = self._cursor(cur) cur.execute( "INSERT INTO metadata_authority (type, url) " "VALUES (%s, %s) ON CONFLICT DO NOTHING", (type, url), ) def metadata_authority_get(self, type: str, url: str, cur=None): cur = self._cursor(cur) cur.execute( f"SELECT {', '.join(self.metadata_authority_cols)} " f"FROM metadata_authority " f"WHERE type=%s AND url=%s", (type, url), ) return cur.fetchone() def metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]: cur = self._cursor(cur) cur.execute( "SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url) ) row = cur.fetchone() if row: return row[0] else: return None def _get_random_row_from_table(self, table_name, cols, id_col, cur=None): random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE)) cur = self._cursor(cur) query = """ (SELECT {cols} FROM {table} WHERE {id_col} >= %s ORDER BY {id_col} LIMIT 1) UNION (SELECT {cols} FROM {table} WHERE {id_col} < %s ORDER BY {id_col} DESC LIMIT 1) LIMIT 1 """.format( cols=", ".join(cols), table=table_name, 
id_col=id_col ) cur.execute(query, (random_sha1, random_sha1)) row = cur.fetchone() if row: return row[0] - - dbversion_cols = ["version", "release", "description"] - - def dbversion(self): - with self.transaction() as cur: - cur.execute( - f""" - SELECT {', '.join(self.dbversion_cols)} - FROM dbversion - ORDER BY version DESC - LIMIT 1 - """ - ) - return dict(zip(self.dbversion_cols, cur.fetchone())) - - def check_dbversion(self): - dbversion = self.dbversion()["version"] - if dbversion != self.current_version: - logger.warning( - "database dbversion (%s) != %s current_version (%s)", - dbversion, - __name__, - self.current_version, - ) - return dbversion == self.current_version diff --git a/swh/storage/postgresql/storage.py b/swh/storage/postgresql/storage.py index b83f04ee..76410e75 100644 --- a/swh/storage/postgresql/storage.py +++ b/swh/storage/postgresql/storage.py @@ -1,1679 +1,1684 @@ # Copyright (C) 2015-2022 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU General Public License version 3, or any later version # See top-level LICENSE file for more information import base64 from collections import defaultdict import contextlib from contextlib import contextmanager import datetime import itertools +import logging import operator from typing import Any, Counter, Dict, Iterable, List, Optional, Sequence, Tuple import attr import psycopg2 import psycopg2.errors import psycopg2.pool from swh.core.api.serializers import msgpack_dumps, msgpack_loads from swh.core.db.common import db_transaction, db_transaction_generator +from swh.core.db.db_utils import swh_db_version from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex from swh.model.model import ( SHA1_SIZE, Content, Directory, DirectoryEntry, ExtID, MetadataAuthority, MetadataAuthorityType, MetadataFetcher, Origin, OriginVisit, OriginVisitStatus, RawExtrinsicMetadata, Release, Revision, Sha1, Sha1Git, SkippedContent, Snapshot, SnapshotBranch, TargetType, ) from swh.model.swhids import ExtendedObjectType, ExtendedSWHID, ObjectType from swh.storage.exc import HashCollision, StorageArgumentException, StorageDBError from swh.storage.interface import ( VISIT_STATUSES, ListOrder, OriginVisitWithStatuses, PagedResult, PartialBranches, ) from swh.storage.objstorage import ObjStorage from swh.storage.utils import ( extract_collision_hash, get_partition_bounds_bytes, map_optional, now, ) from swh.storage.writer import JournalWriter from . 
import converters from .db import Db +logger = logging.getLogger(__name__) + # Max block size of contents to return BULK_BLOCK_CONTENT_LEN_MAX = 10000 EMPTY_SNAPSHOT_ID = hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e") """Identifier for the empty snapshot""" VALIDATION_EXCEPTIONS = ( KeyError, TypeError, ValueError, psycopg2.errors.CheckViolation, psycopg2.errors.IntegrityError, psycopg2.errors.InvalidTextRepresentation, psycopg2.errors.NotNullViolation, psycopg2.errors.NumericValueOutOfRange, psycopg2.errors.UndefinedFunction, # (raised on wrong argument typs) ) """Exceptions raised by postgresql when validation of the arguments failed.""" @contextlib.contextmanager def convert_validation_exceptions(): """Catches postgresql errors related to invalid arguments, and re-raises a StorageArgumentException.""" try: yield except psycopg2.errors.UniqueViolation: # This only happens because of concurrent insertions, but it is # a subclass of IntegrityError; so we need to catch and reraise it # before the next clause converts it to StorageArgumentException. raise except VALIDATION_EXCEPTIONS as e: raise StorageArgumentException(str(e)) class Storage: - """SWH storage proxy, encompassing DB and object storage""" + """SWH storage datastore proxy, encompassing DB and object storage""" + + current_version: int = 183 def __init__( self, db, objstorage, min_pool_conns=1, max_pool_conns=10, journal_writer=None, query_options=None, ): """Instantiate a storage instance backed by a PostgreSQL database and an objstorage. When ``db`` is passed as a connection string, then this module automatically manages a connection pool between ``min_pool_conns`` and ``max_pool_conns``. When ``db`` is an explicit psycopg2 connection, then ``min_pool_conns`` and ``max_pool_conns`` are ignored and the connection is used directly. Args: db: either a libpq connection string, or a psycopg2 connection objstorage: configuration for the backend :class:`ObjStorage` min_pool_conns: min number of connections in the psycopg2 pool max_pool_conns: max number of connections in the psycopg2 pool journal_writer: configuration for the :class:`JournalWriter` query_options: configuration for the sql connections; keys of the dict are the method names decorated with :func:`db_transaction` or :func:`db_transaction_generator` (eg. :func:`content_find`), and values are dicts (config_name, config_value) used to configure the sql connection for the method_name. For example, using:: {"content_get": {"statement_timeout": 5000}} will override the default statement timeout for the :func:`content_get` endpoint from 500ms to 5000ms. See :mod:`swh.core.db.common` for more details. """ try: if isinstance(db, psycopg2.extensions.connection): self._pool = None self._db = Db(db) # See comment below self._db.cursor().execute("SET TIME ZONE 'UTC'") else: self._pool = psycopg2.pool.ThreadedConnectionPool( min_pool_conns, max_pool_conns, db ) self._db = None except psycopg2.OperationalError as e: raise StorageDBError(e) self.journal_writer = JournalWriter(journal_writer) self.objstorage = ObjStorage(objstorage) self.query_options = query_options def get_db(self): if self._db: return self._db else: db = Db.from_pool(self._pool) # Workaround for psycopg2 < 2.9.0 not handling fractional timezones, # which may happen on old revision/release dates on systems configured # with non-UTC timezones. 
# https://www.psycopg.org/docs/usage.html#time-zones-handling db.cursor().execute("SET TIME ZONE 'UTC'") return db def put_db(self, db): if db is not self._db: db.put_conn() @contextmanager def db(self): db = None try: db = self.get_db() yield db finally: if db: self.put_db(db) @db_transaction() def check_config(self, *, check_write: bool, db: Db, cur=None) -> bool: if not self.objstorage.check_config(check_write=check_write): return False - if not db.check_dbversion(): + dbversion = swh_db_version(db.conn.dsn) + if dbversion != self.current_version: + logger.warning( + "database dbversion (%s) != %s current_version (%s)", + dbversion, + __name__, + self.current_version, + ) return False # Check permissions on one of the tables - if check_write: - check = "INSERT" - else: - check = "SELECT" + check = "INSERT" if check_write else "SELECT" cur.execute("select has_table_privilege(current_user, 'content', %s)", (check,)) return cur.fetchone()[0] - @db_transaction() - def get_current_version(self, *, db: Db, cur=None): - """Returns the current code (expected) version""" - return db.current_version - def _content_unique_key(self, hash, db): """Given a hash (tuple or dict), return a unique key from the aggregation of keys. """ keys = db.content_hash_keys if isinstance(hash, tuple): return hash return tuple([hash[k] for k in keys]) def _content_add_metadata(self, db, cur, content): """Add content to the postgresql database but not the object storage.""" # create temporary table for metadata injection db.mktemp("content", cur) db.copy_to( (c.to_dict() for c in content), "tmp_content", db.content_add_keys, cur ) # move metadata in place try: db.content_add_from_temp(cur) except psycopg2.IntegrityError as e: if e.diag.sqlstate == "23505" and e.diag.table_name == "content": message_detail = e.diag.message_detail if message_detail: hash_name, hash_id = extract_collision_hash(message_detail) collision_contents_hashes = [ c.hashes() for c in content if c.get_hash(hash_name) == hash_id ] else: constraint_to_hash_name = { "content_pkey": "sha1", "content_sha1_git_idx": "sha1_git", "content_sha256_idx": "sha256", } hash_name = constraint_to_hash_name.get(e.diag.constraint_name) hash_id = None collision_contents_hashes = None raise HashCollision( hash_name, hash_id, collision_contents_hashes ) from None else: raise def content_add(self, content: List[Content]) -> Dict[str, int]: ctime = now() contents = [attr.evolve(c, ctime=ctime) for c in content] # Must add to the objstorage before the DB and journal. Otherwise: # 1. in case of a crash the DB may "believe" we have the content, but # we didn't have time to write to the objstorage before the crash # 2. the objstorage mirroring, which reads from the journal, may attempt to # read from the objstorage before we finished writing it objstorage_summary = self.objstorage.content_add(contents) with self.db() as db: with db.transaction() as cur: missing = list( self.content_missing( map(Content.to_dict, contents), key_hash="sha1_git", db=db, cur=cur, ) ) contents = [c for c in contents if c.sha1_git in missing] self.journal_writer.content_add(contents) self._content_add_metadata(db, cur, contents) return { "content:add": len(contents), "content:add:bytes": objstorage_summary["content:add:bytes"], } @db_transaction() def content_update( self, contents: List[Dict[str, Any]], keys: List[str] = [], *, db: Db, cur=None ) -> None: # TODO: Add a check on input keys. How to properly implement # this? We don't know yet the new columns. 
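The `check_config` hunk above is the heart of this diff: the hand-rolled `Db.dbversion()`/`check_dbversion()` pair is removed in favor of `swh.core`'s `swh_db_version()`, which reads the deployed schema version directly from a libpq DSN, while the expected version now lives on `Storage.current_version`. A standalone sketch of the same check (the helper name is illustrative; 183 is the `current_version` pinned by this diff):

```python
from swh.core.db.db_utils import swh_db_version

def storage_schema_is_current(dsn: str, expected: int = 183) -> bool:
    # swh_db_version returns the schema version recorded in the database
    # pointed to by the DSN (or None if it cannot be determined).
    return swh_db_version(dsn) == expected
```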
        self.journal_writer.content_update(contents)

        db.mktemp("content", cur)
        select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
        with convert_validation_exceptions():
            db.copy_to(contents, "tmp_content", select_keys, cur)
            db.content_update_from_temp(keys_to_update=keys, cur=cur)

    @db_transaction()
    def content_add_metadata(
        self, content: List[Content], *, db: Db, cur=None
    ) -> Dict[str, int]:
        missing = self.content_missing(
            (c.to_dict() for c in content),
            key_hash="sha1_git",
            db=db,
            cur=cur,
        )
        contents = [c for c in content if c.sha1_git in missing]

        self.journal_writer.content_add_metadata(contents)
        self._content_add_metadata(db, cur, contents)

        return {
            "content:add": len(contents),
        }

    def content_get_data(self, content: Sha1) -> Optional[bytes]:
        # FIXME: Make this method support slicing the `data`
        return self.objstorage.content_get(content)

    @db_transaction()
    def content_get_partition(
        self,
        partition_id: int,
        nb_partitions: int,
        page_token: Optional[str] = None,
        limit: int = 1000,
        *,
        db: Db,
        cur=None,
    ) -> PagedResult[Content]:
        if limit is None:
            raise StorageArgumentException("limit should not be None")
        (start, end) = get_partition_bounds_bytes(
            partition_id, nb_partitions, SHA1_SIZE
        )
        if page_token:
            start = hash_to_bytes(page_token)
        if end is None:
            end = b"\xff" * SHA1_SIZE

        next_page_token: Optional[str] = None
        contents = []
        for counter, row in enumerate(db.content_get_range(start, end, limit + 1, cur)):
            row_d = dict(zip(db.content_get_metadata_keys, row))
            content = Content(**row_d)
            if counter >= limit:
                # take the last content for the next page starting from this
                next_page_token = hash_to_hex(content.sha1)
                break
            contents.append(content)

        assert len(contents) <= limit
        return PagedResult(results=contents, next_page_token=next_page_token)

    @db_transaction(statement_timeout=500)
    def content_get(
        self, contents: List[bytes], algo: str = "sha1", *, db: Db, cur=None
    ) -> List[Optional[Content]]:
        contents_by_hash: Dict[bytes, Optional[Content]] = {}
        if algo not in DEFAULT_ALGORITHMS:
            raise StorageArgumentException(
                f"algo should be one of {','.join(DEFAULT_ALGORITHMS)}"
            )

        rows = db.content_get_metadata_from_hashes(contents, algo, cur)
        key = operator.attrgetter(algo)

        for row in rows:
            row_d = dict(zip(db.content_get_metadata_keys, row))
            content = Content(**row_d)
            contents_by_hash[key(content)] = content
        return [contents_by_hash.get(sha1) for sha1 in contents]

    @db_transaction_generator()
    def content_missing(
        self,
        contents: List[Dict[str, Any]],
        key_hash: str = "sha1",
        *,
        db: Db,
        cur=None,
    ) -> Iterable[bytes]:
        if key_hash not in DEFAULT_ALGORITHMS:
            raise StorageArgumentException(
                f"key_hash should be one of {','.join(DEFAULT_ALGORITHMS)}"
            )

        keys = db.content_hash_keys
        key_hash_idx = keys.index(key_hash)

        for obj in db.content_missing_from_list(contents, cur):
            yield obj[key_hash_idx]

    @db_transaction_generator()
    def content_missing_per_sha1(
        self, contents: List[bytes], *, db: Db, cur=None
    ) -> Iterable[bytes]:
        for obj in db.content_missing_per_sha1(contents, cur):
            yield obj[0]

    @db_transaction_generator()
    def content_missing_per_sha1_git(
        self, contents: List[bytes], *, db: Db, cur=None
    ) -> Iterable[Sha1Git]:
        for obj in db.content_missing_per_sha1_git(contents, cur):
            yield obj[0]

    @db_transaction()
    def content_find(
        self, content: Dict[str, Any], *, db: Db, cur=None
    ) -> List[Content]:
        if not set(content).intersection(DEFAULT_ALGORITHMS):
            raise StorageArgumentException(
                "content keys must contain at least one "
                f"of: {', '.join(sorted(DEFAULT_ALGORITHMS))}"
            )

        rows = db.content_find(
sha1=content.get("sha1"), sha1_git=content.get("sha1_git"), sha256=content.get("sha256"), blake2s256=content.get("blake2s256"), cur=cur, ) contents = [] for row in rows: row_d = dict(zip(db.content_find_cols, row)) contents.append(Content(**row_d)) return contents @db_transaction() def content_get_random(self, *, db: Db, cur=None) -> Sha1Git: return db.content_get_random(cur) @staticmethod def _skipped_content_normalize(d): d = d.copy() if d.get("status") is None: d["status"] = "absent" if d.get("length") is None: d["length"] = -1 return d def _skipped_content_add_metadata(self, db, cur, content: List[SkippedContent]): origin_ids = db.origin_id_get_by_url([cont.origin for cont in content], cur=cur) content = [ attr.evolve(c, origin=origin_id) for (c, origin_id) in zip(content, origin_ids) ] db.mktemp("skipped_content", cur) db.copy_to( [c.to_dict() for c in content], "tmp_skipped_content", db.skipped_content_keys, cur, ) # move metadata in place db.skipped_content_add_from_temp(cur) @db_transaction() def skipped_content_add( self, content: List[SkippedContent], *, db: Db, cur=None ) -> Dict[str, int]: ctime = now() content = [attr.evolve(c, ctime=ctime) for c in content] missing_contents = self.skipped_content_missing( (c.to_dict() for c in content), db=db, cur=cur, ) content = [ c for c in content if any( all( c.get_hash(algo) == missing_content.get(algo) for algo in DEFAULT_ALGORITHMS ) for missing_content in missing_contents ) ] self.journal_writer.skipped_content_add(content) self._skipped_content_add_metadata(db, cur, content) return { "skipped_content:add": len(content), } @db_transaction_generator() def skipped_content_missing( self, contents: List[Dict[str, Any]], *, db: Db, cur=None ) -> Iterable[Dict[str, Any]]: contents = list(contents) for content in db.skipped_content_missing(contents, cur): yield dict(zip(db.content_hash_keys, content)) @db_transaction() def directory_add( self, directories: List[Directory], *, db: Db, cur=None ) -> Dict[str, int]: summary = {"directory:add": 0} dirs = set() dir_entries: Dict[str, defaultdict] = { "file": defaultdict(list), "dir": defaultdict(list), "rev": defaultdict(list), } for cur_dir in directories: dir_id = cur_dir.id dirs.add(dir_id) for src_entry in cur_dir.entries: entry = src_entry.to_dict() entry["dir_id"] = dir_id dir_entries[entry["type"]][dir_id].append(entry) dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur)) if not dirs_missing: return summary self.journal_writer.directory_add( dir_ for dir_ in directories if dir_.id in dirs_missing ) # Copy directory metadata dirs_missing_dict = ( {"id": dir_.id, "raw_manifest": dir_.raw_manifest} for dir_ in directories if dir_.id in dirs_missing ) db.mktemp("directory", cur) db.copy_to(dirs_missing_dict, "tmp_directory", ["id", "raw_manifest"], cur) # Copy entries for entry_type, entry_list in dir_entries.items(): entries = itertools.chain.from_iterable( entries_for_dir for dir_id, entries_for_dir in entry_list.items() if dir_id in dirs_missing ) db.mktemp_dir_entry(entry_type) db.copy_to( entries, "tmp_directory_entry_%s" % entry_type, ["target", "name", "perms", "dir_id"], cur, ) # Do the final copy db.directory_add_from_temp(cur) summary["directory:add"] = len(dirs_missing) return summary @db_transaction_generator() def directory_missing( self, directories: List[Sha1Git], *, db: Db, cur=None ) -> Iterable[Sha1Git]: for obj in db.directory_missing_from_list(directories, cur): yield obj[0] @db_transaction_generator(statement_timeout=20000) def directory_ls( self, directory: 
Sha1Git, recursive: bool = False, *, db: Db, cur=None ) -> Iterable[Dict[str, Any]]: if recursive: res_gen = db.directory_walk(directory, cur=cur) else: res_gen = db.directory_walk_one(directory, cur=cur) for line in res_gen: yield dict(zip(db.directory_ls_cols, line)) @db_transaction(statement_timeout=4000) def directory_entry_get_by_path( self, directory: Sha1Git, paths: List[bytes], *, db: Db, cur=None ) -> Optional[Dict[str, Any]]: res = db.directory_entry_get_by_path(directory, paths, cur) return dict(zip(db.directory_ls_cols, res)) if res else None @db_transaction() def directory_get_random(self, *, db: Db, cur=None) -> Sha1Git: return db.directory_get_random(cur) @db_transaction() def directory_get_entries( self, directory_id: Sha1Git, page_token: Optional[bytes] = None, limit: int = 1000, *, db: Db, cur=None, ) -> Optional[PagedResult[DirectoryEntry]]: if list(self.directory_missing([directory_id], db=db, cur=cur)): return None if page_token is not None: raise StorageArgumentException("Unsupported page token") # TODO: actually paginate rows = db.directory_get_entries(directory_id, cur=cur) return PagedResult( results=[ DirectoryEntry(**dict(zip(db.directory_get_entries_cols, row))) for row in rows ], next_page_token=None, ) @db_transaction() def directory_get_raw_manifest( self, directory_ids: List[Sha1Git], *, db: Db, cur=None ) -> Dict[Sha1Git, Optional[bytes]]: return dict(db.directory_get_raw_manifest(directory_ids, cur=cur)) @db_transaction() def revision_add( self, revisions: List[Revision], *, db: Db, cur=None ) -> Dict[str, int]: summary = {"revision:add": 0} revisions_missing = set( self.revision_missing( set(revision.id for revision in revisions), db=db, cur=cur ) ) if not revisions_missing: return summary db.mktemp_revision(cur) revisions_filtered = [ revision for revision in revisions if revision.id in revisions_missing ] self.journal_writer.revision_add(revisions_filtered) db_revisions_filtered = list(map(converters.revision_to_db, revisions_filtered)) parents_filtered: List[Dict[str, Any]] = [] with convert_validation_exceptions(): db.copy_to( db_revisions_filtered, "tmp_revision", db.revision_add_cols, cur, lambda rev: parents_filtered.extend(rev["parents"]), ) db.revision_add_from_temp(cur) db.copy_to( parents_filtered, "revision_history", ["id", "parent_id", "parent_rank"], cur, ) return {"revision:add": len(revisions_missing)} @db_transaction_generator() def revision_missing( self, revisions: List[Sha1Git], *, db: Db, cur=None ) -> Iterable[Sha1Git]: if not revisions: return None for obj in db.revision_missing_from_list(revisions, cur): yield obj[0] @db_transaction(statement_timeout=2000) def revision_get( self, revision_ids: List[Sha1Git], ignore_displayname: bool = False, *, db: Db, cur=None, ) -> List[Optional[Revision]]: revisions = [] for line in db.revision_get_from_list(revision_ids, ignore_displayname, cur): revision = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) revisions.append(revision) return revisions @db_transaction_generator(statement_timeout=2000) def revision_log( self, revisions: List[Sha1Git], ignore_displayname: bool = False, limit: Optional[int] = None, *, db: Db, cur=None, ) -> Iterable[Optional[Dict[str, Any]]]: for line in db.revision_log( revisions, ignore_displayname=ignore_displayname, limit=limit, cur=cur ): data = converters.db_to_revision(dict(zip(db.revision_get_cols, line))) if not data: yield None continue yield data.to_dict() @db_transaction_generator(statement_timeout=2000) def revision_shortlog( self, 
revisions: List[Sha1Git], limit: Optional[int] = None, *, db: Db, cur=None ) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]: yield from db.revision_shortlog(revisions, limit, cur) @db_transaction() def revision_get_random(self, *, db: Db, cur=None) -> Sha1Git: return db.revision_get_random(cur) @db_transaction() def extid_get_from_extid( self, id_type: str, ids: List[bytes], version: Optional[int] = None, *, db: Db, cur=None, ) -> List[ExtID]: extids = [] for row in db.extid_get_from_extid_list(id_type, ids, version=version, cur=cur): if row[0] is not None: extids.append(converters.db_to_extid(dict(zip(db.extid_cols, row)))) return extids @db_transaction() def extid_get_from_target( self, target_type: ObjectType, ids: List[Sha1Git], extid_type: Optional[str] = None, extid_version: Optional[int] = None, *, db: Db, cur=None, ) -> List[ExtID]: extids = [] if (extid_version is not None and extid_type is None) or ( extid_version is None and extid_type is not None ): raise ValueError("You must provide both extid_type and extid_version") for row in db.extid_get_from_swhid_list( target_type.value, ids, extid_version=extid_version, extid_type=extid_type, cur=cur, ): if row[0] is not None: extids.append(converters.db_to_extid(dict(zip(db.extid_cols, row)))) return extids @db_transaction() def extid_add(self, ids: List[ExtID], *, db: Db, cur=None) -> Dict[str, int]: extid = [ { "extid": extid.extid, "extid_type": extid.extid_type, "extid_version": getattr(extid, "extid_version", 0), "target": extid.target.object_id, "target_type": extid.target.object_type.name.lower(), # arghh } for extid in ids ] db.mktemp("extid", cur) self.journal_writer.extid_add(ids) db.copy_to(extid, "tmp_extid", db.extid_cols, cur) # move metadata in place db.extid_add_from_temp(cur) return {"extid:add": len(extid)} @db_transaction() def release_add( self, releases: List[Release], *, db: Db, cur=None ) -> Dict[str, int]: summary = {"release:add": 0} release_ids = set(release.id for release in releases) releases_missing = set(self.release_missing(release_ids, db=db, cur=cur)) if not releases_missing: return summary db.mktemp_release(cur) releases_filtered = [ release for release in releases if release.id in releases_missing ] self.journal_writer.release_add(releases_filtered) db_releases_filtered = list(map(converters.release_to_db, releases_filtered)) with convert_validation_exceptions(): db.copy_to(db_releases_filtered, "tmp_release", db.release_add_cols, cur) db.release_add_from_temp(cur) return {"release:add": len(releases_missing)} @db_transaction_generator() def release_missing( self, releases: List[Sha1Git], *, db: Db, cur=None ) -> Iterable[Sha1Git]: if not releases: return for obj in db.release_missing_from_list(releases, cur): yield obj[0] @db_transaction(statement_timeout=1000) def release_get( self, releases: List[Sha1Git], ignore_displayname: bool = False, *, db: Db, cur=None, ) -> List[Optional[Release]]: rels = [] for release in db.release_get_from_list(releases, ignore_displayname, cur): data = converters.db_to_release(dict(zip(db.release_get_cols, release))) rels.append(data if data else None) return rels @db_transaction() def release_get_random(self, *, db: Db, cur=None) -> Sha1Git: return db.release_get_random(cur) @db_transaction() def snapshot_add( self, snapshots: List[Snapshot], *, db: Db, cur=None ) -> Dict[str, int]: created_temp_table = False count = 0 for snapshot in snapshots: if not db.snapshot_exists(snapshot.id, cur): if not created_temp_table: db.mktemp_snapshot_branch(cur) 
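                    # Staging flow for each new snapshot: branches are COPYed
                    # into the tmp_snapshot_branch temp table (db.copy_to
                    # below), then the swh_snapshot_add stored procedure moves
                    # them in place server-side; the db.snapshot_exists()
                    # guard above keeps the operation idempotent for
                    # already-known snapshot ids.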
    @db_transaction()
    def snapshot_add(
        self, snapshots: List[Snapshot], *, db: Db, cur=None
    ) -> Dict[str, int]:
        created_temp_table = False

        count = 0
        for snapshot in snapshots:
            if not db.snapshot_exists(snapshot.id, cur):
                if not created_temp_table:
                    db.mktemp_snapshot_branch(cur)
                    created_temp_table = True

                with convert_validation_exceptions():
                    db.copy_to(
                        (
                            {
                                "name": name,
                                "target": info.target if info else None,
                                "target_type": (
                                    info.target_type.value if info else None
                                ),
                            }
                            for name, info in snapshot.branches.items()
                        ),
                        "tmp_snapshot_branch",
                        ["name", "target", "target_type"],
                        cur,
                    )

                self.journal_writer.snapshot_add([snapshot])

                db.snapshot_add(snapshot.id, cur)
                count += 1

        return {"snapshot:add": count}

    @db_transaction_generator()
    def snapshot_missing(
        self, snapshots: List[Sha1Git], *, db: Db, cur=None
    ) -> Iterable[Sha1Git]:
        for obj in db.snapshot_missing_from_list(snapshots, cur):
            yield obj[0]

    @db_transaction(statement_timeout=2000)
    def snapshot_get(
        self, snapshot_id: Sha1Git, *, db: Db, cur=None
    ) -> Optional[Dict[str, Any]]:
        d = self.snapshot_get_branches(snapshot_id)
        if d is None:
            return d
        return {
            "id": d["id"],
            "branches": {
                name: branch.to_dict() if branch else None
                for (name, branch) in d["branches"].items()
            },
            "next_branch": d["next_branch"],
        }

    @db_transaction(statement_timeout=2000)
    def snapshot_count_branches(
        self,
        snapshot_id: Sha1Git,
        branch_name_exclude_prefix: Optional[bytes] = None,
        *,
        db: Db,
        cur=None,
    ) -> Optional[Dict[Optional[str], int]]:
        return dict(
            db.snapshot_count_branches(snapshot_id, branch_name_exclude_prefix, cur)
        )

    @db_transaction(statement_timeout=2000)
    def snapshot_get_branches(
        self,
        snapshot_id: Sha1Git,
        branches_from: bytes = b"",
        branches_count: int = 1000,
        target_types: Optional[List[str]] = None,
        branch_name_include_substring: Optional[bytes] = None,
        branch_name_exclude_prefix: Optional[bytes] = None,
        *,
        db: Db,
        cur=None,
    ) -> Optional[PartialBranches]:
        if snapshot_id == EMPTY_SNAPSHOT_ID:
            return PartialBranches(
                id=snapshot_id,
                branches={},
                next_branch=None,
            )
        if list(self.snapshot_missing([snapshot_id])):
            return None

        branches = {}
        next_branch = None

        fetched_branches = list(
            db.snapshot_get_by_id(
                snapshot_id,
                branches_from=branches_from,
                # the underlying SQL query can be quite expensive to execute for
                # small branches_count values, so we ensure a minimum branches
                # limit of 10 for optimal performance
                branches_count=max(branches_count + 1, 10),
                target_types=target_types,
                branch_name_include_substring=branch_name_include_substring,
                branch_name_exclude_prefix=branch_name_exclude_prefix,
                cur=cur,
            )
        )
        for row in fetched_branches[:branches_count]:
            branch_d = dict(zip(db.snapshot_get_cols, row))
            del branch_d["snapshot_id"]
            name = branch_d.pop("name")
            if branch_d["target"] is None and branch_d["target_type"] is None:
                branch = None
            else:
                assert branch_d["target_type"] is not None
                branch = SnapshotBranch(
                    target=branch_d["target"],
                    target_type=TargetType(branch_d["target_type"]),
                )
            branches[name] = branch

        if len(fetched_branches) > branches_count:
            next_branch = dict(
                zip(db.snapshot_get_cols, fetched_branches[branches_count])
            )["name"]

        return PartialBranches(
            id=snapshot_id,
            branches=branches,
            next_branch=next_branch,
        )

    @db_transaction()
    def snapshot_get_random(self, *, db: Db, cur=None) -> Sha1Git:
        return db.snapshot_get_random(cur)
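# snapshot_get_branches() (like the PagedResult methods below) detects whether a
# next page exists by fetching one row more than requested: if limit + 1 rows
# come back, the extra row's key becomes the next page token. A minimal sketch
# of that probe over a plain sorted list (illustrative names only):
from typing import List, Optional, Tuple

def get_page(rows: List[str], start: str, limit: int) -> Tuple[List[str], Optional[str]]:
    fetched = [r for r in sorted(rows) if r >= start][: limit + 1]  # limit + 1 probe
    if len(fetched) > limit:
        return fetched[:limit], fetched[limit]  # extra row -> next page token
    return fetched, None

branches = ["refs/heads/dev", "refs/heads/main", "refs/tags/v1"]
page, token = get_page(branches, start="", limit=2)
assert page == ["refs/heads/dev", "refs/heads/main"] and token == "refs/tags/v1"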
    @db_transaction()
    def origin_visit_add(
        self, visits: List[OriginVisit], *, db: Db, cur=None
    ) -> Iterable[OriginVisit]:
        for visit in visits:
            origin = self.origin_get([visit.origin], db=db, cur=cur)[0]
            if not origin:  # Cannot add a visit without an origin
                raise StorageArgumentException(f"Unknown origin {visit.origin}")

        all_visits = []
        for visit in visits:
            if not visit.visit:
                with convert_validation_exceptions():
                    visit_id = db.origin_visit_add(
                        visit.origin, visit.date, visit.type, cur=cur
                    )
                visit = attr.evolve(visit, visit=visit_id)
            else:
                db.origin_visit_add_with_id(visit, cur=cur)
            assert visit.visit is not None
            all_visits.append(visit)
            # The journal write happens after the database insert, so that the
            # visit id is known even when the input visit had none
            self.journal_writer.origin_visit_add([visit])
            visit_status = OriginVisitStatus(
                origin=visit.origin,
                visit=visit.visit,
                date=visit.date,
                type=visit.type,
                status="created",
                snapshot=None,
            )
            self._origin_visit_status_add(visit_status, db=db, cur=cur)

        return all_visits

    def _origin_visit_status_add(
        self, visit_status: OriginVisitStatus, db, cur
    ) -> None:
        """Add an origin visit status"""
        self.journal_writer.origin_visit_status_add([visit_status])
        db.origin_visit_status_add(visit_status, cur=cur)

    @db_transaction()
    def origin_visit_status_add(
        self,
        visit_statuses: List[OriginVisitStatus],
        *,
        db: Db,
        cur=None,
    ) -> Dict[str, int]:
        visit_statuses_ = []
        # First round to check existence (fail early if any origin is unknown)
        for visit_status in visit_statuses:
            origin_url = self.origin_get([visit_status.origin], db=db, cur=cur)[0]
            if not origin_url:
                raise StorageArgumentException(f"Unknown origin {visit_status.origin}")

            if visit_status.type is None:
                origin_visit = self.origin_visit_get_by(
                    visit_status.origin, visit_status.visit, db=db, cur=cur
                )
                if origin_visit is None:
                    raise StorageArgumentException(
                        f"Unknown origin visit {visit_status.visit} "
                        f"of origin {visit_status.origin}"
                    )

                origin_visit_status = attr.evolve(visit_status, type=origin_visit.type)
            else:
                origin_visit_status = visit_status

            visit_statuses_.append(origin_visit_status)

        for visit_status in visit_statuses_:
            self._origin_visit_status_add(visit_status, db, cur)
        return {"origin_visit_status:add": len(visit_statuses_)}

    @db_transaction()
    def origin_visit_status_get_latest(
        self,
        origin_url: str,
        visit: int,
        allowed_statuses: Optional[List[str]] = None,
        require_snapshot: bool = False,
        *,
        db: Db,
        cur=None,
    ) -> Optional[OriginVisitStatus]:
        if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
            raise StorageArgumentException(
                f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
                f"{','.join(VISIT_STATUSES)} authorized"
            )

        row_d = db.origin_visit_status_get_latest(
            origin_url, visit, allowed_statuses, require_snapshot, cur=cur
        )
        if not row_d:
            return None
        return OriginVisitStatus(**row_d)

    @db_transaction(statement_timeout=500)
    def origin_visit_get(
        self,
        origin: str,
        page_token: Optional[str] = None,
        order: ListOrder = ListOrder.ASC,
        limit: int = 10,
        *,
        db: Db,
        cur=None,
    ) -> PagedResult[OriginVisit]:
        page_token = page_token or "0"
        if not isinstance(order, ListOrder):
            raise StorageArgumentException("order must be a ListOrder value")
        if not isinstance(page_token, str):
            raise StorageArgumentException("page_token must be a string.")

        next_page_token = None
        visit_from = int(page_token)
        visits: List[OriginVisit] = []
        extra_limit = limit + 1
        for row in db.origin_visit_get_range(
            origin, visit_from=visit_from, order=order, limit=extra_limit, cur=cur
        ):
            row_d = dict(zip(db.origin_visit_cols, row))
            visits.append(
                OriginVisit(
                    origin=row_d["origin"],
                    visit=row_d["visit"],
                    date=row_d["date"],
                    type=row_d["type"],
                )
            )

        assert len(visits) <= extra_limit
        if len(visits) == extra_limit:
            visits = visits[:limit]
            next_page_token = str(visits[-1].visit)

        return PagedResult(results=visits, next_page_token=next_page_token)
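# origin_visit_add() never mutates the (frozen) OriginVisit it receives; when
# the database allocates a visit id, attr.evolve() builds a copy with the id
# filled in. A stdlib sketch of the same discipline, with dataclasses.replace
# playing the role of attr.evolve (the Visit type here is illustrative):
import dataclasses
from typing import Optional

@dataclasses.dataclass(frozen=True)
class Visit:
    origin: str
    visit: Optional[int] = None

v = Visit(origin="https://example.org/repo.git")
v_with_id = dataclasses.replace(v, visit=42)  # new object, original untouched
assert v.visit is None and v_with_id.visit == 42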
    @db_transaction(statement_timeout=500)
    def origin_visit_get_with_statuses(
        self,
        origin: str,
        allowed_statuses: Optional[List[str]] = None,
        require_snapshot: bool = False,
        page_token: Optional[str] = None,
        order: ListOrder = ListOrder.ASC,
        limit: int = 10,
        *,
        db: Db,
        cur=None,
    ) -> PagedResult[OriginVisitWithStatuses]:
        page_token = page_token or "0"
        if not isinstance(order, ListOrder):
            raise StorageArgumentException("order must be a ListOrder value")
        if not isinstance(page_token, str):
            raise StorageArgumentException("page_token must be a string.")

        # First get visits (plus one so we can use it as the next page token if any)
        visits_page = self.origin_visit_get(
            origin=origin,
            page_token=page_token,
            order=order,
            limit=limit,
            db=db,
            cur=cur,
        )
        visits = visits_page.results
        next_page_token = visits_page.next_page_token

        if visits:
            visit_from = min(visits[0].visit, visits[-1].visit)
            visit_to = max(visits[0].visit, visits[-1].visit)

            # Then, fetch all statuses associated to these visits
            visit_statuses: Dict[int, List[OriginVisitStatus]] = defaultdict(list)
            for row in db.origin_visit_status_get_all_in_range(
                origin,
                allowed_statuses,
                require_snapshot,
                visit_from=visit_from,
                visit_to=visit_to,
                cur=cur,
            ):
                row_d = dict(zip(db.origin_visit_status_cols, row))
                visit_statuses[row_d["visit"]].append(OriginVisitStatus(**row_d))

        results = [
            OriginVisitWithStatuses(visit=visit, statuses=visit_statuses[visit.visit])
            for visit in visits
        ]

        return PagedResult(results=results, next_page_token=next_page_token)

    @db_transaction(statement_timeout=1000)
    def origin_visit_find_by_date(
        self, origin: str, visit_date: datetime.datetime, *, db: Db, cur=None
    ) -> Optional[OriginVisit]:
        row_d = db.origin_visit_find_by_date(origin, visit_date, cur=cur)
        if not row_d:
            return None
        return OriginVisit(
            origin=row_d["origin"],
            visit=row_d["visit"],
            date=row_d["date"],
            type=row_d["type"],
        )

    @db_transaction(statement_timeout=500)
    def origin_visit_get_by(
        self, origin: str, visit: int, *, db: Db, cur=None
    ) -> Optional[OriginVisit]:
        row = db.origin_visit_get(origin, visit, cur)
        if row:
            row_d = dict(zip(db.origin_visit_get_cols, row))
            return OriginVisit(
                origin=row_d["origin"],
                visit=row_d["visit"],
                date=row_d["date"],
                type=row_d["type"],
            )
        return None

    @db_transaction(statement_timeout=4000)
    def origin_visit_get_latest(
        self,
        origin: str,
        type: Optional[str] = None,
        allowed_statuses: Optional[List[str]] = None,
        require_snapshot: bool = False,
        *,
        db: Db,
        cur=None,
    ) -> Optional[OriginVisit]:
        if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
            raise StorageArgumentException(
                f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
                f"{','.join(VISIT_STATUSES)} authorized"
            )

        row = db.origin_visit_get_latest(
            origin,
            type=type,
            allowed_statuses=allowed_statuses,
            require_snapshot=require_snapshot,
            cur=cur,
        )
        if row:
            row_d = dict(zip(db.origin_visit_get_cols, row))
            visit = OriginVisit(
                origin=row_d["origin"],
                visit=row_d["visit"],
                date=row_d["date"],
                type=row_d["type"],
            )
            return visit
        return None
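# origin_visit_get_with_statuses() fetches all statuses for a visit id range in
# one query, then buckets them by visit id before pairing them with their
# visits. The bucketing step, in isolation (plain tuples stand in for the real
# model objects):
from collections import defaultdict
from typing import Dict, List, Tuple

status_rows: List[Tuple[int, str]] = [(1, "created"), (1, "full"), (2, "created")]

by_visit: Dict[int, List[str]] = defaultdict(list)
for visit_id, status in status_rows:
    by_visit[visit_id].append(status)  # one bucket per visit id

assert by_visit[1] == ["created", "full"] and by_visit[2] == ["created"]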
    @db_transaction(statement_timeout=500)
    def origin_visit_status_get(
        self,
        origin: str,
        visit: int,
        page_token: Optional[str] = None,
        order: ListOrder = ListOrder.ASC,
        limit: int = 10,
        *,
        db: Db,
        cur=None,
    ) -> PagedResult[OriginVisitStatus]:
        next_page_token = None
        date_from = None
        if page_token is not None:
            date_from = datetime.datetime.fromisoformat(page_token)

        visit_statuses: List[OriginVisitStatus] = []
        # Take one more visit status so we can reuse it as the next page token if any
        for row in db.origin_visit_status_get_range(
            origin, visit, date_from=date_from, order=order, limit=limit + 1, cur=cur
        ):
            row_d = dict(zip(db.origin_visit_status_cols, row))
            visit_statuses.append(OriginVisitStatus(**row_d))

        if len(visit_statuses) > limit:
            # last visit status date is the next page token
            next_page_token = str(visit_statuses[-1].date)
            # excluding that visit status from the result to respect the limit size
            visit_statuses = visit_statuses[:limit]

        return PagedResult(results=visit_statuses, next_page_token=next_page_token)

    @db_transaction()
    def origin_visit_status_get_random(
        self, type: str, *, db: Db, cur=None
    ) -> Optional[OriginVisitStatus]:
        row = db.origin_visit_get_random(type, cur)
        if row is not None:
            row_d = dict(zip(db.origin_visit_status_cols, row))
            return OriginVisitStatus(**row_d)
        return None

    @db_transaction(statement_timeout=2000)
    def object_find_by_sha1_git(
        self, ids: List[Sha1Git], *, db: Db, cur=None
    ) -> Dict[Sha1Git, List[Dict]]:
        ret: Dict[Sha1Git, List[Dict]] = {id: [] for id in ids}

        for retval in db.object_find_by_sha1_git(ids, cur=cur):
            if retval[1]:
                ret[retval[0]].append(
                    dict(zip(db.object_find_by_sha1_git_cols, retval))
                )

        return ret

    @db_transaction(statement_timeout=1000)
    def origin_get(
        self, origins: List[str], *, db: Db, cur=None
    ) -> Iterable[Optional[Origin]]:
        rows = db.origin_get_by_url(origins, cur)
        result: List[Optional[Origin]] = []
        for row in rows:
            origin_d = dict(zip(db.origin_cols, row))
            url = origin_d["url"]
            result.append(None if url is None else Origin(url=url))
        return result

    @db_transaction(statement_timeout=1000)
    def origin_get_by_sha1(
        self, sha1s: List[bytes], *, db: Db, cur=None
    ) -> List[Optional[Dict[str, Any]]]:
        return [
            dict(zip(db.origin_cols, row)) if row[0] else None
            for row in db.origin_get_by_sha1(sha1s, cur)
        ]

    @db_transaction_generator()
    def origin_get_range(self, origin_from=1, origin_count=100, *, db: Db, cur=None):
        for origin in db.origin_get_range(origin_from, origin_count, cur):
            yield dict(zip(db.origin_get_range_cols, origin))

    @db_transaction()
    def origin_list(
        self, page_token: Optional[str] = None, limit: int = 100, *, db: Db, cur=None
    ) -> PagedResult[Origin]:
        page_token = page_token or "0"
        if not isinstance(page_token, str):
            raise StorageArgumentException("page_token must be a string.")
        origin_from = int(page_token)
        next_page_token = None

        origins: List[Origin] = []
        # Take one more origin so we can reuse it as the next page token if any
        for row_d in self.origin_get_range(origin_from, limit + 1, db=db, cur=cur):
            origins.append(Origin(url=row_d["url"]))
            # keep the last_id for the pagination if needed
            last_id = row_d["id"]

        if len(origins) > limit:  # data left for subsequent call
            # last origin id is the next page token
            next_page_token = str(last_id)
            # excluding that origin from the result to respect the limit size
            origins = origins[:limit]

        assert len(origins) <= limit
        return PagedResult(results=origins, next_page_token=next_page_token)
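# origin_list() keeps its page token deliberately simple: the stringified id of
# the extra (limit + 1)-th origin, parsed back with int() on the next call. A
# round-trip sketch of that token scheme (standalone and illustrative, using an
# in-memory id/url list):
from typing import List, Optional, Tuple

origins = [(1, "https://a.example"), (2, "https://b.example"), (3, "https://c.example")]

def list_page(page_token: Optional[str], limit: int) -> Tuple[List[str], Optional[str]]:
    origin_from = int(page_token or "0")  # the token is just str(id)
    selected = [(i, url) for (i, url) in origins if i >= origin_from][: limit + 1]
    if len(selected) > limit:
        # the extra row's id becomes the token; it is fetched first on the next call
        return [url for _, url in selected[:limit]], str(selected[limit][0])
    return [url for _, url in selected], None

page1, token = list_page(None, limit=2)
assert page1 == ["https://a.example", "https://b.example"] and token == "3"
page2, token = list_page(token, limit=2)
assert page2 == ["https://c.example"] and token is None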
    @db_transaction()
    def origin_search(
        self,
        url_pattern: str,
        page_token: Optional[str] = None,
        limit: int = 50,
        regexp: bool = False,
        with_visit: bool = False,
        visit_types: Optional[List[str]] = None,
        *,
        db: Db,
        cur=None,
    ) -> PagedResult[Origin]:
        next_page_token = None
        offset = int(page_token) if page_token else 0

        origins = []
        # Take one more origin so we can reuse it as the next page token if any
        for origin in db.origin_search(
            url_pattern, offset, limit + 1, regexp, with_visit, visit_types, cur
        ):
            row_d = dict(zip(db.origin_cols, origin))
            origins.append(Origin(url=row_d["url"]))

        if len(origins) > limit:
            # next offset
            next_page_token = str(offset + limit)
            # excluding that origin from the result to respect the limit size
            origins = origins[:limit]

        assert len(origins) <= limit

        return PagedResult(results=origins, next_page_token=next_page_token)

    @db_transaction()
    def origin_count(
        self,
        url_pattern: str,
        regexp: bool = False,
        with_visit: bool = False,
        *,
        db: Db,
        cur=None,
    ) -> int:
        return db.origin_count(url_pattern, regexp, with_visit, cur)

    @db_transaction()
    def origin_snapshot_get_all(
        self, origin_url: str, *, db: Db, cur=None
    ) -> List[Sha1Git]:
        return list(db.origin_snapshot_get_all(origin_url, cur))

    @db_transaction()
    def origin_add(self, origins: List[Origin], *, db: Db, cur=None) -> Dict[str, int]:
        urls = [o.url for o in origins]
        known_origins = set(url for (url,) in db.origin_get_by_url(urls, cur))
        # keep only one occurrence of each given origin while keeping the list
        # sorted as originally given
        to_add = sorted(set(urls) - known_origins, key=urls.index)

        self.journal_writer.origin_add([Origin(url=url) for url in to_add])
        added = 0
        for url in to_add:
            if db.origin_add(url, cur):
                added += 1
        return {"origin:add": added}

    @db_transaction(statement_timeout=500)
    def stat_counters(self, *, db: Db, cur=None):
        return {k: v for (k, v) in db.stat_counters()}

    @db_transaction()
    def refresh_stat_counters(self, *, db: Db, cur=None):
        keys = [
            "content",
            "directory",
            "directory_entry_dir",
            "directory_entry_file",
            "directory_entry_rev",
            "origin",
            "origin_visit",
            "person",
            "release",
            "revision",
            "revision_history",
            "skipped_content",
            "snapshot",
        ]

        for key in keys:
            cur.execute("select * from swh_update_counter(%s)", (key,))

    @db_transaction()
    def raw_extrinsic_metadata_add(
        self,
        metadata: List[RawExtrinsicMetadata],
        db,
        cur,
    ) -> Dict[str, int]:
        metadata = list(metadata)
        self.journal_writer.raw_extrinsic_metadata_add(metadata)
        counter = Counter[ExtendedObjectType]()
        for metadata_entry in metadata:
            authority_id = self._get_authority_id(metadata_entry.authority, db, cur)
            fetcher_id = self._get_fetcher_id(metadata_entry.fetcher, db, cur)

            db.raw_extrinsic_metadata_add(
                id=metadata_entry.id,
                type=metadata_entry.target.object_type.name.lower(),
                target=str(metadata_entry.target),
                discovery_date=metadata_entry.discovery_date,
                authority_id=authority_id,
                fetcher_id=fetcher_id,
                format=metadata_entry.format,
                metadata=metadata_entry.metadata,
                origin=metadata_entry.origin,
                visit=metadata_entry.visit,
                snapshot=map_optional(str, metadata_entry.snapshot),
                release=map_optional(str, metadata_entry.release),
                revision=map_optional(str, metadata_entry.revision),
                path=metadata_entry.path,
                directory=map_optional(str, metadata_entry.directory),
                cur=cur,
            )
            counter[metadata_entry.target.object_type] += 1

        return {
            f"{type.value}_metadata:add": count for (type, count) in counter.items()
        }
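# origin_add() de-duplicates its input while preserving the caller's ordering:
# set arithmetic drops already-known urls, and sorting by list.index restores
# the original order of first occurrence. The same trick in isolation (note
# that list.index is O(n) per lookup, which is fine for modest batch sizes):
urls = ["https://b.example", "https://a.example", "https://b.example"]
known = {"https://a.example"}

to_add = sorted(set(urls) - known, key=urls.index)
assert to_add == ["https://b.example"]  # one occurrence, input order kept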
    @db_transaction()
    def raw_extrinsic_metadata_get(
        self,
        target: ExtendedSWHID,
        authority: MetadataAuthority,
        after: Optional[datetime.datetime] = None,
        page_token: Optional[bytes] = None,
        limit: int = 1000,
        *,
        db: Db,
        cur=None,
    ) -> PagedResult[RawExtrinsicMetadata]:
        if page_token:
            (after_time, after_fetcher) = msgpack_loads(base64.b64decode(page_token))
            if after and after_time < after:
                raise StorageArgumentException(
                    "page_token is inconsistent with the value of 'after'."
                )
        else:
            after_time = after
            after_fetcher = None

        authority_id = self._get_authority_id(authority, db, cur)
        if not authority_id:
            return PagedResult(
                next_page_token=None,
                results=[],
            )

        rows = db.raw_extrinsic_metadata_get(
            str(target),
            authority_id,
            after_time,
            after_fetcher,
            limit + 1,
            cur,
        )
        rows = [dict(zip(db.raw_extrinsic_metadata_get_cols, row)) for row in rows]
        results = []
        for row in rows:
            assert str(target) == row["raw_extrinsic_metadata.target"]
            results.append(converters.db_to_raw_extrinsic_metadata(row))

        if len(results) > limit:
            results.pop()
            assert len(results) == limit
            last_returned_row = rows[-2]  # rows[-1] corresponds to the popped result
            next_page_token: Optional[str] = base64.b64encode(
                msgpack_dumps(
                    (
                        last_returned_row["discovery_date"],
                        last_returned_row["metadata_fetcher.id"],
                    )
                )
            ).decode()
        else:
            next_page_token = None

        return PagedResult(
            next_page_token=next_page_token,
            results=results,
        )

    @db_transaction()
    def raw_extrinsic_metadata_get_by_ids(
        self,
        ids: List[Sha1Git],
        *,
        db: Db,
        cur=None,
    ) -> List[RawExtrinsicMetadata]:
        return [
            converters.db_to_raw_extrinsic_metadata(
                dict(zip(db.raw_extrinsic_metadata_get_cols, row))
            )
            for row in db.raw_extrinsic_metadata_get_by_ids(ids)
        ]

    @db_transaction()
    def raw_extrinsic_metadata_get_authorities(
        self,
        target: ExtendedSWHID,
        *,
        db: Db,
        cur=None,
    ) -> List[MetadataAuthority]:
        return [
            MetadataAuthority(
                type=MetadataAuthorityType(authority_type), url=authority_url
            )
            for (
                authority_type,
                authority_url,
            ) in db.raw_extrinsic_metadata_get_authorities(str(target), cur)
        ]

    @db_transaction()
    def metadata_fetcher_add(
        self, fetchers: List[MetadataFetcher], *, db: Db, cur=None
    ) -> Dict[str, int]:
        fetchers = list(fetchers)
        self.journal_writer.metadata_fetcher_add(fetchers)
        count = 0
        for fetcher in fetchers:
            db.metadata_fetcher_add(fetcher.name, fetcher.version, cur=cur)
            count += 1
        return {"metadata_fetcher:add": count}

    @db_transaction(statement_timeout=500)
    def metadata_fetcher_get(
        self, name: str, version: str, *, db: Db, cur=None
    ) -> Optional[MetadataFetcher]:
        row = db.metadata_fetcher_get(name, version, cur=cur)
        if not row:
            return None
        return MetadataFetcher.from_dict(dict(zip(db.metadata_fetcher_cols, row)))

    @db_transaction()
    def metadata_authority_add(
        self, authorities: List[MetadataAuthority], *, db: Db, cur=None
    ) -> Dict[str, int]:
        authorities = list(authorities)
        self.journal_writer.metadata_authority_add(authorities)
        count = 0
        for authority in authorities:
            db.metadata_authority_add(authority.type.value, authority.url, cur=cur)
            count += 1
        return {"metadata_authority:add": count}

    @db_transaction()
    def metadata_authority_get(
        self, type: MetadataAuthorityType, url: str, *, db: Db, cur=None
    ) -> Optional[MetadataAuthority]:
        row = db.metadata_authority_get(type.value, url, cur=cur)
        if not row:
            return None
        return MetadataAuthority.from_dict(dict(zip(db.metadata_authority_cols, row)))

    def clear_buffers(self, object_types: Sequence[str] = ()) -> None:
        """Do nothing"""
        return None

    def flush(self, object_types: Sequence[str] = ()) -> Dict[str, int]:
        return {}

    def _get_authority_id(self, authority: MetadataAuthority, db, cur):
        authority_id = db.metadata_authority_get_id(
            authority.type.value, authority.url, cur
        )
        if not authority_id:
            raise StorageArgumentException(f"Unknown authority {authority}")
        return authority_id

    def _get_fetcher_id(self, fetcher: MetadataFetcher, db, cur):
        fetcher_id = db.metadata_fetcher_get_id(fetcher.name, fetcher.version, cur)
        if not fetcher_id:
            raise StorageArgumentException(f"Unknown fetcher {fetcher}")
        return fetcher_id
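# raw_extrinsic_metadata_get() above hides its cursor position in an opaque
# page token: a (discovery date, fetcher id) pair serialized with msgpack and
# base64-encoded. A sketch of the same round-trip using json from the stdlib in
# place of msgpack (field values are illustrative):
import base64
import json
from typing import Tuple

def encode_token(after_time: str, after_fetcher: int) -> str:
    # serialize, then base64-encode so the token is a safe opaque string
    return base64.b64encode(json.dumps([after_time, after_fetcher]).encode()).decode()

def decode_token(token: str) -> Tuple[str, int]:
    after_time, after_fetcher = json.loads(base64.b64decode(token))
    return after_time, after_fetcher

token = encode_token("2022-04-01T00:00:00+00:00", 17)
assert decode_token(token) == ("2022-04-01T00:00:00+00:00", 17)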
diff --git a/swh/storage/pytest_plugin.py b/swh/storage/pytest_plugin.py
index 9d471570..c0776b78 100644
--- a/swh/storage/pytest_plugin.py
+++ b/swh/storage/pytest_plugin.py
@@ -1,65 +1,65 @@
# Copyright (C) 2019-2022 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from functools import partial
from os import environ

import pytest
from pytest_postgresql import factories

from swh.core.db.pytest_plugin import initialize_database_for_module
from swh.storage import get_storage
-from swh.storage.postgresql.db import Db as StorageDb
+from swh.storage.postgresql.storage import Storage as StorageDatastore
from swh.storage.tests.storage_data import StorageData

environ["LC_ALL"] = "C.UTF-8"

swh_storage_postgresql_proc = factories.postgresql_proc(
    load=[
        partial(
            initialize_database_for_module,
            modname="storage",
-            version=StorageDb.current_version,
+            version=StorageDatastore.current_version,
        )
    ],
)

swh_storage_postgresql = factories.postgresql(
    "swh_storage_postgresql_proc",
)


@pytest.fixture
def swh_storage_backend_config(swh_storage_postgresql):
    """Basic pg storage configuration with no journal collaborator
    (to avoid pulling optional dependency on clients of this fixture)

    """
    yield {
        "cls": "postgresql",
        "db": swh_storage_postgresql.dsn,
        "objstorage": {"cls": "memory"},
        "check_config": {"check_write": True},
    }


@pytest.fixture
def swh_storage(swh_storage_backend_config):
    return get_storage(**swh_storage_backend_config)


@pytest.fixture
def sample_data() -> StorageData:
    """Pre-defined sample storage object data to manipulate

    Returns:
        StorageData whose attribute keys are data model objects. Either
        multiple objects: contents, directories, revisions, releases, ...
        or simple ones: content, directory, revision, release, ...

    """
    return StorageData()
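# The swh_storage_backend_config fixture above is designed to be overridden: a
# test module can redefine a fixture with the same name to layer extra settings
# on top of the base configuration dict. A sketch of such an override (the
# journal_writer entry is an assumed example, not part of this diff):
import pytest

@pytest.fixture
def swh_storage_backend_config(swh_storage_postgresql):
    return {
        "cls": "postgresql",
        "db": swh_storage_postgresql.dsn,
        "objstorage": {"cls": "memory"},
        "journal_writer": {"cls": "memory"},  # e.g. add an in-memory journal
    }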
diff --git a/swh/storage/tests/test_postgresql.py b/swh/storage/tests/test_postgresql.py
index 28ec5fb5..40eb7fef 100644
--- a/swh/storage/tests/test_postgresql.py
+++ b/swh/storage/tests/test_postgresql.py
@@ -1,410 +1,400 @@
-# Copyright (C) 2015-2020 The Software Heritage developers
+# Copyright (C) 2015-2022 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

from contextlib import contextmanager
import queue
import threading
from unittest.mock import Mock

import attr
import pytest

from swh.model.model import Person
-from swh.storage.postgresql.db import Db
from swh.storage.tests.storage_tests import TestStorage as _TestStorage
from swh.storage.tests.storage_tests import TestStorageGeneratedData  # noqa
from swh.storage.utils import now


@contextmanager
def db_transaction(storage):
    with storage.db() as db:
        with db.transaction() as cur:
            yield db, cur


class TestStorage(_TestStorage):
    @pytest.mark.skip(
        "Directory pagination is not implemented in the postgresql backend yet."
    )
    def test_directory_get_entries_pagination(self):
        pass


@pytest.mark.db
class TestLocalStorage:
    """Test the local storage"""

    # This test is only relevant on the local storage, with an actual
    # objstorage raising an exception
    def test_content_add_objstorage_exception(self, swh_storage, sample_data):
        content = sample_data.content

        swh_storage.objstorage.content_add = Mock(
            side_effect=Exception("mocked broken objstorage")
        )

        with pytest.raises(Exception, match="mocked broken"):
            swh_storage.content_add([content])

        missing = list(swh_storage.content_missing([content.hashes()]))
        assert missing == [content.sha1]


@pytest.mark.db
class TestStorageRaceConditions:
    @pytest.mark.xfail
    def test_content_add_race(self, swh_storage, sample_data):
        content = attr.evolve(sample_data.content, ctime=now())

        results = queue.Queue()

        def thread():
            try:
                with db_transaction(swh_storage) as (db, cur):
                    ret = swh_storage._content_add_metadata(db, cur, [content])
                results.put((threading.get_ident(), "data", ret))
            except Exception as e:
                results.put((threading.get_ident(), "exc", e))

        t1 = threading.Thread(target=thread)
        t2 = threading.Thread(target=thread)
        t1.start()
        # this avoids the race condition
        # import time
        # time.sleep(1)
        t2.start()
        t1.join()
        t2.join()

        r1 = results.get(block=False)
        r2 = results.get(block=False)

        with pytest.raises(queue.Empty):
            results.get(block=False)
        assert r1[0] != r2[0]
        assert r1[1] == "data", "Got exception %r in Thread%s" % (r1[2], r1[0])
        assert r2[1] == "data", "Got exception %r in Thread%s" % (r2[2], r2[0])


@pytest.mark.db
class TestPgStorage:
    """This class is dedicated to the rare cases where the schema needs to be
    altered dynamically.

    Otherwise, the tests could block each other when run all together.
    """

    def test_content_update_with_new_cols(self, swh_storage, sample_data):
        content, content2 = sample_data.contents[:2]

        swh_storage.journal_writer.journal = None  # TODO, not supported

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                """alter table content add column test text default null,
                   add column test2 text default null"""
            )

        swh_storage.content_add([content])

        cont = content.to_dict()
        cont["test"] = "value-1"
        cont["test2"] = "value-2"
        swh_storage.content_update([cont], keys=["test", "test2"])

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                """SELECT sha1, sha1_git, sha256, length, status, test, test2
                   FROM content WHERE sha1 = %s""",
                (cont["sha1"],),
            )
            datum = cur.fetchone()

        assert datum == (
            cont["sha1"],
            cont["sha1_git"],
            cont["sha256"],
            cont["length"],
            "visible",
            cont["test"],
            cont["test2"],
        )

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                """alter table content drop column test,
                   drop column test2"""
            )

    def test_content_add_db(self, swh_storage, sample_data):
        content = sample_data.content

        actual_result = swh_storage.content_add([content])

        assert actual_result == {
            "content:add": 1,
            "content:add:bytes": content.length,
        }

        if hasattr(swh_storage, "objstorage"):
            assert content.sha1 in swh_storage.objstorage.objstorage

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                "SELECT sha1, sha1_git, sha256, length, status"
                " FROM content WHERE sha1 = %s",
                (content.sha1,),
            )
            datum = cur.fetchone()

        assert datum == (
            content.sha1,
            content.sha1_git,
            content.sha256,
            content.length,
            "visible",
        )

        contents = [
            obj
            for (obj_type, obj) in swh_storage.journal_writer.journal.objects
            if obj_type == "content"
        ]
        assert len(contents) == 1
        assert contents[0] == attr.evolve(content, data=None)
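# test_content_add_race() above collects per-thread outcomes through a Queue so
# the main thread can assert on them after join(); exceptions are passed back
# as values instead of being lost inside the worker. The harness pattern on its
# own, with a trivial workload standing in for _content_add_metadata():
import queue
import threading

results: "queue.Queue" = queue.Queue()

def worker() -> None:
    try:
        results.put((threading.get_ident(), "data", 1 + 1))
    except Exception as e:  # report failures instead of dying silently
        results.put((threading.get_ident(), "exc", e))

threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()

assert [results.get(block=False)[1] for _ in range(2)] == ["data", "data"]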
    def test_content_add_metadata_db(self, swh_storage, sample_data):
        content = attr.evolve(sample_data.content, data=None, ctime=now())

        actual_result = swh_storage.content_add_metadata([content])

        assert actual_result == {
            "content:add": 1,
        }

        if hasattr(swh_storage, "objstorage"):
            assert content.sha1 not in swh_storage.objstorage.objstorage

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                "SELECT sha1, sha1_git, sha256, length, status"
                " FROM content WHERE sha1 = %s",
                (content.sha1,),
            )
            datum = cur.fetchone()

        assert datum == (
            content.sha1,
            content.sha1_git,
            content.sha256,
            content.length,
            "visible",
        )

        contents = [
            obj
            for (obj_type, obj) in swh_storage.journal_writer.journal.objects
            if obj_type == "content"
        ]
        assert len(contents) == 1
        assert contents[0] == content

    def test_skipped_content_add_db(self, swh_storage, sample_data):
        content, cont2 = sample_data.skipped_contents[:2]
        content2 = attr.evolve(cont2, blake2s256=None)

        actual_result = swh_storage.skipped_content_add([content, content, content2])

        assert 2 <= actual_result.pop("skipped_content:add") <= 3
        assert actual_result == {}

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                "SELECT sha1, sha1_git, sha256, blake2s256, "
                "length, status, reason "
                "FROM skipped_content ORDER BY sha1_git"
            )
            dbdata = cur.fetchall()

        assert len(dbdata) == 2
        assert dbdata[0] == (
            content.sha1,
            content.sha1_git,
            content.sha256,
            content.blake2s256,
            content.length,
            "absent",
            "Content too long",
        )

        assert dbdata[1] == (
            content2.sha1,
            content2.sha1_git,
            content2.sha256,
            content2.blake2s256,
            content2.length,
            "absent",
            "Content too long",
        )

    def test_revision_get_displayname_behavior(self, swh_storage, sample_data):
        """Check revision_get behavior when displayname is set"""
        revision, revision2 = sample_data.revisions[:2]

        # Make authors and committers known
        revision = attr.evolve(
            revision,
            author=Person.from_fullname(b"author1 <author1@example.com>"),
            committer=Person.from_fullname(b"committer1 <committer1@example.com>"),
        )
        revision = attr.evolve(revision, id=revision.compute_hash())
        revision2 = attr.evolve(
            revision2,
            author=Person.from_fullname(b"author2 <author2@example.com>"),
            committer=Person.from_fullname(b"committer2 <committer2@example.com>"),
        )
        revision2 = attr.evolve(revision2, id=revision2.compute_hash())

        add_result = swh_storage.revision_add([revision, revision2])
        assert add_result == {"revision:add": 2}

        # Before displayname change
        revisions = swh_storage.revision_get([revision.id, revision2.id])
        assert revisions == [revision, revision2]

        displayname = b"Display Name <displayname@example.com>"

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                "UPDATE person set displayname = %s where fullname = %s",
                (displayname, revision.author.fullname),
            )

        revisions = swh_storage.revision_get([revision.id, revision2.id])
        assert revisions == [
            attr.evolve(revision, author=Person.from_fullname(displayname)),
            revision2,
        ]

        revisions = swh_storage.revision_get(
            [revision.id, revision2.id], ignore_displayname=True
        )
        assert revisions == [revision, revision2]
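# The displayname tests above always re-derive the object id right after
# evolving a revision or release: the id is a hash over the fields, so any
# field change invalidates it. A toy version of that "evolve, then re-hash"
# discipline (sha1 over the author stands in for the real model hashing):
import dataclasses
import hashlib

@dataclasses.dataclass(frozen=True)
class Rev:
    author: bytes
    id: bytes = b""

    def compute_hash(self) -> bytes:
        return hashlib.sha1(self.author).digest()

rev = Rev(author=b"author1 <author1@example.com>")
rev = dataclasses.replace(rev, id=rev.compute_hash())  # keep id consistent
assert rev.id == hashlib.sha1(rev.author).digest()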
    def test_revision_log_displayname_behavior(self, swh_storage, sample_data):
        """Check revision_log behavior when displayname is set"""
        revision, revision2 = sample_data.revisions[:2]

        # Make authors, committers and parenthood relationship known
        # (revision2 -[parent]-> revision1)
        revision = attr.evolve(
            revision,
            author=Person.from_fullname(b"author1 <author1@example.com>"),
            committer=Person.from_fullname(b"committer1 <committer1@example.com>"),
        )
        revision = attr.evolve(revision, id=revision.compute_hash())
        revision2 = attr.evolve(
            revision2,
            parents=(revision.id,),
            author=Person.from_fullname(b"author2 <author2@example.com>"),
            committer=Person.from_fullname(b"committer2 <committer2@example.com>"),
        )
        revision2 = attr.evolve(revision2, id=revision2.compute_hash())

        add_result = swh_storage.revision_add([revision, revision2])
        assert add_result == {"revision:add": 2}

        # Before displayname change
        revisions = swh_storage.revision_log([revision2.id])
        assert list(revisions) == [revision2.to_dict(), revision.to_dict()]

        displayname = b"Display Name <displayname@example.com>"

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                "UPDATE person set displayname = %s where fullname = %s",
                (displayname, revision.author.fullname),
            )

        revisions = swh_storage.revision_log([revision2.id])
        assert list(revisions) == [
            revision2.to_dict(),
            attr.evolve(revision, author=Person.from_fullname(displayname)).to_dict(),
        ]

        revisions = swh_storage.revision_log([revision2.id], ignore_displayname=True)
        assert list(revisions) == [revision2.to_dict(), revision.to_dict()]

    def test_release_get_displayname_behavior(self, swh_storage, sample_data):
        """Check release_get behavior when displayname is set"""
        release, release2 = sample_data.releases[:2]

        # Make authors known
        release = attr.evolve(
            release,
            author=Person.from_fullname(b"author1 <author1@example.com>"),
        )
        release = attr.evolve(release, id=release.compute_hash())
        release2 = attr.evolve(
            release2,
            author=Person.from_fullname(b"author2 <author2@example.com>"),
        )
        release2 = attr.evolve(release2, id=release2.compute_hash())

        add_result = swh_storage.release_add([release, release2])
        assert add_result == {"release:add": 2}

        # Before displayname change
        releases = swh_storage.release_get([release.id, release2.id])
        assert releases == [release, release2]

        displayname = b"Display Name <displayname@example.com>"

        with db_transaction(swh_storage) as (_, cur):
            cur.execute(
                "UPDATE person set displayname = %s where fullname = %s",
                (displayname, release.author.fullname),
            )

        releases = swh_storage.release_get([release.id, release2.id])
        assert releases == [
            attr.evolve(release, author=Person.from_fullname(displayname)),
            release2,
        ]

        releases = swh_storage.release_get(
            [release.id, release2.id], ignore_displayname=True
        )
        assert releases == [release, release2]

    def test_clear_buffers(self, swh_storage):
        """Calling clear_buffers on the real storage does nothing"""
        assert swh_storage.clear_buffers() is None

    def test_flush(self, swh_storage):
        """Calling flush on the real storage does nothing"""
        assert swh_storage.flush() == {}

-    def test_dbversion(self, swh_storage):
-        with swh_storage.db() as db:
-            assert db.check_dbversion()
-
-    def test_dbversion_mismatch(self, swh_storage, monkeypatch):
-        monkeypatch.setattr(Db, "current_version", -1)
-        with swh_storage.db() as db:
-            assert db.check_dbversion() is False
-
    def test_check_config(self, swh_storage):
        assert swh_storage.check_config(check_write=True)
        assert swh_storage.check_config(check_write=False)

    def test_check_config_dbversion(self, swh_storage, monkeypatch):
-        monkeypatch.setattr(Db, "current_version", -1)
+        swh_storage.current_version = -1
        assert swh_storage.check_config(check_write=True) is False
        assert swh_storage.check_config(check_write=False) is False
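# test_check_config_dbversion() relies on check_config() comparing the schema
# version the code expects (current_version, now carried by the Storage class)
# with the version recorded in the database; forcing current_version to -1
# makes any real database look mismatched. The comparison itself reduces to
# something like this sketch (illustrative names, not the actual swh.core API):
def check_dbversion(current_version: int, dbversion: int) -> bool:
    # a mismatch means the code and the applied schema migrations are out of sync
    return current_version == dbversion

assert check_dbversion(182, 182) is True
assert check_dbversion(-1, 182) is False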