diff --git a/swh/scrubber/db.py b/swh/scrubber/db.py
index 6b2d767..5e69fc8 100644
--- a/swh/scrubber/db.py
+++ b/swh/scrubber/db.py
@@ -1,452 +1,527 @@
# Copyright (C) 2022 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information

import dataclasses
import datetime
import functools
-from typing import Iterable, Iterator, List, Optional
+from typing import Iterable, Iterator, List, Optional, Tuple

import psycopg2

from swh.core.db import BaseDb
from swh.model.swhids import CoreSWHID


@dataclasses.dataclass(frozen=True)
class Datastore:
    """Represents a datastore being scrubbed; eg. swh-storage or swh-journal."""

    package: str
    """'storage', 'journal', or 'objstorage'."""
    cls: str
    """'postgresql'/'cassandra' for storage, 'kafka' for journal,
    'pathslicer'/'winery'/... for objstorage."""
    instance: str
    """Human readable string."""


@dataclasses.dataclass(frozen=True)
class CorruptObject:
    id: CoreSWHID
    datastore: Datastore
    first_occurrence: datetime.datetime
    object_: bytes


@dataclasses.dataclass(frozen=True)
class MissingObject:
    id: CoreSWHID
    datastore: Datastore
    first_occurrence: datetime.datetime


@dataclasses.dataclass(frozen=True)
class MissingObjectReference:
    missing_id: CoreSWHID
    reference_id: CoreSWHID
    datastore: Datastore
    first_occurrence: datetime.datetime


@dataclasses.dataclass(frozen=True)
class FixedObject:
    id: CoreSWHID
    object_: bytes
    method: str
    recovery_date: Optional[datetime.datetime] = None


class ScrubberDb(BaseDb):
-    current_version = 3
+    current_version = 4

    ####################################
    # Shared tables
    ####################################

    @functools.lru_cache(1000)
    def datastore_get_or_add(self, datastore: Datastore) -> int:
        """Creates a datastore if it does not exist, and returns its id."""
        with self.transaction() as cur:
            cur.execute(
                """
                WITH inserted AS (
                    INSERT INTO datastore (package, class, instance)
                    VALUES (%(package)s, %(cls)s, %(instance)s)
                    ON CONFLICT DO NOTHING
                    RETURNING id
                )
                SELECT id
                FROM inserted
                UNION (
                    -- If the datastore already exists, we need to fetch its id
                    SELECT id
                    FROM datastore
                    WHERE
                        package=%(package)s
                        AND class=%(cls)s
                        AND instance=%(instance)s
                )
                LIMIT 1
                """,
                (dataclasses.asdict(datastore)),
            )
            res = cur.fetchone()
            assert res is not None
            (id_,) = res
            return id_

+    ####################################
+    # Checkpointing/progress tracking
+    ####################################
+
+    def checked_range_upsert(
+        self,
+        datastore: Datastore,
+        range_start: CoreSWHID,
+        range_end: CoreSWHID,
+        date: datetime.datetime,
+    ) -> None:
+        """
+        Records in the database that the given range was last checked at the
+        given date.
+        """
+        datastore_id = self.datastore_get_or_add(datastore)
+        with self.transaction() as cur:
+            cur.execute(
+                """
+                INSERT INTO checked_range(datastore, range_start, range_end, last_date)
+                VALUES (%s, %s, %s, %s)
+                ON CONFLICT (datastore, range_start, range_end) DO UPDATE
+                SET last_date = GREATEST(checked_range.last_date, EXCLUDED.last_date)
+                """,
+                (datastore_id, str(range_start), str(range_end), date),
+            )
+
+    def checked_range_get_last_date(
+        self, datastore: Datastore, range_start: CoreSWHID, range_end: CoreSWHID
+    ) -> Optional[datetime.datetime]:
+        """
+        Returns the last date the given range was checked in the given datastore,
+        or :const:`None` if it was never checked.
+
+        Currently, this matches range boundaries exactly, with no regard for
+        ranges that contain or are contained by the given range.
+        """
+        datastore_id = self.datastore_get_or_add(datastore)
+        with self.transaction() as cur:
+            cur.execute(
+                """
+                SELECT last_date
+                FROM checked_range
+                WHERE datastore=%s AND range_start=%s AND range_end=%s
+                """,
+                (datastore_id, str(range_start), str(range_end)),
+            )
+
+            res = cur.fetchone()
+            if res is None:
+                return None
+            else:
+                (date,) = res
+                return date
+
+    def checked_range_iter(
+        self, datastore: Datastore
+    ) -> Iterator[Tuple[CoreSWHID, CoreSWHID, datetime.datetime]]:
+        datastore_id = self.datastore_get_or_add(datastore)
+        with self.transaction() as cur:
+            cur.execute(
+                """
+                SELECT range_start, range_end, last_date
+                FROM checked_range
+                WHERE datastore=%s
+                """,
+                (datastore_id,),
+            )
+
+            for (range_start, range_end, last_date) in cur:
+                yield (
+                    CoreSWHID.from_string(range_start),
+                    CoreSWHID.from_string(range_end),
+                    last_date,
+                )
+
    ####################################
    # Inventory of objects with issues
    ####################################

    def corrupt_object_add(
        self,
        id: CoreSWHID,
        datastore: Datastore,
        serialized_object: bytes,
    ) -> None:
        datastore_id = self.datastore_get_or_add(datastore)
        with self.transaction() as cur:
            cur.execute(
                """
                INSERT INTO corrupt_object (id, datastore, object)
                VALUES (%s, %s, %s)
                ON CONFLICT DO NOTHING
                """,
                (str(id), datastore_id, serialized_object),
            )

    def corrupt_object_iter(self) -> Iterator[CorruptObject]:
        """Yields all records in the 'corrupt_object' table."""
        with self.transaction() as cur:
            cur.execute(
                """
                SELECT
                    co.id, co.first_occurrence, co.object,
                    ds.package, ds.class, ds.instance
                FROM corrupt_object AS co
                INNER JOIN datastore AS ds ON (ds.id=co.datastore)
                """
            )

            for row in cur:
                (id, first_occurrence, object_, ds_package, ds_class, ds_instance) = row
                yield CorruptObject(
                    id=CoreSWHID.from_string(id),
                    first_occurrence=first_occurrence,
                    object_=object_,
                    datastore=Datastore(
                        package=ds_package, cls=ds_class, instance=ds_instance
                    ),
                )

    def _corrupt_object_list_from_cursor(
        self, cur: psycopg2.extensions.cursor
    ) -> List[CorruptObject]:
        results = []
        for row in cur:
            (id, first_occurrence, object_, ds_package, ds_class, ds_instance) = row
            results.append(
                CorruptObject(
                    id=CoreSWHID.from_string(id),
                    first_occurrence=first_occurrence,
                    object_=object_,
                    datastore=Datastore(
                        package=ds_package, cls=ds_class, instance=ds_instance
                    ),
                )
            )
        return results

    def corrupt_object_get(
        self,
        start_id: CoreSWHID,
        end_id: CoreSWHID,
        limit: int = 100,
    ) -> List[CorruptObject]:
        """Returns a page of records in the 'corrupt_object' table, ordered by id.

        Arguments:
            start_id: Only return objects after this id
            end_id: Only return objects before this id
            limit: Maximum number of objects to return
        """
        with self.transaction() as cur:
            cur.execute(
                """
                SELECT
                    co.id, co.first_occurrence, co.object,
                    ds.package, ds.class, ds.instance
                FROM corrupt_object AS co
                INNER JOIN datastore AS ds ON (ds.id=co.datastore)
                WHERE co.id >= %s AND co.id <= %s
                ORDER BY co.id
                LIMIT %s
                """,
                (str(start_id), str(end_id), limit),
            )
            return self._corrupt_object_list_from_cursor(cur)

    def corrupt_object_grab_by_id(
        self,
        cur: psycopg2.extensions.cursor,
        start_id: CoreSWHID,
        end_id: CoreSWHID,
        limit: int = 100,
    ) -> List[CorruptObject]:
        """Returns a page of records in the 'corrupt_object' table for a fixer,
        ordered by id.
        These records are not already fixed (ie. do not have a corresponding
        entry in the 'fixed_object' table), and they are selected with an
        exclusive update lock.

        Arguments:
            start_id: Only return objects after this id
            end_id: Only return objects before this id
            limit: Maximum number of objects to return
        """
        cur.execute(
            """
            SELECT
                co.id, co.first_occurrence, co.object,
                ds.package, ds.class, ds.instance
            FROM corrupt_object AS co
            INNER JOIN datastore AS ds ON (ds.id=co.datastore)
            WHERE
                co.id >= %(start_id)s
                AND co.id <= %(end_id)s
                AND NOT EXISTS (SELECT 1 FROM fixed_object WHERE fixed_object.id=co.id)
            ORDER BY co.id
            LIMIT %(limit)s
            FOR UPDATE SKIP LOCKED
            """,
            dict(
                start_id=str(start_id),
                end_id=str(end_id),
                limit=limit,
            ),
        )
        return self._corrupt_object_list_from_cursor(cur)

    def corrupt_object_grab_by_origin(
        self,
        cur: psycopg2.extensions.cursor,
        origin_url: str,
        start_id: Optional[CoreSWHID] = None,
        end_id: Optional[CoreSWHID] = None,
        limit: int = 100,
    ) -> List[CorruptObject]:
        """Returns a page of records in the 'corrupt_object' table for a fixer,
        ordered by id.

        These records are not already fixed (ie. do not have a corresponding
        entry in the 'fixed_object' table), and they are selected with an
        exclusive update lock.

        Arguments:
            origin_url: only returns objects that may be found in the given origin
        """
        cur.execute(
            """
            SELECT
                co.id, co.first_occurrence, co.object,
                ds.package, ds.class, ds.instance
            FROM corrupt_object AS co
            INNER JOIN datastore AS ds ON (ds.id=co.datastore)
            INNER JOIN object_origin AS oo ON (oo.object_id=co.id)
            WHERE
                (co.id >= %(start_id)s OR %(start_id)s IS NULL)
                AND (co.id <= %(end_id)s OR %(end_id)s IS NULL)
                AND NOT EXISTS (SELECT 1 FROM fixed_object WHERE fixed_object.id=co.id)
                AND oo.origin_url=%(origin_url)s
            ORDER BY co.id
            LIMIT %(limit)s
            FOR UPDATE SKIP LOCKED
            """,
            dict(
                start_id=None if start_id is None else str(start_id),
                end_id=None if end_id is None else str(end_id),
                origin_url=origin_url,
                limit=limit,
            ),
        )
        return self._corrupt_object_list_from_cursor(cur)

    def missing_object_add(
        self,
        id: CoreSWHID,
        reference_ids: Iterable[CoreSWHID],
        datastore: Datastore,
    ) -> None:
        """
        Adds a "hole" to the inventory, ie. an object missing from a datastore
        that is referenced by another object of the same datastore.

        If the missing object is already known to be missing by the scrubber
        database, this only records the reference (which can be useful to
        locate an origin to recover the object from). If that reference is
        already known too, this is a noop.

        Args:
            id: SWHID of the missing object (the hole)
            reference_ids: SWHIDs of the objects referencing the missing object
            datastore: representation of the swh-storage/swh-journal/...
                instance containing this hole
        """
        if not reference_ids:
            raise ValueError("reference_ids is empty")
        datastore_id = self.datastore_get_or_add(datastore)
        with self.transaction() as cur:
            cur.execute(
                """
                INSERT INTO missing_object (id, datastore)
                VALUES (%s, %s)
                ON CONFLICT DO NOTHING
                """,
                (str(id), datastore_id),
            )
            psycopg2.extras.execute_batch(
                cur,
                """
                INSERT INTO missing_object_reference (missing_id, reference_id, datastore)
                VALUES (%s, %s, %s)
                ON CONFLICT DO NOTHING
                """,
                [
                    (str(id), str(reference_id), datastore_id)
                    for reference_id in reference_ids
                ],
            )

    def missing_object_iter(self) -> Iterator[MissingObject]:
        """Yields all records in the 'missing_object' table."""
        with self.transaction() as cur:
            cur.execute(
                """
                SELECT
                    mo.id, mo.first_occurrence,
                    ds.package, ds.class, ds.instance
                FROM missing_object AS mo
                INNER JOIN datastore AS ds ON (ds.id=mo.datastore)
                """
            )

            for row in cur:
                (id, first_occurrence, ds_package, ds_class, ds_instance) = row
                yield MissingObject(
                    id=CoreSWHID.from_string(id),
                    first_occurrence=first_occurrence,
                    datastore=Datastore(
                        package=ds_package, cls=ds_class, instance=ds_instance
                    ),
                )

    def missing_object_reference_iter(
        self, missing_id: CoreSWHID
    ) -> Iterator[MissingObjectReference]:
        """Yields all records in the 'missing_object_reference' table that
        reference the given missing object."""
        with self.transaction() as cur:
            cur.execute(
                """
                SELECT
                    mor.reference_id, mor.first_occurrence,
                    ds.package, ds.class, ds.instance
                FROM missing_object_reference AS mor
                INNER JOIN datastore AS ds ON (ds.id=mor.datastore)
                WHERE mor.missing_id=%s
                """,
                (str(missing_id),),
            )

            for row in cur:
                (
                    reference_id,
                    first_occurrence,
                    ds_package,
                    ds_class,
                    ds_instance,
                ) = row
                yield MissingObjectReference(
                    missing_id=missing_id,
                    reference_id=CoreSWHID.from_string(reference_id),
                    first_occurrence=first_occurrence,
                    datastore=Datastore(
                        package=ds_package, cls=ds_class, instance=ds_instance
                    ),
                )

    ####################################
    # Issue resolution
    ####################################

    def object_origin_add(
        self, cur: psycopg2.extensions.cursor, swhid: CoreSWHID, origins: List[str]
    ) -> None:
        psycopg2.extras.execute_values(
            cur,
            """
            INSERT INTO object_origin (object_id, origin_url)
            VALUES %s
            ON CONFLICT DO NOTHING
            """,
            [(str(swhid), origin_url) for origin_url in origins],
        )

    def object_origin_get(self, after: str = "", limit: int = 1000) -> List[str]:
        """Returns origins with non-fixed corrupt objects, ordered by URL.
        Arguments:
            after: if given, only returns origins with a URL after this value
            limit: maximum number of origins to return
        """
        with self.transaction() as cur:
            cur.execute(
                """
                SELECT DISTINCT origin_url
                FROM object_origin
                WHERE
                    origin_url > %(after)s
                    AND object_id IN (
                        (SELECT id FROM corrupt_object)
                        EXCEPT (SELECT id FROM fixed_object)
                    )
                ORDER BY origin_url
                LIMIT %(limit)s
                """,
                dict(after=after, limit=limit),
            )

            return [origin_url for (origin_url,) in cur]

    def fixed_object_add(
        self, cur: psycopg2.extensions.cursor, fixed_objects: List[FixedObject]
    ) -> None:
        psycopg2.extras.execute_values(
            cur,
            """
            INSERT INTO fixed_object (id, object, method)
            VALUES %s
            ON CONFLICT DO NOTHING
            """,
            [
                (str(fixed_object.id), fixed_object.object_, fixed_object.method)
                for fixed_object in fixed_objects
            ],
        )

    def fixed_object_iter(self) -> Iterator[FixedObject]:
        with self.transaction() as cur:
            cur.execute("SELECT id, object, method, recovery_date FROM fixed_object")
            for (id, object_, method, recovery_date) in cur:
                yield FixedObject(
                    id=CoreSWHID.from_string(id),
                    object_=object_,
                    method=method,
                    recovery_date=recovery_date,
                )
diff --git a/swh/scrubber/sql/30-schema.sql b/swh/scrubber/sql/30-schema.sql
index a67eb67..b28ea3c 100644
--- a/swh/scrubber/sql/30-schema.sql
+++ b/swh/scrubber/sql/30-schema.sql
@@ -1,90 +1,107 @@
-------------------------------------
-- Shared definitions
-------------------------------------

create domain swhid as text check (value ~ '^swh:[0-9]+:.*');

create table datastore
(
  id                bigserial not null,
  package           datastore_type not null,
  class             text,
  instance          text
);

comment on table datastore is 'Each row identifies a data store being scrubbed';
comment on column datastore.id is 'Internal identifier of the datastore';
comment on column datastore.package is 'Name of the component using this datastore (storage/journal/objstorage)';
comment on column datastore.class is 'For datastores with multiple backends, name of the backend (postgresql/cassandra for storage, kafka for journal, pathslicer/azure/winery/... for objstorage)';
comment on column datastore.instance is 'Human-readable way to uniquely identify the datastore; eg. its URL or DSN.';
+-------------------------------------
+-- Checkpointing/progress tracking
+-------------------------------------
+
+create table checked_range
+(
+  datastore         int not null,
+  range_start       swhid not null,
+  range_end         swhid not null,
+  last_date         timestamptz not null
+);
+
+comment on table checked_range is 'Each row represents a range of objects in a datastore that were fetched, checksummed, and checked at some point in the past.';
+comment on column checked_range.range_start is 'First SWHID of the range that was checked (inclusive, possibly non-existent).';
+comment on column checked_range.range_end is 'Last SWHID of the range that was checked (inclusive, possibly non-existent).';
+comment on column checked_range.last_date is 'Date the last scrub of that range *started*.';

-------------------------------------
-- Inventory of objects with issues
-------------------------------------

create table corrupt_object
(
  id                swhid not null,
  datastore         int not null,
  object            bytea not null,
  first_occurrence  timestamptz not null default now()
);

comment on table corrupt_object is 'Each row identifies an object that was found to be corrupt';
comment on column corrupt_object.datastore is 'Datastore the corrupt object was found in.';
comment on column corrupt_object.object is 'Corrupt object, as found in the datastore (possibly msgpack-encoded, using the journal''s serializer)';
comment on column corrupt_object.first_occurrence is 'Moment the object was found to be corrupt for the first time';

create table missing_object
(
  id                swhid not null,
  datastore         int not null,
  first_occurrence  timestamptz not null default now()
);

comment on table missing_object is 'Each row identifies an object that is missing but referenced by another object (aka a "hole")';
comment on column missing_object.datastore is 'Datastore where the hole is.';
comment on column missing_object.first_occurrence is 'Moment the object was found to be missing for the first time';

create table missing_object_reference
(
  missing_id        swhid not null,
  reference_id      swhid not null,
  datastore         int not null,
  first_occurrence  timestamptz not null default now()
);

comment on table missing_object_reference is 'Each row identifies an object that points to an object that does not exist (aka a "hole")';
comment on column missing_object_reference.missing_id is 'SWHID of the missing object.';
comment on column missing_object_reference.reference_id is 'SWHID of the object referencing the missing object.';
comment on column missing_object_reference.datastore is 'Datastore where the referencing object is.';
comment on column missing_object_reference.first_occurrence is 'Moment the object was found to reference a missing object';

-------------------------------------
-- Issue resolution
-------------------------------------

create table object_origin
(
  object_id         swhid not null,
  origin_url        text not null,
  last_attempt      timestamptz -- NULL if not tried yet
);

comment on table object_origin is 'Maps objects to origins they might be found in.';

create table fixed_object
(
  id                swhid not null,
  object            bytea not null,
  method            text,
  recovery_date     timestamptz not null default now()
);

comment on table fixed_object is 'Each row identifies an object that was found to be corrupt, along with the original version of the object';
comment on column fixed_object.object is 'The recovered object itself, as a msgpack-encoded dict';
comment on column fixed_object.recovery_date is 'Moment the object was recovered.';
comment on column fixed_object.method is 'How the object was recovered. For example: "from_origin", "negative_utc", "capitalized_revision_parent".';
diff --git a/swh/scrubber/sql/60-indexes.sql b/swh/scrubber/sql/60-indexes.sql
index 88b5e61..98694cc 100644
--- a/swh/scrubber/sql/60-indexes.sql
+++ b/swh/scrubber/sql/60-indexes.sql
@@ -1,59 +1,65 @@
-------------------------------------
-- Shared tables
-------------------------------------

-- datastore

create unique index concurrently datastore_pkey on datastore(id);
alter table datastore add primary key using index datastore_pkey;

create unique index concurrently datastore_package_class_instance on datastore(package, class, instance);

+-------------------------------------
+-- Checkpointing/progress tracking
+-------------------------------------
+
+create unique index concurrently checked_range_pkey on checked_range(datastore, range_start, range_end);
+alter table checked_range add primary key using index checked_range_pkey;
+
-------------------------------------
-- Inventory of objects with issues
-------------------------------------

-- corrupt_object

alter table corrupt_object add constraint corrupt_object_datastore_fkey foreign key (datastore) references datastore(id) not valid;
alter table corrupt_object validate constraint corrupt_object_datastore_fkey;

create unique index concurrently corrupt_object_pkey on corrupt_object(id, datastore);
alter table corrupt_object add primary key using index corrupt_object_pkey;

-- missing_object

alter table missing_object add constraint missing_object_datastore_fkey foreign key (datastore) references datastore(id) not valid;
alter table missing_object validate constraint missing_object_datastore_fkey;

create unique index concurrently missing_object_pkey on missing_object(id, datastore);
alter table missing_object add primary key using index missing_object_pkey;

-- missing_object_reference

alter table missing_object_reference add constraint missing_object_reference_datastore_fkey foreign key (datastore) references datastore(id) not valid;
alter table missing_object_reference validate constraint missing_object_reference_datastore_fkey;

create unique index concurrently missing_object_reference_missing_id_reference_id_datastore on missing_object_reference(missing_id, reference_id, datastore);
create unique index concurrently missing_object_reference_reference_id_missing_id_datastore on missing_object_reference(reference_id, missing_id, datastore);

-------------------------------------
-- Issue resolution
-------------------------------------

-- object_origin

create unique index concurrently object_origin_pkey on object_origin (object_id, origin_url);
create index concurrently object_origin_by_origin on object_origin (origin_url, object_id);

-- FIXME: not valid, because corrupt_object(id) is not unique
-- alter table object_origin add constraint object_origin_object_fkey foreign key (object_id) references corrupt_object(id) not valid;
-- alter table object_origin validate constraint object_origin_object_fkey;

-- fixed_object

create unique index concurrently fixed_object_pkey on fixed_object(id);
alter table fixed_object add primary key using index fixed_object_pkey;
diff --git a/swh/scrubber/sql/upgrades/4.sql b/swh/scrubber/sql/upgrades/4.sql
new file mode 100644
index 0000000..9dc7e2f
--- /dev/null
+++ b/swh/scrubber/sql/upgrades/4.sql
@@ -0,0 +1,21 @@
+-- SWH Scrubber DB schema upgrade
+-- from_version: 3
+-- to_version: 4
+-- description: Add checked_range
+
+
+create table checked_range
+(
+  datastore         int not null,
+  range_start       swhid not null,
+  range_end         swhid not null,
+  last_date         timestamptz not null
+);
+
+comment on table checked_range is 'Each row represents a range of objects in a datastore that were fetched, checksummed, and checked at some point in the past.';
+comment on column checked_range.range_start is 'First SWHID of the range that was checked (inclusive, possibly non-existent).';
+comment on column checked_range.range_end is 'Last SWHID of the range that was checked (inclusive, possibly non-existent).';
+comment on column checked_range.last_date is 'Date the last scrub of that range *started*.';
+
+create unique index concurrently checked_range_pkey on checked_range(datastore, range_start, range_end);
+alter table checked_range add primary key using index checked_range_pkey;
diff --git a/swh/scrubber/tests/test_db.py b/swh/scrubber/tests/test_db.py
new file mode 100644
index 0000000..2406575
--- /dev/null
+++ b/swh/scrubber/tests/test_db.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2022 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+import datetime
+
+from swh.model import swhids
+from swh.scrubber.db import Datastore, ScrubberDb
+
+DATASTORE = Datastore(package="storage", cls="postgresql", instance="service=swh-test")
+SNP_SWHID1 = swhids.CoreSWHID.from_string(
+    "swh:1:snp:5000000000000000000000000000000000000000"
+)
+SNP_SWHID2 = swhids.CoreSWHID.from_string(
+    "swh:1:snp:e000000000000000000000000000000000000000"
+)
+DATE = datetime.datetime(2022, 10, 4, 12, 1, 23, tzinfo=datetime.timezone.utc)
+
+
+def test_checked_range_insert(scrubber_db: ScrubberDb):
+    scrubber_db.checked_range_upsert(DATASTORE, SNP_SWHID1, SNP_SWHID2, DATE)
+
+    assert list(scrubber_db.checked_range_iter(DATASTORE)) == [
+        (SNP_SWHID1, SNP_SWHID2, DATE)
+    ]
+
+
+def test_checked_range_update(scrubber_db: ScrubberDb):
+    scrubber_db.checked_range_upsert(DATASTORE, SNP_SWHID1, SNP_SWHID2, DATE)
+
+    date2 = DATE + datetime.timedelta(days=1)
+    scrubber_db.checked_range_upsert(DATASTORE, SNP_SWHID1, SNP_SWHID2, date2)
+
+    assert list(scrubber_db.checked_range_iter(DATASTORE)) == [
+        (SNP_SWHID1, SNP_SWHID2, date2)
+    ]
+
+    date3 = DATE + datetime.timedelta(days=-1)
+    scrubber_db.checked_range_upsert(DATASTORE, SNP_SWHID1, SNP_SWHID2, date3)
+
+    assert list(scrubber_db.checked_range_iter(DATASTORE)) == [
+        (SNP_SWHID1, SNP_SWHID2, date2)  # newest date wins
+    ]
+
+
+def test_checked_range_get(scrubber_db: ScrubberDb):
+    assert (
+        scrubber_db.checked_range_get_last_date(DATASTORE, SNP_SWHID1, SNP_SWHID2)
+        is None
+    )
+
+    scrubber_db.checked_range_upsert(DATASTORE, SNP_SWHID1, SNP_SWHID2, DATE)
+
+    assert (
+        scrubber_db.checked_range_get_last_date(DATASTORE, SNP_SWHID1, SNP_SWHID2)
+        == DATE
+    )
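
Usage sketch: how a checker might drive the new checkpointing API to skip recently-scrubbed ranges. This is hypothetical driver code rather than part of the change; ScrubberDb.connect is inherited from swh.core.db.BaseDb, and the DSN, datastore description, range bounds, and re-check interval below are all placeholder values.

    import datetime

    from swh.model.swhids import CoreSWHID
    from swh.scrubber.db import Datastore, ScrubberDb

    # Connect to the scrubber database (placeholder DSN).
    db = ScrubberDb.connect("dbname=swh-scrubber")
    datastore = Datastore(package="storage", cls="postgresql", instance="service=swh")

    # Bounds of the snapshot range this worker is responsible for (placeholders).
    range_start = CoreSWHID.from_string("swh:1:snp:" + "00" * 20)
    range_end = CoreSWHID.from_string("swh:1:snp:" + "ff" * 20)

    now = datetime.datetime.now(tz=datetime.timezone.utc)
    last_date = db.checked_range_get_last_date(datastore, range_start, range_end)

    # Re-scrub only if the range was never checked, or not checked recently.
    if last_date is None or now - last_date > datetime.timedelta(days=90):
        scrub_started = now
        # ... fetch and check every object in [range_start, range_end] here ...
        db.checked_range_upsert(datastore, range_start, range_end, scrub_started)

Because checked_range_upsert keeps the greatest of the stored and submitted dates (the GREATEST(...) in its ON CONFLICT clause), a concurrent worker recording an older scrub-start date cannot move a range's checkpoint backwards.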