diff --git a/sql/upgrades/176.sql b/sql/upgrades/176.sql
new file mode 100644
index 00000000..56654f8a
--- /dev/null
+++ b/sql/upgrades/176.sql
@@ -0,0 +1,27 @@
+-- SWH DB schema upgrade
+-- from_version: 175
+-- to_version: 176
+-- description: add storage of the extid.extid_version field
+
+insert into dbversion(version, release, description)
+ values(176, now(), 'Work In Progress');
+
+alter table extid add column extid_version bigint not null default 0;
+
+comment on column extid.extid_version is 'Version of the extid for the given original object';
+
+create or replace function swh_extid_add()
+ returns void
+ language plpgsql
+as $$
+begin
+ insert into extid (extid_type, extid, extid_version, target_type, target)
+ select distinct t.extid_type, t.extid, t.extid_version, t.target_type, t.target
+ from tmp_extid t
+ on conflict do nothing;
+ return;
+end
+$$;
+
+create unique index concurrently on extid(extid_type, extid, extid_version, target_type, target);
+drop index extid_extid_type_extid_target_type_target_idx;
diff --git a/swh/storage/backfill.py b/swh/storage/backfill.py
index 79b14321..4315264e 100644
--- a/swh/storage/backfill.py
+++ b/swh/storage/backfill.py
@@ -1,649 +1,649 @@
# Copyright (C) 2017-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
"""Storage backfiller.
The backfiller's goal is to produce again part or all of the objects
from a storage to the journal topics.
The current implementation consists of the JournalBackfiller class.
It simply reads the objects from the storage and sends every object back to
the journal.
"""
import logging
from typing import Any, Callable, Dict, Optional
from swh.core.db import BaseDb
from swh.model.identifiers import ExtendedObjectType
from swh.model.model import (
BaseModel,
Directory,
DirectoryEntry,
ExtID,
RawExtrinsicMetadata,
Release,
Revision,
Snapshot,
SnapshotBranch,
TargetType,
)
from swh.storage.postgresql.converters import (
db_to_extid,
db_to_raw_extrinsic_metadata,
db_to_release,
db_to_revision,
)
from swh.storage.replay import object_converter_fn
from swh.storage.writer import JournalWriter
logger = logging.getLogger(__name__)
PARTITION_KEY = {
"content": "sha1",
"skipped_content": "sha1",
"directory": "id",
"extid": "target",
"metadata_authority": "type, url",
"metadata_fetcher": "name, version",
"raw_extrinsic_metadata": "target",
"revision": "revision.id",
"release": "release.id",
"snapshot": "id",
"origin": "id",
"origin_visit": "origin_visit.origin",
"origin_visit_status": "origin_visit_status.origin",
}
COLUMNS = {
"content": [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"status",
"ctime",
],
"skipped_content": [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
"reason",
],
"directory": ["id", "dir_entries", "file_entries", "rev_entries"],
- "extid": ["extid_type", "extid", "target_type", "target"],
+ "extid": ["extid_type", "extid", "extid_version", "target_type", "target"],
"metadata_authority": ["type", "url"],
"metadata_fetcher": ["name", "version"],
"origin": ["url"],
"origin_visit": ["visit", "type", ("origin.url", "origin"), "date",],
"origin_visit_status": [
("origin_visit_status.visit", "visit"),
("origin.url", "origin"),
("origin_visit_status.date", "date"),
"type",
"snapshot",
"status",
"metadata",
],
"raw_extrinsic_metadata": [
"raw_extrinsic_metadata.type",
"raw_extrinsic_metadata.target",
"metadata_authority.type",
"metadata_authority.url",
"metadata_fetcher.name",
"metadata_fetcher.version",
"discovery_date",
"format",
"raw_extrinsic_metadata.metadata",
"origin",
"visit",
"snapshot",
"release",
"revision",
"path",
"directory",
],
"revision": [
("revision.id", "id"),
"date",
"date_offset",
"date_neg_utc_offset",
"committer_date",
"committer_date_offset",
"committer_date_neg_utc_offset",
"type",
"directory",
"message",
"synthetic",
"metadata",
"extra_headers",
(
"array(select parent_id::bytea from revision_history rh "
"where rh.id = revision.id order by rh.parent_rank asc)",
"parents",
),
("a.id", "author_id"),
("a.name", "author_name"),
("a.email", "author_email"),
("a.fullname", "author_fullname"),
("c.id", "committer_id"),
("c.name", "committer_name"),
("c.email", "committer_email"),
("c.fullname", "committer_fullname"),
],
"release": [
("release.id", "id"),
"date",
"date_offset",
"date_neg_utc_offset",
"comment",
("release.name", "name"),
"synthetic",
"target",
"target_type",
("a.id", "author_id"),
("a.name", "author_name"),
("a.email", "author_email"),
("a.fullname", "author_fullname"),
],
"snapshot": ["id", "object_id"],
}
JOINS = {
"release": ["person a on release.author=a.id"],
"revision": [
"person a on revision.author=a.id",
"person c on revision.committer=c.id",
],
"origin_visit": ["origin on origin_visit.origin=origin.id"],
"origin_visit_status": ["origin on origin_visit_status.origin=origin.id",],
"raw_extrinsic_metadata": [
"metadata_authority on "
"raw_extrinsic_metadata.authority_id=metadata_authority.id",
"metadata_fetcher on raw_extrinsic_metadata.fetcher_id=metadata_fetcher.id",
],
}
def directory_converter(db: BaseDb, directory_d: Dict[str, Any]) -> Directory:
"""Convert directory from the flat representation to swh model
compatible objects.
"""
columns = ["target", "name", "perms"]
query_template = """
select %(columns)s
from directory_entry_%(type)s
where id in %%s
"""
types = ["file", "dir", "rev"]
entries = []
with db.cursor() as cur:
for type in types:
ids = directory_d.pop("%s_entries" % type)
if not ids:
continue
query = query_template % {
"columns": ",".join(columns),
"type": type,
}
cur.execute(query, (tuple(ids),))
for row in cur:
entry_d = dict(zip(columns, row))
entry = DirectoryEntry(
name=entry_d["name"],
type=type,
target=entry_d["target"],
perms=entry_d["perms"],
)
entries.append(entry)
return Directory(id=directory_d["id"], entries=tuple(entries),)
def raw_extrinsic_metadata_converter(
db: BaseDb, metadata: Dict[str, Any]
) -> RawExtrinsicMetadata:
"""Convert a raw extrinsic metadata from the flat representation to swh model
compatible objects.
"""
return db_to_raw_extrinsic_metadata(metadata)
def extid_converter(db: BaseDb, extid: Dict[str, Any]) -> ExtID:
"""Convert an extid from the flat representation to swh model
compatible objects.
"""
return db_to_extid(extid)
def revision_converter(db: BaseDb, revision_d: Dict[str, Any]) -> Revision:
"""Convert revision from the flat representation to swh model
compatible objects.
"""
revision = db_to_revision(revision_d)
assert revision is not None, revision_d["id"]
return revision
def release_converter(db: BaseDb, release_d: Dict[str, Any]) -> Release:
"""Convert release from the flat representation to swh model
compatible objects.
"""
release = db_to_release(release_d)
assert release is not None, release_d["id"]
return release
def snapshot_converter(db: BaseDb, snapshot_d: Dict[str, Any]) -> Snapshot:
"""Convert snapshot from the flat representation to swh model
compatible objects.
"""
columns = ["name", "target", "target_type"]
query = """
select %s
from snapshot_branches sbs
inner join snapshot_branch sb on sb.object_id=sbs.branch_id
where sbs.snapshot_id=%%s
""" % ", ".join(
columns
)
with db.cursor() as cur:
cur.execute(query, (snapshot_d["object_id"],))
branches = {}
for name, *row in cur:
branch_d = dict(zip(columns[1:], row))
if branch_d["target"] is not None and branch_d["target_type"] is not None:
branch: Optional[SnapshotBranch] = SnapshotBranch(
target=branch_d["target"],
target_type=TargetType(branch_d["target_type"]),
)
else:
branch = None
branches[name] = branch
return Snapshot(id=snapshot_d["id"], branches=branches,)
CONVERTERS: Dict[str, Callable[[BaseDb, Dict[str, Any]], BaseModel]] = {
"directory": directory_converter,
"extid": extid_converter,
"raw_extrinsic_metadata": raw_extrinsic_metadata_converter,
"revision": revision_converter,
"release": release_converter,
"snapshot": snapshot_converter,
}
def object_to_offset(object_id, numbits):
"""Compute the index of the range containing object id, when dividing
space into 2^numbits.
Args:
object_id (str): The hex representation of object_id
numbits (int): Number of bits in which we divide input space
Returns:
The index of the range containing object id
"""
q, r = divmod(numbits, 8)
length = q + (r != 0)
shift_bits = 8 - r if r else 0
truncated_id = object_id[: length * 2]
if len(truncated_id) < length * 2:
truncated_id += "0" * (length * 2 - len(truncated_id))
truncated_id_bytes = bytes.fromhex(truncated_id)
return int.from_bytes(truncated_id_bytes, byteorder="big") >> shift_bits
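# A minimal, hedged illustration of object_to_offset (illustrative sketch, not
# part of the described API): the first ceil(numbits/8) bytes of the hex id are
# read, then shifted right so that only numbits bits remain.
def _example_object_to_offset() -> None:
    # With numbits=8, the range index is simply the first byte of the id.
    assert object_to_offset("83fe1a", 8) == 0x83
    # With numbits=12, only the first 12 bits are kept (0x83f == 2111).
    assert object_to_offset("83fe1a", 12) == 0x83F
    # Short ids are right-padded with zeros before conversion.
    assert object_to_offset("8", 8) == 0x80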
def byte_ranges(numbits, start_object=None, end_object=None):
"""Generate start/end pairs of bytes spanning numbits bits and
constrained by optional start_object and end_object.
Args:
numbits (int): Number of bits in which we divide input space
start_object (str): Hex object id contained in the first range
returned
end_object (str): Hex object id contained in the last range
returned
Yields:
2^numbits pairs of bytes
"""
q, r = divmod(numbits, 8)
length = q + (r != 0)
shift_bits = 8 - r if r else 0
def to_bytes(i):
return int.to_bytes(i << shift_bits, length=length, byteorder="big")
start_offset = 0
end_offset = 1 << numbits
if start_object is not None:
start_offset = object_to_offset(start_object, numbits)
if end_object is not None:
end_offset = object_to_offset(end_object, numbits) + 1
for start in range(start_offset, end_offset):
end = start + 1
if start == 0:
yield None, to_bytes(end)
elif end == 1 << numbits:
yield to_bytes(start), None
else:
yield to_bytes(start), to_bytes(end)
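# A hedged usage sketch for byte_ranges (illustration only): with numbits=8 the
# space is split into 256 ranges, with open-ended first and last bounds so that
# every possible id is covered; optional bounds clamp the generator to the
# ranges containing them.
def _example_byte_ranges() -> None:
    ranges = list(byte_ranges(8))
    assert len(ranges) == 256
    assert ranges[0] == (None, b"\x01")
    assert ranges[-1] == (b"\xff", None)
    assert list(byte_ranges(8, "83fe1a", "84")) == [
        (b"\x83", b"\x84"),
        (b"\x84", b"\x85"),
    ]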
def raw_extrinsic_metadata_target_ranges(start_object=None, end_object=None):
"""Generate ranges of values for the `target` attribute of `raw_extrinsic_metadata`
objects.
This generates one range for all values before the first SWHID (which would
correspond to raw origin URLs), then a number of hex-based ranges for each
known type of SWHID (2**12 ranges for directories, 2**8 ranges for all other
types). Finally, it generates one extra range for values above all possible
SWHIDs.
"""
if start_object is None:
start_object = ""
swhid_target_types = sorted(type.value for type in ExtendedObjectType)
first_swhid = f"swh:1:{swhid_target_types[0]}:"
# Generate a range for url targets, if the starting object is before SWHIDs
if start_object < first_swhid:
yield start_object, (
first_swhid
if end_object is None or end_object >= first_swhid
else end_object
)
if end_object is not None and end_object <= first_swhid:
return
# Prime the following loop, which uses the upper bound of the previous range
# as lower bound, to account for potential targets between two valid types
# of SWHIDs (even though they would eventually be rejected by the
# RawExtrinsicMetadata parser, they /might/ exist...)
end_swhid = first_swhid
# Generate ranges for swhid targets
for target_type in swhid_target_types:
finished = False
base_swhid = f"swh:1:{target_type}:"
last_swhid = base_swhid + ("f" * 40)
if start_object > last_swhid:
continue
# Generate 2**8 or 2**12 ranges
for _, end in byte_ranges(12 if target_type == "dir" else 8):
# Reuse previous upper bound
start_swhid = end_swhid
# Use last_swhid for this object type if on the last byte range
end_swhid = (base_swhid + end.hex()) if end is not None else last_swhid
# Ignore out of bounds ranges
if start_object >= end_swhid:
continue
# Potentially clamp start of range to the first object requested
start_swhid = max(start_swhid, start_object)
# Handle ending the loop early if the last requested object id is in
# the current range
if end_object is not None and end_swhid >= end_object:
end_swhid = end_object
finished = True
yield start_swhid, end_swhid
if finished:
return
# Generate one final range for potential raw origin URLs after the last
# valid SWHID
start_swhid = max(start_object, end_swhid)
yield start_swhid, end_object
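# A hedged sanity-check sketch (illustration only): the generated ranges start
# with the raw-URL range, are contiguous, and end with an open-ended range
# above the last possible SWHID.
def _example_raw_extrinsic_metadata_target_ranges() -> None:
    ranges = list(raw_extrinsic_metadata_target_ranges())
    assert ranges[0][0] == ""  # everything before the first SWHID, i.e. raw URLs
    assert ranges[0][1].startswith("swh:1:")
    assert ranges[-1][1] is None  # open-ended upper bound
    # Each range starts exactly where the previous one ends.
    assert all(prev[1] == cur[0] for prev, cur in zip(ranges, ranges[1:]))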
def integer_ranges(start, end, block_size=1000):
for start in range(start, end, block_size):
if start == 0:
yield None, block_size
elif start + block_size > end:
yield start, end
else:
yield start, start + block_size
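# A hedged sketch of integer_ranges (illustration only): the first range is
# open at the bottom and the last one is clamped to the requested end.
def _example_integer_ranges() -> None:
    assert list(integer_ranges(0, 2500)) == [(None, 1000), (1000, 2000), (2000, 2500)]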
RANGE_GENERATORS = {
"content": lambda start, end: byte_ranges(24, start, end),
"skipped_content": lambda start, end: [(None, None)],
"directory": lambda start, end: byte_ranges(24, start, end),
"extid": lambda start, end: byte_ranges(24, start, end),
"revision": lambda start, end: byte_ranges(24, start, end),
"release": lambda start, end: byte_ranges(16, start, end),
"raw_extrinsic_metadata": raw_extrinsic_metadata_target_ranges,
"snapshot": lambda start, end: byte_ranges(16, start, end),
"origin": integer_ranges,
"origin_visit": integer_ranges,
"origin_visit_status": integer_ranges,
}
def compute_query(obj_type, start, end):
columns = COLUMNS.get(obj_type)
join_specs = JOINS.get(obj_type, [])
join_clause = "\n".join("left join %s" % clause for clause in join_specs)
where = []
where_args = []
if start:
where.append("%(keys)s >= %%s")
where_args.append(start)
if end:
where.append("%(keys)s < %%s")
where_args.append(end)
where_clause = ""
if where:
where_clause = ("where " + " and ".join(where)) % {
"keys": "(%s)" % PARTITION_KEY[obj_type]
}
column_specs = []
column_aliases = []
for column in columns:
if isinstance(column, str):
column_specs.append(column)
column_aliases.append(column)
else:
column_specs.append("%s as %s" % column)
column_aliases.append(column[1])
query = """
select %(columns)s
from %(table)s
%(join)s
%(where)s
""" % {
"columns": ",".join(column_specs),
"table": obj_type,
"join": join_clause,
"where": where_clause,
}
return query, where_args, column_aliases
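# A hedged sketch of compute_query (illustration only): for the extid table the
# selected columns now include the new extid_version field, and a simple table
# like origin needs neither joins nor a where clause.
def _example_compute_query() -> None:
    _, _, extid_aliases = compute_query("extid", None, None)
    assert extid_aliases == [
        "extid_type",
        "extid",
        "extid_version",
        "target_type",
        "target",
    ]
    query, where_args, aliases = compute_query("origin", None, None)
    assert aliases == ["url"]
    assert where_args == []
    assert "from origin" in query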
def fetch(db, obj_type, start, end):
"""Fetch all obj_type's identifiers from db.
This opens one connection, streams objects and, when done, closes
the connection.
Args:
db (BaseDb): Db connection object
obj_type (str): Object type
start (Union[bytes|Tuple]): Range start identifier
end (Union[bytes|Tuple]): Range end identifier
Raises:
ValueError if obj_type is not supported
Yields:
Objects in the given range
"""
query, where_args, column_aliases = compute_query(obj_type, start, end)
converter = CONVERTERS.get(obj_type)
with db.cursor() as cursor:
logger.debug("Fetching data for table %s", obj_type)
logger.debug("query: %s %s", query, where_args)
cursor.execute(query, where_args)
for row in cursor:
record = dict(zip(column_aliases, row))
if converter:
record = converter(db, record)
else:
record = object_converter_fn[obj_type](record)
logger.debug("record: %s", record)
yield record
def _format_range_bound(bound):
if isinstance(bound, bytes):
return bound.hex()
else:
return str(bound)
MANDATORY_KEYS = ["storage", "journal_writer"]
class JournalBackfiller:
"""Class in charge of reading the storage's objects and sends those
back to the journal's topics.
This is designed to be run periodically.
"""
def __init__(self, config=None):
self.config = config
self.check_config(config)
def check_config(self, config):
missing_keys = []
for key in MANDATORY_KEYS:
if not config.get(key):
missing_keys.append(key)
if missing_keys:
raise ValueError(
"Configuration error: The following keys must be"
" provided: %s" % (",".join(missing_keys),)
)
if "cls" not in config["storage"] or config["storage"]["cls"] not in (
"local",
"postgresql",
):
raise ValueError(
"swh storage backfiller must be configured to use a local"
" (PostgreSQL) storage"
)
def parse_arguments(self, object_type, start_object, end_object):
"""Parse arguments
Raises:
ValueError for unsupported object type
ValueError if object ids are not parseable
Returns:
Parsed start and end object ids
"""
if object_type not in COLUMNS:
raise ValueError(
"Object type %s is not supported. "
"The only possible values are %s"
% (object_type, ", ".join(sorted(COLUMNS.keys())))
)
if object_type in ["origin", "origin_visit", "origin_visit_status"]:
if start_object:
start_object = int(start_object)
else:
start_object = 0
if end_object:
end_object = int(end_object)
else:
end_object = 100 * 1000 * 1000 # hard-coded limit
return start_object, end_object
def run(self, object_type, start_object, end_object, dry_run=False):
"""Reads storage's subscribed object types and send them to the
journal's reading topic.
"""
start_object, end_object = self.parse_arguments(
object_type, start_object, end_object
)
db = BaseDb.connect(self.config["storage"]["db"])
writer = JournalWriter({"cls": "kafka", **self.config["journal_writer"]})
assert writer.journal is not None
for range_start, range_end in RANGE_GENERATORS[object_type](
start_object, end_object
):
logger.info(
"Processing %s range %s to %s",
object_type,
_format_range_bound(range_start),
_format_range_bound(range_end),
)
objects = fetch(db, object_type, start=range_start, end=range_end)
if not dry_run:
writer.write_additions(object_type, objects)
else:
# only consume the objects iterator to check for any potential
# decoding/encoding errors
for obj in objects:
pass
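# Hedged usage sketch (hypothetical configuration values, not taken from this
# repository): backfilling all extid objects could look like
#   backfiller = JournalBackfiller({
#       "storage": {"cls": "postgresql", "db": "service=swh-storage"},
#       "journal_writer": {
#           "brokers": ["kafka1:9092"],
#           "prefix": "swh.journal.objects",
#           "client_id": "swh.storage.backfiller",
#       },
#   })
#   backfiller.run("extid", start_object=None, end_object=None, dry_run=True)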
if __name__ == "__main__":
print('Please use the "swh-journal backfiller run" command')
diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py
index 40ea3aa5..10c5ba86 100644
--- a/swh/storage/cassandra/cql.py
+++ b/swh/storage/cassandra/cql.py
@@ -1,1264 +1,1278 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import Counter
import dataclasses
import datetime
import functools
import logging
import random
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from cassandra import ConsistencyLevel, CoordinationFailure
from cassandra.cluster import EXEC_PROFILE_DEFAULT, Cluster, ExecutionProfile, ResultSet
from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy
from cassandra.query import BoundStatement, PreparedStatement, dict_factory
from mypy_extensions import NamedArg
from tenacity import (
retry,
retry_if_exception_type,
stop_after_attempt,
wait_random_exponential,
)
from swh.model.identifiers import CoreSWHID
from swh.model.model import (
Content,
Person,
Sha1Git,
SkippedContent,
Timestamp,
TimestampWithTimezone,
)
from swh.storage.interface import ListOrder
from ..utils import remove_keys
from .common import TOKEN_BEGIN, TOKEN_END, hash_url
from .model import (
MAGIC_NULL_PK,
BaseRow,
ContentRow,
DirectoryEntryRow,
DirectoryRow,
ExtIDByTargetRow,
ExtIDRow,
MetadataAuthorityRow,
MetadataFetcherRow,
ObjectCountRow,
OriginRow,
OriginVisitRow,
OriginVisitStatusRow,
RawExtrinsicMetadataByIdRow,
RawExtrinsicMetadataRow,
ReleaseRow,
RevisionParentRow,
RevisionRow,
SkippedContentRow,
SnapshotBranchRow,
SnapshotRow,
content_index_table_name,
)
from .schema import CREATE_TABLES_QUERIES, HASH_ALGORITHMS
logger = logging.getLogger(__name__)
def get_execution_profiles(
consistency_level: str = "ONE",
) -> Dict[object, ExecutionProfile]:
if consistency_level not in ConsistencyLevel.name_to_value:
raise ValueError(
f"Configuration error: Unknown consistency level '{consistency_level}'"
)
return {
EXEC_PROFILE_DEFAULT: ExecutionProfile(
load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy()),
row_factory=dict_factory,
consistency_level=ConsistencyLevel.name_to_value[consistency_level],
)
}
# Configuration for cassandra-driver's access to servers:
# * hit the right server directly when sending a query (TokenAwarePolicy),
# * if there's more than one, then pick one at random that's in the same
# datacenter as the client (DCAwareRoundRobinPolicy)
def create_keyspace(
hosts: List[str], keyspace: str, port: int = 9042, *, durable_writes=True
):
cluster = Cluster(hosts, port=port, execution_profiles=get_execution_profiles())
session = cluster.connect()
extra_params = ""
if not durable_writes:
extra_params = "AND durable_writes = false"
session.execute(
"""CREATE KEYSPACE IF NOT EXISTS "%s"
WITH REPLICATION = {
'class' : 'SimpleStrategy',
'replication_factor' : 1
} %s;
"""
% (keyspace, extra_params)
)
session.execute('USE "%s"' % keyspace)
for query in CREATE_TABLES_QUERIES:
session.execute(query)
TRet = TypeVar("TRet")
def _prepared_statement(
query: str,
) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]:
"""Returns a decorator usable on methods of CqlRunner, to
inject them with a 'statement' argument, which is a prepared
statement corresponding to the query.
This only works on methods of CqlRunner, as preparing a
statement requires a connection to a Cassandra server."""
def decorator(f):
@functools.wraps(f)
def newf(self, *args, **kwargs) -> TRet:
if f.__name__ not in self._prepared_statements:
statement: PreparedStatement = self._session.prepare(query)
self._prepared_statements[f.__name__] = statement
return f(
self, *args, **kwargs, statement=self._prepared_statements[f.__name__]
)
return newf
return decorator
TArg = TypeVar("TArg")
TSelf = TypeVar("TSelf")
def _prepared_insert_statement(
row_class: Type[BaseRow],
) -> Callable[
[Callable[[TSelf, TArg, NamedArg(Any, "statement")], TRet]], # noqa
Callable[[TSelf, TArg], TRet],
]:
"""Shorthand for using `_prepared_statement` for `INSERT INTO`
statements."""
columns = row_class.cols()
return _prepared_statement(
"INSERT INTO %s (%s) VALUES (%s)"
% (row_class.TABLE, ", ".join(columns), ", ".join("?" for _ in columns),)
)
def _prepared_exists_statement(
table_name: str,
) -> Callable[
[Callable[[TSelf, TArg, NamedArg(Any, "statement")], TRet]], # noqa
Callable[[TSelf, TArg], TRet],
]:
"""Shorthand for using `_prepared_statement` for queries that only
check which ids in a list exist in the table."""
return _prepared_statement(f"SELECT id FROM {table_name} WHERE id IN ?")
def _prepared_select_statement(
row_class: Type[BaseRow], clauses: str = "", cols: Optional[List[str]] = None,
) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]:
if cols is None:
cols = row_class.cols()
return _prepared_statement(
f"SELECT {', '.join(cols)} FROM {row_class.TABLE} {clauses}"
)
def _prepared_select_statements(
row_class: Type[BaseRow], queries: Dict[Any, str],
) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]:
"""Like _prepared_statement, but supports multiple statements, passed a dict,
and passes a dict of prepared statements to the decorated method"""
cols = row_class.cols()
statement_start = f"SELECT {', '.join(cols)} FROM {row_class.TABLE} "
def decorator(f):
@functools.wraps(f)
def newf(self, *args, **kwargs) -> TRet:
if f.__name__ not in self._prepared_statements:
self._prepared_statements[f.__name__] = {
key: self._session.prepare(statement_start + query)
for (key, query) in queries.items()
}
return f(
self, *args, **kwargs, statements=self._prepared_statements[f.__name__]
)
return newf
return decorator
def _next_bytes_value(value: bytes) -> bytes:
"""Returns the next bytes value by incrementing the integer
representation of the provided value and converting it back
to bytes.
For instance, when the value is b"abcd", it returns b"abce".
"""
next_value_int = int.from_bytes(value, byteorder="big") + 1
return next_value_int.to_bytes(
(next_value_int.bit_length() + 7) // 8, byteorder="big"
)
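# A hedged sketch of _next_bytes_value (illustration only): incrementing the
# big-endian integer value also propagates carries through trailing 0xff bytes.
def _example_next_bytes_value() -> None:
    assert _next_bytes_value(b"abcd") == b"abce"
    assert _next_bytes_value(b"a\xff") == b"b\x00"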
class CqlRunner:
"""Class managing prepared statements and building queries to be sent
to Cassandra."""
def __init__(
self, hosts: List[str], keyspace: str, port: int, consistency_level: str
):
self._cluster = Cluster(
hosts,
port=port,
execution_profiles=get_execution_profiles(consistency_level),
)
self._session = self._cluster.connect(keyspace)
self._cluster.register_user_type(
keyspace, "microtimestamp_with_timezone", TimestampWithTimezone
)
self._cluster.register_user_type(keyspace, "microtimestamp", Timestamp)
self._cluster.register_user_type(keyspace, "person", Person)
# This maps each decorated method name to either:
# - a single PreparedStatement, for methods decorated with
# @_prepared_statement (and its wrappers, _prepared_insert_statement,
# _prepared_exists_statement, and _prepared_select_statement);
# - or a dict of PreparedStatements, for methods decorated with
# @_prepared_select_statements
self._prepared_statements: Dict[
str, Union[PreparedStatement, Dict[Any, PreparedStatement]]
] = {}
##########################
# Common utility functions
##########################
MAX_RETRIES = 3
@retry(
wait=wait_random_exponential(multiplier=1, max=10),
stop=stop_after_attempt(MAX_RETRIES),
retry=retry_if_exception_type(CoordinationFailure),
)
def _execute_with_retries(self, statement, args) -> ResultSet:
return self._session.execute(statement, args, timeout=1000.0)
@_prepared_statement(
"UPDATE object_count SET count = count + ? "
"WHERE partition_key = 0 AND object_type = ?"
)
def _increment_counter(
self, object_type: str, nb: int, *, statement: PreparedStatement
) -> None:
self._execute_with_retries(statement, [nb, object_type])
def _add_one(self, statement, obj: BaseRow) -> None:
self._increment_counter(obj.TABLE, 1)
self._execute_with_retries(statement, dataclasses.astuple(obj))
_T = TypeVar("_T", bound=BaseRow)
def _get_random_row(self, row_class: Type[_T], statement) -> Optional[_T]: # noqa
"""Takes a prepared statement of the form
"SELECT * FROM
WHERE token() > ? LIMIT 1"
and uses it to return a random row"""
token = random.randint(TOKEN_BEGIN, TOKEN_END)
rows = self._execute_with_retries(statement, [token])
if not rows:
# There is no row with a greater token; wrap around to get
# the row with the smallest token
rows = self._execute_with_retries(statement, [TOKEN_BEGIN])
if rows:
return row_class.from_dict(rows.one()) # type: ignore
else:
return None
def _missing(self, statement, ids):
rows = self._execute_with_retries(statement, [ids])
found_ids = {row["id"] for row in rows}
return [id_ for id_ in ids if id_ not in found_ids]
##########################
# 'content' table
##########################
def _content_add_finalize(self, statement: BoundStatement) -> None:
"""Returned currified by content_add_prepare, to be called when the
content row should be added to the primary table."""
self._execute_with_retries(statement, None)
self._increment_counter("content", 1)
@_prepared_insert_statement(ContentRow)
def content_add_prepare(
self, content: ContentRow, *, statement
) -> Tuple[int, Callable[[], None]]:
"""Prepares insertion of a Content to the main 'content' table.
Returns a token (to be used in secondary tables), and a function to be
called to perform the insertion in the main table."""
statement = statement.bind(dataclasses.astuple(content))
# Type used for hashing keys (usually, it will be
# cassandra.metadata.Murmur3Token)
token_class = self._cluster.metadata.token_map.token_class
# Token of the row when it will be inserted. This is equivalent to
# "SELECT token({', '.join(ContentRow.PARTITION_KEY)}) FROM content WHERE ..."
# after the row is inserted; but we need the token to insert in the
# index tables *before* inserting to the main 'content' table
token = token_class.from_key(statement.routing_key).value
assert TOKEN_BEGIN <= token <= TOKEN_END
# Function to be called after the indexes contain their respective
# row
finalizer = functools.partial(self._content_add_finalize, statement)
return (token, finalizer)
@_prepared_select_statement(
ContentRow, f"WHERE {' AND '.join(map('%s = ?'.__mod__, HASH_ALGORITHMS))}"
)
def content_get_from_pk(
self, content_hashes: Dict[str, bytes], *, statement
) -> Optional[ContentRow]:
rows = list(
self._execute_with_retries(
statement, [content_hashes[algo] for algo in HASH_ALGORITHMS]
)
)
assert len(rows) <= 1
if rows:
return ContentRow(**rows[0])
else:
return None
@_prepared_select_statement(
ContentRow, f"WHERE token({', '.join(ContentRow.PARTITION_KEY)}) = ?"
)
def content_get_from_token(self, token, *, statement) -> Iterable[ContentRow]:
return map(ContentRow.from_dict, self._execute_with_retries(statement, [token]))
@_prepared_select_statement(
ContentRow, f"WHERE token({', '.join(ContentRow.PARTITION_KEY)}) > ? LIMIT 1"
)
def content_get_random(self, *, statement) -> Optional[ContentRow]:
return self._get_random_row(ContentRow, statement)
@_prepared_statement(
"""
SELECT token({pk}) AS tok, {cols} FROM {table}
WHERE token({pk}) >= ? AND token({pk}) <= ? LIMIT ?
""".format(
pk=", ".join(ContentRow.PARTITION_KEY),
cols=", ".join(ContentRow.cols()),
table=ContentRow.TABLE,
)
)
def content_get_token_range(
self, start: int, end: int, limit: int, *, statement
) -> Iterable[Tuple[int, ContentRow]]:
"""Returns an iterable of (token, row)"""
return (
(row["tok"], ContentRow.from_dict(remove_keys(row, ("tok",))))
for row in self._execute_with_retries(statement, [start, end, limit])
)
##########################
# 'content_by_*' tables
##########################
@_prepared_statement(
f"""
SELECT sha1_git AS id
FROM {content_index_table_name("sha1_git", skipped_content=False)}
WHERE sha1_git IN ?
"""
)
def content_missing_by_sha1_git(
self, ids: List[bytes], *, statement
) -> List[bytes]:
return self._missing(statement, ids)
def content_index_add_one(self, algo: str, content: Content, token: int) -> None:
"""Adds a row mapping content[algo] to the token of the Content in
the main 'content' table."""
query = f"""
INSERT INTO {content_index_table_name(algo, skipped_content=False)}
({algo}, target_token)
VALUES (%s, %s)
"""
self._execute_with_retries(query, [content.get_hash(algo), token])
def content_get_tokens_from_single_hash(
self, algo: str, hash_: bytes
) -> Iterable[int]:
assert algo in HASH_ALGORITHMS
query = f"""
SELECT target_token
FROM {content_index_table_name(algo, skipped_content=False)}
WHERE {algo} = %s
"""
return (
row["target_token"] for row in self._execute_with_retries(query, [hash_])
)
##########################
# 'skipped_content' table
##########################
def _skipped_content_add_finalize(self, statement: BoundStatement) -> None:
"""Returned currified by skipped_content_add_prepare, to be called
when the content row should be added to the primary table."""
self._execute_with_retries(statement, None)
self._increment_counter("skipped_content", 1)
@_prepared_insert_statement(SkippedContentRow)
def skipped_content_add_prepare(
self, content, *, statement
) -> Tuple[int, Callable[[], None]]:
"""Prepares insertion of a Content to the main 'skipped_content' table.
Returns a token (to be used in secondary tables), and a function to be
called to perform the insertion in the main table."""
# Replace NULLs (which are not allowed in the partition key) with
# an empty byte string
for key in SkippedContentRow.PARTITION_KEY:
if getattr(content, key) is None:
setattr(content, key, MAGIC_NULL_PK)
statement = statement.bind(dataclasses.astuple(content))
# Type used for hashing keys (usually, it will be
# cassandra.metadata.Murmur3Token)
token_class = self._cluster.metadata.token_map.token_class
# Token of the row when it will be inserted. This is equivalent to
# "SELECT token({', '.join(SkippedContentRow.PARTITION_KEY)})
# FROM skipped_content WHERE ..."
# after the row is inserted; but we need the token to insert in the
# index tables *before* inserting to the main 'skipped_content' table
token = token_class.from_key(statement.routing_key).value
assert TOKEN_BEGIN <= token <= TOKEN_END
# Function to be called after the indexes contain their respective
# row
finalizer = functools.partial(self._skipped_content_add_finalize, statement)
return (token, finalizer)
@_prepared_select_statement(
SkippedContentRow,
f"WHERE {' AND '.join(map('%s = ?'.__mod__, HASH_ALGORITHMS))}",
)
def skipped_content_get_from_pk(
self, content_hashes: Dict[str, bytes], *, statement
) -> Optional[SkippedContentRow]:
rows = list(
self._execute_with_retries(
statement,
[content_hashes[algo] or MAGIC_NULL_PK for algo in HASH_ALGORITHMS],
)
)
assert len(rows) <= 1
if rows:
return SkippedContentRow.from_dict(rows[0])
else:
return None
@_prepared_select_statement(
SkippedContentRow,
f"WHERE token({', '.join(SkippedContentRow.PARTITION_KEY)}) = ?",
)
def skipped_content_get_from_token(
self, token, *, statement
) -> Iterable[SkippedContentRow]:
return map(
SkippedContentRow.from_dict, self._execute_with_retries(statement, [token])
)
##########################
# 'skipped_content_by_*' tables
##########################
def skipped_content_index_add_one(
self, algo: str, content: SkippedContent, token: int
) -> None:
"""Adds a row mapping content[algo] to the token of the SkippedContent
in the main 'skipped_content' table."""
query = (
f"INSERT INTO skipped_content_by_{algo} ({algo}, target_token) "
f"VALUES (%s, %s)"
)
self._execute_with_retries(
query, [content.get_hash(algo) or MAGIC_NULL_PK, token]
)
def skipped_content_get_tokens_from_single_hash(
self, algo: str, hash_: bytes
) -> Iterable[int]:
assert algo in HASH_ALGORITHMS
query = f"""
SELECT target_token
FROM {content_index_table_name(algo, skipped_content=True)}
WHERE {algo} = %s
"""
return (
row["target_token"] for row in self._execute_with_retries(query, [hash_])
)
##########################
# 'revision' table
##########################
@_prepared_exists_statement("revision")
def revision_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement(RevisionRow)
def revision_add_one(self, revision: RevisionRow, *, statement) -> None:
self._add_one(statement, revision)
@_prepared_statement(f"SELECT id FROM {RevisionRow.TABLE} WHERE id IN ?")
def revision_get_ids(self, revision_ids, *, statement) -> Iterable[int]:
return (
row["id"] for row in self._execute_with_retries(statement, [revision_ids])
)
@_prepared_select_statement(RevisionRow, "WHERE id IN ?")
def revision_get(
self, revision_ids: List[Sha1Git], *, statement
) -> Iterable[RevisionRow]:
return map(
RevisionRow.from_dict, self._execute_with_retries(statement, [revision_ids])
)
@_prepared_select_statement(RevisionRow, "WHERE token(id) > ? LIMIT 1")
def revision_get_random(self, *, statement) -> Optional[RevisionRow]:
return self._get_random_row(RevisionRow, statement)
##########################
# 'revision_parent' table
##########################
@_prepared_insert_statement(RevisionParentRow)
def revision_parent_add_one(
self, revision_parent: RevisionParentRow, *, statement
) -> None:
self._add_one(statement, revision_parent)
@_prepared_statement(
f"SELECT parent_id FROM {RevisionParentRow.TABLE} WHERE id = ?"
)
def revision_parent_get(
self, revision_id: Sha1Git, *, statement
) -> Iterable[bytes]:
return (
row["parent_id"]
for row in self._execute_with_retries(statement, [revision_id])
)
##########################
# 'release' table
##########################
@_prepared_exists_statement("release")
def release_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement(ReleaseRow)
def release_add_one(self, release: ReleaseRow, *, statement) -> None:
self._add_one(statement, release)
@_prepared_select_statement(ReleaseRow, "WHERE id in ?")
def release_get(self, release_ids: List[str], *, statement) -> Iterable[ReleaseRow]:
return map(
ReleaseRow.from_dict, self._execute_with_retries(statement, [release_ids])
)
@_prepared_select_statement(ReleaseRow, "WHERE token(id) > ? LIMIT 1")
def release_get_random(self, *, statement) -> Optional[ReleaseRow]:
return self._get_random_row(ReleaseRow, statement)
##########################
# 'directory' table
##########################
@_prepared_exists_statement("directory")
def directory_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement(DirectoryRow)
def directory_add_one(self, directory: DirectoryRow, *, statement) -> None:
"""Called after all calls to directory_entry_add_one, to
commit/finalize the directory."""
self._add_one(statement, directory)
@_prepared_select_statement(DirectoryRow, "WHERE token(id) > ? LIMIT 1")
def directory_get_random(self, *, statement) -> Optional[DirectoryRow]:
return self._get_random_row(DirectoryRow, statement)
##########################
# 'directory_entry' table
##########################
@_prepared_insert_statement(DirectoryEntryRow)
def directory_entry_add_one(self, entry: DirectoryEntryRow, *, statement) -> None:
self._add_one(statement, entry)
@_prepared_select_statement(DirectoryEntryRow, "WHERE directory_id IN ?")
def directory_entry_get(
self, directory_ids, *, statement
) -> Iterable[DirectoryEntryRow]:
return map(
DirectoryEntryRow.from_dict,
self._execute_with_retries(statement, [directory_ids]),
)
@_prepared_select_statement(
DirectoryEntryRow, "WHERE directory_id = ? AND name >= ? LIMIT ?"
)
def directory_entry_get_from_name(
self, directory_id: Sha1Git, from_: bytes, limit: int, *, statement
) -> Iterable[DirectoryEntryRow]:
return map(
DirectoryEntryRow.from_dict,
self._execute_with_retries(statement, [directory_id, from_, limit]),
)
##########################
# 'snapshot' table
##########################
@_prepared_exists_statement("snapshot")
def snapshot_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement(SnapshotRow)
def snapshot_add_one(self, snapshot: SnapshotRow, *, statement) -> None:
self._add_one(statement, snapshot)
@_prepared_select_statement(SnapshotRow, "WHERE token(id) > ? LIMIT 1")
def snapshot_get_random(self, *, statement) -> Optional[SnapshotRow]:
return self._get_random_row(SnapshotRow, statement)
##########################
# 'snapshot_branch' table
##########################
@_prepared_insert_statement(SnapshotBranchRow)
def snapshot_branch_add_one(self, branch: SnapshotBranchRow, *, statement) -> None:
self._add_one(statement, branch)
@_prepared_statement(
f"""
SELECT ascii_bins_count(target_type) AS counts
FROM {SnapshotBranchRow.TABLE}
WHERE snapshot_id = ? AND name >= ?
"""
)
def snapshot_count_branches_from_name(
self, snapshot_id: Sha1Git, from_: bytes, *, statement
) -> Dict[Optional[str], int]:
row = self._execute_with_retries(statement, [snapshot_id, from_]).one()
(nb_none, counts) = row["counts"]
return {None: nb_none, **counts}
@_prepared_statement(
f"""
SELECT ascii_bins_count(target_type) AS counts
FROM {SnapshotBranchRow.TABLE}
WHERE snapshot_id = ? AND name < ?
"""
)
def snapshot_count_branches_before_name(
self, snapshot_id: Sha1Git, before: bytes, *, statement,
) -> Dict[Optional[str], int]:
row = self._execute_with_retries(statement, [snapshot_id, before]).one()
(nb_none, counts) = row["counts"]
return {None: nb_none, **counts}
def snapshot_count_branches(
self, snapshot_id: Sha1Git, branch_name_exclude_prefix: Optional[bytes] = None,
) -> Dict[Optional[str], int]:
"""Returns a dictionary from type names to the number of branches
of that type."""
prefix = branch_name_exclude_prefix
if prefix is None:
return self.snapshot_count_branches_from_name(snapshot_id, b"")
else:
# counts branches before exclude prefix
counts = Counter(
self.snapshot_count_branches_before_name(snapshot_id, prefix)
)
# no need to execute that part if each bit of the prefix equals 1
if prefix.replace(b"\xff", b"") != b"":
# counts branches after exclude prefix and update counters
counts.update(
self.snapshot_count_branches_from_name(
snapshot_id, _next_bytes_value(prefix)
)
)
return counts
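# Illustration (hedged sketch, hypothetical prefix): excluding b"refs/pull/"
# counts branch names strictly before b"refs/pull/" plus names from
# _next_bytes_value(b"refs/pull/") == b"refs/pull0" onwards, i.e. everything
# except names starting with the excluded prefix.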
@_prepared_select_statement(
SnapshotBranchRow, "WHERE snapshot_id = ? AND name >= ? LIMIT ?"
)
def snapshot_branch_get_from_name(
self, snapshot_id: Sha1Git, from_: bytes, limit: int, *, statement
) -> Iterable[SnapshotBranchRow]:
return map(
SnapshotBranchRow.from_dict,
self._execute_with_retries(statement, [snapshot_id, from_, limit]),
)
@_prepared_select_statement(
SnapshotBranchRow, "WHERE snapshot_id = ? AND name >= ? AND name < ? LIMIT ?"
)
def snapshot_branch_get_range(
self,
snapshot_id: Sha1Git,
from_: bytes,
before: bytes,
limit: int,
*,
statement,
) -> Iterable[SnapshotBranchRow]:
return map(
SnapshotBranchRow.from_dict,
self._execute_with_retries(statement, [snapshot_id, from_, before, limit]),
)
def snapshot_branch_get(
self,
snapshot_id: Sha1Git,
from_: bytes,
limit: int,
branch_name_exclude_prefix: Optional[bytes] = None,
) -> Iterable[SnapshotBranchRow]:
prefix = branch_name_exclude_prefix
if prefix is None:
return self.snapshot_branch_get_from_name(snapshot_id, from_, limit)
else:
# get branches before the exclude prefix
branches = list(
self.snapshot_branch_get_range(snapshot_id, from_, prefix, limit)
)
nb_branches = len(branches)
# no need to execute that part if limit is reached
# or if each bit of the prefix equals 1
if nb_branches < limit and prefix.replace(b"\xff", b"") != b"":
# get branches after the exclude prefix and update list to return
branches.extend(
self.snapshot_branch_get_from_name(
snapshot_id, _next_bytes_value(prefix), limit - nb_branches
)
)
return branches
##########################
# 'origin' table
##########################
@_prepared_insert_statement(OriginRow)
def origin_add_one(self, origin: OriginRow, *, statement) -> None:
self._add_one(statement, origin)
@_prepared_select_statement(OriginRow, "WHERE sha1 = ?")
def origin_get_by_sha1(self, sha1: bytes, *, statement) -> Iterable[OriginRow]:
return map(OriginRow.from_dict, self._execute_with_retries(statement, [sha1]))
def origin_get_by_url(self, url: str) -> Iterable[OriginRow]:
return self.origin_get_by_sha1(hash_url(url))
@_prepared_statement(
f"""
SELECT token(sha1) AS tok, {", ".join(OriginRow.cols())}
FROM {OriginRow.TABLE}
WHERE token(sha1) >= ? LIMIT ?
"""
)
def origin_list(
self, start_token: int, limit: int, *, statement
) -> Iterable[Tuple[int, OriginRow]]:
"""Returns an iterable of (token, origin)"""
return (
(row["tok"], OriginRow.from_dict(remove_keys(row, ("tok",))))
for row in self._execute_with_retries(statement, [start_token, limit])
)
@_prepared_select_statement(OriginRow)
def origin_iter_all(self, *, statement) -> Iterable[OriginRow]:
return map(OriginRow.from_dict, self._execute_with_retries(statement, []))
@_prepared_statement(f"SELECT next_visit_id FROM {OriginRow.TABLE} WHERE sha1 = ?")
def _origin_get_next_visit_id(self, origin_sha1: bytes, *, statement) -> int:
rows = list(self._execute_with_retries(statement, [origin_sha1]))
assert len(rows) == 1 # TODO: error handling
return rows[0]["next_visit_id"]
@_prepared_statement(
f"""
UPDATE {OriginRow.TABLE}
SET next_visit_id=?
WHERE sha1 = ? IF next_visit_id=?
"""
)
def origin_generate_unique_visit_id(self, origin_url: str, *, statement) -> int:
origin_sha1 = hash_url(origin_url)
next_id = self._origin_get_next_visit_id(origin_sha1)
while True:
res = list(
self._execute_with_retries(
statement, [next_id + 1, origin_sha1, next_id]
)
)
assert len(res) == 1
if res[0]["[applied]"]:
# No data race
return next_id
else:
# Someone else updated it before we did, let's try again
next_id = res[0]["next_visit_id"]
# TODO: abort after too many attempts
return next_id
##########################
# 'origin_visit' table
##########################
@_prepared_select_statements(
OriginVisitRow,
{
(True, ListOrder.ASC): (
"WHERE origin = ? AND visit > ? ORDER BY visit ASC LIMIT ?"
),
(True, ListOrder.DESC): (
"WHERE origin = ? AND visit < ? ORDER BY visit DESC LIMIT ?"
),
(False, ListOrder.ASC): "WHERE origin = ? ORDER BY visit ASC LIMIT ?",
(False, ListOrder.DESC): "WHERE origin = ? ORDER BY visit DESC LIMIT ?",
},
)
def origin_visit_get(
self,
origin_url: str,
last_visit: Optional[int],
limit: int,
order: ListOrder,
*,
statements,
) -> Iterable[OriginVisitRow]:
args: List[Any] = [origin_url]
if last_visit is not None:
args.append(last_visit)
args.append(limit)
statement = statements[(last_visit is not None, order)]
return map(
OriginVisitRow.from_dict, self._execute_with_retries(statement, args)
)
@_prepared_insert_statement(OriginVisitRow)
def origin_visit_add_one(self, visit: OriginVisitRow, *, statement) -> None:
self._add_one(statement, visit)
@_prepared_select_statement(OriginVisitRow, "WHERE origin = ? AND visit = ?")
def origin_visit_get_one(
self, origin_url: str, visit_id: int, *, statement
) -> Optional[OriginVisitRow]:
# TODO: error handling
rows = list(self._execute_with_retries(statement, [origin_url, visit_id]))
if rows:
return OriginVisitRow.from_dict(rows[0])
else:
return None
@_prepared_select_statement(OriginVisitRow, "WHERE origin = ?")
def origin_visit_get_all(
self, origin_url: str, *, statement
) -> Iterable[OriginVisitRow]:
return map(
OriginVisitRow.from_dict,
self._execute_with_retries(statement, [origin_url]),
)
@_prepared_select_statement(OriginVisitRow, "WHERE token(origin) >= ?")
def _origin_visit_iter_from(
self, min_token: int, *, statement
) -> Iterable[OriginVisitRow]:
return map(
OriginVisitRow.from_dict, self._execute_with_retries(statement, [min_token])
)
@_prepared_select_statement(OriginVisitRow, "WHERE token(origin) < ?")
def _origin_visit_iter_to(
self, max_token: int, *, statement
) -> Iterable[OriginVisitRow]:
return map(
OriginVisitRow.from_dict, self._execute_with_retries(statement, [max_token])
)
def origin_visit_iter(self, start_token: int) -> Iterator[OriginVisitRow]:
"""Returns all origin visits in order from this token,
and wraps around the token space."""
yield from self._origin_visit_iter_from(start_token)
yield from self._origin_visit_iter_to(start_token)
##########################
# 'origin_visit_status' table
##########################
@_prepared_select_statements(
OriginVisitStatusRow,
{
(True, ListOrder.ASC): (
"WHERE origin = ? AND visit = ? AND date >= ? "
"ORDER BY visit ASC LIMIT ?"
),
(True, ListOrder.DESC): (
"WHERE origin = ? AND visit = ? AND date <= ? "
"ORDER BY visit DESC LIMIT ?"
),
(False, ListOrder.ASC): (
"WHERE origin = ? AND visit = ? ORDER BY visit ASC LIMIT ?"
),
(False, ListOrder.DESC): (
"WHERE origin = ? AND visit = ? ORDER BY visit DESC LIMIT ?"
),
},
)
def origin_visit_status_get_range(
self,
origin: str,
visit: int,
date_from: Optional[datetime.datetime],
limit: int,
order: ListOrder,
*,
statements,
) -> Iterable[OriginVisitStatusRow]:
args: List[Any] = [origin, visit]
if date_from is not None:
args.append(date_from)
args.append(limit)
statement = statements[(date_from is not None, order)]
return map(
OriginVisitStatusRow.from_dict, self._execute_with_retries(statement, args)
)
@_prepared_insert_statement(OriginVisitStatusRow)
def origin_visit_status_add_one(
self, visit_update: OriginVisitStatusRow, *, statement
) -> None:
self._add_one(statement, visit_update)
def origin_visit_status_get_latest(
self, origin: str, visit: int,
) -> Optional[OriginVisitStatusRow]:
"""Given an origin visit id, return its latest origin_visit_status
"""
return next(self.origin_visit_status_get(origin, visit), None)
@_prepared_select_statement(
OriginVisitStatusRow,
# 'visit DESC,' is optional with Cassandra 4, but ScyllaDB needs it
"WHERE origin = ? AND visit = ? ORDER BY visit DESC, date DESC",
)
def origin_visit_status_get(
self, origin: str, visit: int, *, statement,
) -> Iterator[OriginVisitStatusRow]:
"""Return all origin visit statuses for a given visit
"""
return map(
OriginVisitStatusRow.from_dict,
self._execute_with_retries(statement, [origin, visit]),
)
##########################
# 'metadata_authority' table
##########################
@_prepared_insert_statement(MetadataAuthorityRow)
def metadata_authority_add(self, authority: MetadataAuthorityRow, *, statement):
self._add_one(statement, authority)
@_prepared_select_statement(MetadataAuthorityRow, "WHERE type = ? AND url = ?")
def metadata_authority_get(
self, type, url, *, statement
) -> Optional[MetadataAuthorityRow]:
rows = list(self._execute_with_retries(statement, [type, url]))
if rows:
return MetadataAuthorityRow.from_dict(rows[0])
else:
return None
##########################
# 'metadata_fetcher' table
##########################
@_prepared_insert_statement(MetadataFetcherRow)
def metadata_fetcher_add(self, fetcher, *, statement):
self._add_one(statement, fetcher)
@_prepared_select_statement(MetadataFetcherRow, "WHERE name = ? AND version = ?")
def metadata_fetcher_get(
self, name, version, *, statement
) -> Optional[MetadataFetcherRow]:
rows = list(self._execute_with_retries(statement, [name, version]))
if rows:
return MetadataFetcherRow.from_dict(rows[0])
else:
return None
#########################
# 'raw_extrinsic_metadata_by_id' table
#########################
@_prepared_insert_statement(RawExtrinsicMetadataByIdRow)
def raw_extrinsic_metadata_by_id_add(self, row, *, statement):
self._add_one(statement, row)
@_prepared_select_statement(RawExtrinsicMetadataByIdRow, "WHERE id IN ?")
def raw_extrinsic_metadata_get_by_ids(
self, ids: List[Sha1Git], *, statement
) -> Iterable[RawExtrinsicMetadataByIdRow]:
return map(
RawExtrinsicMetadataByIdRow.from_dict,
self._execute_with_retries(statement, [ids]),
)
#########################
# 'raw_extrinsic_metadata' table
#########################
@_prepared_insert_statement(RawExtrinsicMetadataRow)
def raw_extrinsic_metadata_add(self, raw_extrinsic_metadata, *, statement):
self._add_one(statement, raw_extrinsic_metadata)
@_prepared_select_statement(
RawExtrinsicMetadataRow,
"WHERE target=? AND authority_url=? AND discovery_date>? AND authority_type=?",
)
def raw_extrinsic_metadata_get_after_date(
self,
target: str,
authority_type: str,
authority_url: str,
after: datetime.datetime,
*,
statement,
) -> Iterable[RawExtrinsicMetadataRow]:
return map(
RawExtrinsicMetadataRow.from_dict,
self._execute_with_retries(
statement, [target, authority_url, after, authority_type]
),
)
@_prepared_select_statement(
RawExtrinsicMetadataRow,
# This is equivalent to:
# WHERE target=? AND authority_type = ? AND authority_url = ? "
# AND (discovery_date, id) > (?, ?)"
# but it needs to be written this way to work with ScyllaDB.
"WHERE target=? AND (authority_type, authority_url) <= (?, ?) "
"AND (authority_type, authority_url, discovery_date, id) > (?, ?, ?, ?)",
)
def raw_extrinsic_metadata_get_after_date_and_id(
self,
target: str,
authority_type: str,
authority_url: str,
after_date: datetime.datetime,
after_id: bytes,
*,
statement,
) -> Iterable[RawExtrinsicMetadataRow]:
return map(
RawExtrinsicMetadataRow.from_dict,
self._execute_with_retries(
statement,
[
target,
authority_type,
authority_url,
authority_type,
authority_url,
after_date,
after_id,
],
),
)
@_prepared_select_statement(
RawExtrinsicMetadataRow,
"WHERE target=? AND authority_url=? AND authority_type=?",
)
def raw_extrinsic_metadata_get(
self, target: str, authority_type: str, authority_url: str, *, statement
) -> Iterable[RawExtrinsicMetadataRow]:
return map(
RawExtrinsicMetadataRow.from_dict,
self._execute_with_retries(
statement, [target, authority_url, authority_type]
),
)
@_prepared_statement(
"SELECT authority_type, authority_url FROM raw_extrinsic_metadata "
"WHERE target = ?"
)
def raw_extrinsic_metadata_get_authorities(
self, target: str, *, statement
) -> Iterable[Tuple[str, str]]:
return (
(entry["authority_type"], entry["authority_url"])
for entry in self._execute_with_retries(statement, [target])
)
##########################
# 'extid' table
##########################
def _extid_add_finalize(self, statement: BoundStatement) -> None:
"""Returned currified by extid_add_prepare, to be called when the
extid row should be added to the primary table."""
self._execute_with_retries(statement, None)
self._increment_counter("extid", 1)
@_prepared_insert_statement(ExtIDRow)
def extid_add_prepare(
self, extid: ExtIDRow, *, statement
) -> Tuple[int, Callable[[], None]]:
statement = statement.bind(dataclasses.astuple(extid))
token_class = self._cluster.metadata.token_map.token_class
token = token_class.from_key(statement.routing_key).value
assert TOKEN_BEGIN <= token <= TOKEN_END
# Function to be called after the indexes contain their respective
# row
finalizer = functools.partial(self._extid_add_finalize, statement)
return (token, finalizer)
@_prepared_select_statement(
- ExtIDRow, "WHERE extid_type=? AND extid=? AND target_type=? AND target=?",
+ ExtIDRow,
+ "WHERE extid_type=? AND extid=? AND extid_version=? "
+ "AND target_type=? AND target=?",
)
def extid_get_from_pk(
- self, extid_type: str, extid: bytes, target: CoreSWHID, *, statement,
+ self,
+ extid_type: str,
+ extid: bytes,
+ extid_version: int,
+ target: CoreSWHID,
+ *,
+ statement,
) -> Optional[ExtIDRow]:
rows = list(
self._execute_with_retries(
statement,
- [extid_type, extid, target.object_type.value, target.object_id],
+ [
+ extid_type,
+ extid,
+ extid_version,
+ target.object_type.value,
+ target.object_id,
+ ],
),
)
assert len(rows) <= 1
if rows:
return ExtIDRow(**rows[0])
else:
return None
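# Illustration (hedged sketch, hypothetical values): with extid_version now in
# the primary key, callers pass the version explicitly, e.g.:
#   cql_runner.extid_get_from_pk(
#       extid_type="hg",
#       extid=b"\x12" * 20,
#       extid_version=0,
#       target=CoreSWHID.from_string("swh:1:rev:" + "00" * 20),
#   )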
@_prepared_select_statement(
ExtIDRow, "WHERE token(extid_type, extid) = ?",
)
def extid_get_from_token(self, token: int, *, statement) -> Iterable[ExtIDRow]:
return map(ExtIDRow.from_dict, self._execute_with_retries(statement, [token]),)
@_prepared_select_statement(
ExtIDRow, "WHERE extid_type=? AND extid=?",
)
def extid_get_from_extid(
self, extid_type: str, extid: bytes, *, statement
) -> Iterable[ExtIDRow]:
return map(
ExtIDRow.from_dict,
self._execute_with_retries(statement, [extid_type, extid]),
)
def extid_get_from_target(
self, target_type: str, target: bytes
) -> Iterable[ExtIDRow]:
for token in self._extid_get_tokens_from_target(target_type, target):
if token is not None:
for extid in self.extid_get_from_token(token):
# re-check the extid against target (in case of murmur3 collision)
if (
extid is not None
and extid.target_type == target_type
and extid.target == target
):
yield extid
##########################
# 'extid_by_target' table
##########################
@_prepared_insert_statement(ExtIDByTargetRow)
def extid_index_add_one(self, row: ExtIDByTargetRow, *, statement) -> None:
"""Adds a row mapping extid[target_type, target] to the token of the ExtID in
the main 'extid' table."""
self._add_one(statement, row)
@_prepared_statement(
f"""
SELECT target_token
FROM {ExtIDByTargetRow.TABLE}
WHERE target_type = ? AND target = ?
"""
)
def _extid_get_tokens_from_target(
self, target_type: str, target: bytes, *, statement
) -> Iterable[int]:
return (
row["target_token"]
for row in self._execute_with_retries(statement, [target_type, target])
)
##########################
# Miscellaneous
##########################
@_prepared_statement("SELECT uuid() FROM revision LIMIT 1;")
def check_read(self, *, statement):
self._execute_with_retries(statement, [])
@_prepared_select_statement(ObjectCountRow, "WHERE partition_key=0")
def stat_counters(self, *, statement) -> Iterable[ObjectCountRow]:
return map(ObjectCountRow.from_dict, self._execute_with_retries(statement, []))
diff --git a/swh/storage/cassandra/model.py b/swh/storage/cassandra/model.py
index 89075447..9b63f2e1 100644
--- a/swh/storage/cassandra/model.py
+++ b/swh/storage/cassandra/model.py
@@ -1,333 +1,334 @@
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
"""Classes representing tables in the Cassandra database.
They are very close to classes found in swh.model.model, but most of
them are subtly different:
* Large objects are split into other classes (e.g. RevisionRow has no
'parents' field, because parents are stored in a different table,
represented by RevisionParentRow)
* They have a "cols" field, which returns the list of column names
of the table
* They only use types that map directly to Cassandra's schema (i.e. no enums)
Therefore, this model doesn't reuse swh.model.model, except for types
that can be mapped to UDTs (Person and TimestampWithTimezone).
"""
import dataclasses
import datetime
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, TypeVar
from swh.model.model import Person, TimestampWithTimezone
MAGIC_NULL_PK = b""
"""
NULLs (or all-empty blobs) are not allowed in primary keys; instead we use a
special value that can't possibly be a valid hash.
"""
T = TypeVar("T", bound="BaseRow")
def content_index_table_name(algo: str, skipped_content: bool) -> str:
"""Given an algorithm name, returns the name of one of the 'content_by_*'
and 'skipped_content_by_*' tables that serve as index for the 'content'
and 'skipped_content' tables based on this algorithm's hashes.
For now it is a simple substitution, but future versions may append a version
number to it, if needed for schema updates."""
if skipped_content:
return f"skipped_content_by_{algo}"
else:
return f"content_by_{algo}"
class BaseRow:
TABLE: ClassVar[str]
PARTITION_KEY: ClassVar[Tuple[str, ...]]
CLUSTERING_KEY: ClassVar[Tuple[str, ...]] = ()
@classmethod
def from_dict(cls: Type[T], d: Dict[str, Any]) -> T:
return cls(**d) # type: ignore
@classmethod
def cols(cls) -> List[str]:
return [field.name for field in dataclasses.fields(cls)]
def to_dict(self) -> Dict[str, Any]:
return dataclasses.asdict(self)
@dataclasses.dataclass
class ContentRow(BaseRow):
TABLE = "content"
PARTITION_KEY: ClassVar[Tuple[str, ...]] = (
"sha1",
"sha1_git",
"sha256",
"blake2s256",
)
sha1: bytes
sha1_git: bytes
sha256: bytes
blake2s256: bytes
length: int
ctime: datetime.datetime
status: str
@dataclasses.dataclass
class SkippedContentRow(BaseRow):
TABLE = "skipped_content"
PARTITION_KEY = ("sha1", "sha1_git", "sha256", "blake2s256")
sha1: Optional[bytes]
sha1_git: Optional[bytes]
sha256: Optional[bytes]
blake2s256: Optional[bytes]
length: Optional[int]
ctime: Optional[datetime.datetime]
status: str
reason: str
origin: str
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "SkippedContentRow":
d = d.copy()
for k in ("sha1", "sha1_git", "sha256", "blake2s256"):
if d[k] == MAGIC_NULL_PK:
d[k] = None
return super().from_dict(d)
@dataclasses.dataclass
class DirectoryRow(BaseRow):
TABLE = "directory"
PARTITION_KEY = ("id",)
id: bytes
@dataclasses.dataclass
class DirectoryEntryRow(BaseRow):
TABLE = "directory_entry"
PARTITION_KEY = ("directory_id",)
CLUSTERING_KEY = ("name",)
directory_id: bytes
name: bytes
target: bytes
perms: int
type: str
@dataclasses.dataclass
class RevisionRow(BaseRow):
TABLE = "revision"
PARTITION_KEY = ("id",)
id: bytes
date: Optional[TimestampWithTimezone]
committer_date: Optional[TimestampWithTimezone]
type: str
directory: bytes
message: bytes
author: Person
committer: Person
synthetic: bool
metadata: str
extra_headers: dict
@dataclasses.dataclass
class RevisionParentRow(BaseRow):
TABLE = "revision_parent"
PARTITION_KEY = ("id",)
CLUSTERING_KEY = ("parent_rank",)
id: bytes
parent_rank: int
parent_id: bytes
@dataclasses.dataclass
class ReleaseRow(BaseRow):
TABLE = "release"
PARTITION_KEY = ("id",)
id: bytes
target_type: str
target: bytes
date: TimestampWithTimezone
name: bytes
message: bytes
author: Person
synthetic: bool
@dataclasses.dataclass
class SnapshotRow(BaseRow):
TABLE = "snapshot"
PARTITION_KEY = ("id",)
id: bytes
@dataclasses.dataclass
class SnapshotBranchRow(BaseRow):
TABLE = "snapshot_branch"
PARTITION_KEY = ("snapshot_id",)
CLUSTERING_KEY = ("name",)
snapshot_id: bytes
name: bytes
target_type: Optional[str]
target: Optional[bytes]
@dataclasses.dataclass
class OriginVisitRow(BaseRow):
TABLE = "origin_visit"
PARTITION_KEY = ("origin",)
CLUSTERING_KEY = ("visit",)
origin: str
visit: int
date: datetime.datetime
type: str
@dataclasses.dataclass
class OriginVisitStatusRow(BaseRow):
TABLE = "origin_visit_status"
PARTITION_KEY = ("origin",)
CLUSTERING_KEY = ("visit", "date")
origin: str
visit: int
date: datetime.datetime
type: str
status: str
metadata: str
snapshot: bytes
@classmethod
def from_dict(cls: Type[T], d: Dict[str, Any]) -> T:
return cls(**d) # type: ignore
@dataclasses.dataclass
class OriginRow(BaseRow):
TABLE = "origin"
PARTITION_KEY = ("sha1",)
sha1: bytes
url: str
next_visit_id: int
@dataclasses.dataclass
class MetadataAuthorityRow(BaseRow):
TABLE = "metadata_authority"
PARTITION_KEY = ("url",)
CLUSTERING_KEY = ("type",)
url: str
type: str
@dataclasses.dataclass
class MetadataFetcherRow(BaseRow):
TABLE = "metadata_fetcher"
PARTITION_KEY = ("name",)
CLUSTERING_KEY = ("version",)
name: str
version: str
@dataclasses.dataclass
class RawExtrinsicMetadataRow(BaseRow):
TABLE = "raw_extrinsic_metadata"
PARTITION_KEY = ("target",)
CLUSTERING_KEY = (
"authority_type",
"authority_url",
"discovery_date",
"id",
)
id: bytes
type: str
target: str
authority_type: str
authority_url: str
discovery_date: datetime.datetime
fetcher_name: str
fetcher_version: str
format: str
metadata: bytes
origin: Optional[str]
visit: Optional[int]
snapshot: Optional[str]
release: Optional[str]
revision: Optional[str]
path: Optional[bytes]
directory: Optional[str]
@dataclasses.dataclass
class RawExtrinsicMetadataByIdRow(BaseRow):
TABLE = "raw_extrinsic_metadata_by_id"
PARTITION_KEY = ("id",)
CLUSTERING_KEY = ()
id: bytes
target: str
authority_type: str
authority_url: str
@dataclasses.dataclass
class ObjectCountRow(BaseRow):
TABLE = "object_count"
PARTITION_KEY = ("partition_key",)
CLUSTERING_KEY = ("object_type",)
partition_key: int
object_type: str
count: int
@dataclasses.dataclass
class ExtIDRow(BaseRow):
TABLE = "extid"
- PARTITION_KEY = ("target", "target_type", "extid", "extid_type")
+ PARTITION_KEY = ("target", "target_type", "extid_version", "extid", "extid_type")
extid_type: str
extid: bytes
+ extid_version: int
target_type: str
target: bytes
@dataclasses.dataclass
class ExtIDByTargetRow(BaseRow):
TABLE = "extid_by_target"
PARTITION_KEY = ("target_type", "target")
CLUSTERING_KEY = ("target_token",)
target_type: str
target: bytes
target_token: int
diff --git a/swh/storage/cassandra/schema.py b/swh/storage/cassandra/schema.py
index 3d7f3e07..75d7c8eb 100644
--- a/swh/storage/cassandra/schema.py
+++ b/swh/storage/cassandra/schema.py
@@ -1,342 +1,343 @@
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
_use_scylla = bool(os.environ.get("SWH_USE_SCYLLADB", ""))
UDF_LANGUAGE = "lua" if _use_scylla else "java"
if UDF_LANGUAGE == "java":
# For Cassandra
CREATE_TABLES_QUERIES = [
"""
CREATE OR REPLACE FUNCTION ascii_bins_count_sfunc (
state tuple<int, map<ascii, int>>, -- (nb_none, map)
bin_name ascii
)
CALLED ON NULL INPUT
RETURNS tuple<int, map<ascii, int>>
LANGUAGE java AS
$$
if (bin_name == null) {
state.setInt(0, state.getInt(0) + 1);
}
else {
Map<String, Integer> counters = state.getMap(
1, String.class, Integer.class);
Integer nb = counters.get(bin_name);
if (nb == null) {
nb = 0;
}
counters.put(bin_name, nb + 1);
state.setMap(1, counters, String.class, Integer.class);
}
return state;
$$;""",
"""
CREATE OR REPLACE AGGREGATE ascii_bins_count ( ascii )
SFUNC ascii_bins_count_sfunc
STYPE tuple<int, map<ascii, int>>
INITCOND (0, {})
;""",
]
elif UDF_LANGUAGE == "lua":
# For ScyllaDB
# TODO: this is not implementable yet, because ScyllaDB does not support
# user-defined aggregates. https://github.com/scylladb/scylla/issues/7201
CREATE_TABLES_QUERIES = []
else:
assert False, f"{UDF_LANGUAGE} must be 'lua' or 'java'"
CREATE_TABLES_QUERIES = [
*CREATE_TABLES_QUERIES,
"""
CREATE TYPE IF NOT EXISTS microtimestamp (
seconds bigint,
microseconds int
);""",
"""
CREATE TYPE IF NOT EXISTS microtimestamp_with_timezone (
timestamp frozen<microtimestamp>,
offset smallint,
negative_utc boolean
);""",
"""
CREATE TYPE IF NOT EXISTS person (
fullname blob,
name blob,
email blob
);""",
"""
CREATE TABLE IF NOT EXISTS content (
sha1 blob,
sha1_git blob,
sha256 blob,
blake2s256 blob,
length bigint,
ctime timestamp,
-- creation time, i.e. time of (first) injection into the storage
status ascii,
PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256))
);""",
"""
CREATE TABLE IF NOT EXISTS skipped_content (
sha1 blob,
sha1_git blob,
sha256 blob,
blake2s256 blob,
length bigint,
ctime timestamp,
-- creation time, i.e. time of (first) injection into the storage
status ascii,
reason text,
origin text,
PRIMARY KEY ((sha1, sha1_git, sha256, blake2s256))
);""",
"""
CREATE TABLE IF NOT EXISTS revision (
id blob PRIMARY KEY,
date microtimestamp_with_timezone,
committer_date microtimestamp_with_timezone,
type ascii,
directory blob, -- source code "root" directory
message blob,
author person,
committer person,
synthetic boolean,
-- true iff revision has been created by Software Heritage
metadata text,
-- extra metadata as JSON(tarball checksums, etc...)
extra_headers frozen<list <tuple<blob, blob>> >
-- extra commit information as (tuple(key, value), ...)
);""",
"""
CREATE TABLE IF NOT EXISTS revision_parent (
id blob,
parent_rank int,
-- parent position in merge commits, 0-based
parent_id blob,
PRIMARY KEY ((id), parent_rank)
);""",
"""
CREATE TABLE IF NOT EXISTS release
(
id blob PRIMARY KEY,
target_type ascii,
target blob,
date microtimestamp_with_timezone,
name blob,
message blob,
author person,
synthetic boolean,
-- true iff release has been created by Software Heritage
);""",
"""
CREATE TABLE IF NOT EXISTS directory (
id blob PRIMARY KEY,
);""",
"""
CREATE TABLE IF NOT EXISTS directory_entry (
directory_id blob,
name blob, -- path name, relative to containing dir
target blob,
perms int, -- unix-like permissions
type ascii, -- target type
PRIMARY KEY ((directory_id), name)
);""",
"""
CREATE TABLE IF NOT EXISTS snapshot (
id blob PRIMARY KEY,
);""",
"""
-- For a given snapshot_id, branches are sorted by their name,
-- allowing easy pagination.
CREATE TABLE IF NOT EXISTS snapshot_branch (
snapshot_id blob,
name blob,
target_type ascii,
target blob,
PRIMARY KEY ((snapshot_id), name)
);""",
"""
CREATE TABLE IF NOT EXISTS origin_visit (
origin text,
visit bigint,
date timestamp,
type text,
PRIMARY KEY ((origin), visit)
);""",
"""
CREATE TABLE IF NOT EXISTS origin_visit_status (
origin text,
visit bigint,
date timestamp,
type text,
status ascii,
metadata text,
snapshot blob,
PRIMARY KEY ((origin), visit, date)
)
WITH CLUSTERING ORDER BY (visit DESC, date DESC)
;""", # 'WITH CLUSTERING ORDER BY' is optional with Cassandra 4, but ScyllaDB needs it
"""
CREATE TABLE IF NOT EXISTS origin (
sha1 blob PRIMARY KEY,
url text,
next_visit_id int,
-- We need integer visit ids for compatibility with the pgsql
-- storage, so we're using lightweight transactions with this trick:
-- https://stackoverflow.com/a/29391877/539465
);""",
"""
CREATE TABLE IF NOT EXISTS metadata_authority (
url text,
type ascii,
PRIMARY KEY ((url), type)
);""",
"""
CREATE TABLE IF NOT EXISTS metadata_fetcher (
name ascii,
version ascii,
PRIMARY KEY ((name), version)
);""",
"""
CREATE TABLE IF NOT EXISTS raw_extrinsic_metadata (
id blob,
type text,
target text,
-- metadata source
authority_type text,
authority_url text,
discovery_date timestamp,
fetcher_name ascii,
fetcher_version ascii,
-- metadata itself
format ascii,
metadata blob,
-- context
origin text,
visit bigint,
snapshot text,
release text,
revision text,
path blob,
directory text,
PRIMARY KEY ((target), authority_type, authority_url, discovery_date, id)
-- An explanation is in order for this primary key:
--
-- Intuitively, the primary key should only be 'id', because two metadata
-- entries are the same iff the id is the same; and 'id' is used for
-- deduplication.
--
-- However, we also want to query by
-- (target, authority_type, authority_url, discovery_date)
-- The naive solution to this would be an extra table, to use as index;
-- but it means 1. extra code to keep them in sync 2. overhead when writing
-- 3. overhead + random reads (instead of linear) when reading.
--
-- Therefore, we use a single table for both, by adding the column
-- we want to query with before the id.
-- It solves both a) the query/order issues and b) the uniqueness issue because:
--
-- a) adding the id at the end of the primary key does not change the rows' order:
-- for two different rows, id1 != id2, so
-- (target1, ..., date1) < (target2, ..., date2)
-- <=> (target1, ..., date1, id1) < (target2, ..., date2, id2)
--
-- b) the id is a hash of all the columns, so:
-- rows are the same
-- <=> id1 == id2
-- <=> (target1, ..., date1, id1) == (target2, ..., date2, id2)
);""",
"""
CREATE TABLE IF NOT EXISTS raw_extrinsic_metadata_by_id (
id blob,
target text,
authority_type text,
authority_url text,
PRIMARY KEY ((id))
);""",
"""
CREATE TABLE IF NOT EXISTS object_count (
partition_key smallint, -- Constant, must always be 0
object_type ascii,
count counter,
PRIMARY KEY ((partition_key), object_type)
);""",
"""
CREATE TABLE IF NOT EXISTS extid (
extid_type ascii,
extid blob,
+ extid_version smallint,
target_type ascii,
target blob,
- PRIMARY KEY ((extid_type, extid), target_type, target)
+ PRIMARY KEY ((extid_type, extid), extid_version, target_type, target)
);""",
"""
CREATE TABLE IF NOT EXISTS extid_by_target (
target_type ascii,
target blob,
target_token bigint, -- value of token(pk) on the "primary" table
PRIMARY KEY ((target_type, target), target_token)
);""",
]
CONTENT_INDEX_TEMPLATE = """
-- Secondary table, used for looking up "content" from a single hash
CREATE TABLE IF NOT EXISTS content_by_{main_algo} (
{main_algo} blob,
target_token bigint, -- value of token(pk) on the "primary" table
PRIMARY KEY (({main_algo}), target_token)
);
CREATE TABLE IF NOT EXISTS skipped_content_by_{main_algo} (
{main_algo} blob,
target_token bigint, -- value of token(pk) on the "primary" table
PRIMARY KEY (({main_algo}), target_token)
);
"""
TABLES = [
"skipped_content",
"content",
"revision",
"revision_parent",
"release",
"directory",
"directory_entry",
"snapshot",
"snapshot_branch",
"origin_visit",
"origin",
"raw_extrinsic_metadata",
"object_count",
"origin_visit_status",
"metadata_authority",
"metadata_fetcher",
"extid",
"extid_by_target",
]
HASH_ALGORITHMS = ["sha1", "sha1_git", "sha256", "blake2s256"]
for main_algo in HASH_ALGORITHMS:
CREATE_TABLES_QUERIES.extend(
CONTENT_INDEX_TEMPLATE.format(
main_algo=main_algo,
other_algos=", ".join(
[algo for algo in HASH_ALGORITHMS if algo != main_algo]
),
).split("\n\n")
)
TABLES.append("content_by_%s" % main_algo)
TABLES.append("skipped_content_by_%s" % main_algo)
diff --git a/swh/storage/cassandra/storage.py b/swh/storage/cassandra/storage.py
index d63c0a9a..c41c351a 100644
--- a/swh/storage/cassandra/storage.py
+++ b/swh/storage/cassandra/storage.py
@@ -1,1530 +1,1536 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import base64
import datetime
import itertools
import operator
import random
import re
from typing import (
Any,
Callable,
Counter,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import attr
from swh.core.api.classes import stream_results
from swh.core.api.serializers import msgpack_dumps, msgpack_loads
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_hex
from swh.model.identifiers import CoreSWHID, ExtendedObjectType, ExtendedSWHID
from swh.model.identifiers import ObjectType as SwhidObjectType
from swh.model.model import (
Content,
Directory,
DirectoryEntry,
ExtID,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
Origin,
OriginVisit,
OriginVisitStatus,
RawExtrinsicMetadata,
Release,
Revision,
Sha1Git,
SkippedContent,
Snapshot,
SnapshotBranch,
TargetType,
)
from swh.storage.interface import (
VISIT_STATUSES,
ListOrder,
PagedResult,
PartialBranches,
Sha1,
)
from swh.storage.objstorage import ObjStorage
from swh.storage.utils import map_optional, now
from swh.storage.writer import JournalWriter
from . import converters
from ..exc import HashCollision, StorageArgumentException
from ..utils import remove_keys
from .common import TOKEN_BEGIN, TOKEN_END, hash_url
from .cql import CqlRunner
from .model import (
ContentRow,
DirectoryEntryRow,
DirectoryRow,
ExtIDByTargetRow,
ExtIDRow,
MetadataAuthorityRow,
MetadataFetcherRow,
OriginRow,
OriginVisitRow,
OriginVisitStatusRow,
RawExtrinsicMetadataByIdRow,
RawExtrinsicMetadataRow,
RevisionParentRow,
SkippedContentRow,
SnapshotBranchRow,
SnapshotRow,
)
from .schema import HASH_ALGORITHMS
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
class CassandraStorage:
def __init__(
self,
hosts,
keyspace,
objstorage,
port=9042,
journal_writer=None,
allow_overwrite=False,
consistency_level="ONE",
):
"""
A backend of swh-storage backed by Cassandra
Args:
hosts: Seed Cassandra nodes, to start connecting to the cluster
keyspace: Name of the Cassandra database to use
objstorage: Passed as argument to :class:`ObjStorage`
port: Cassandra port
journal_writer: Passed as argument to :class:`JournalWriter`
allow_overwrite: Whether ``*_add`` functions will check if an object
already exists in the database before sending it in an INSERT.
``False`` is the default as it is more efficient when there is
a moderately high probability the object is already known,
but ``True`` can be useful to overwrite existing objects
(eg. when applying a schema update),
or when the database is known to be mostly empty.
Note that a ``False`` value does not guarantee there won't be
any overwrite.
consistency_level: The default read/write consistency to use
"""
self._hosts = hosts
self._keyspace = keyspace
self._port = port
self._consistency_level = consistency_level
self._set_cql_runner()
self.journal_writer: JournalWriter = JournalWriter(journal_writer)
self.objstorage: ObjStorage = ObjStorage(objstorage)
self._allow_overwrite = allow_overwrite
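# A minimal instantiation sketch (illustrative only; the keyspace name and the
# in-memory objstorage configuration below are assumptions, not defaults):
#   storage = CassandraStorage(
#       hosts=["127.0.0.1"],
#       keyspace="swh",
#       objstorage={"cls": "memory"},
#   )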
def _set_cql_runner(self):
"""Used by tests when they need to reset the CqlRunner"""
self._cql_runner: CqlRunner = CqlRunner(
self._hosts, self._keyspace, self._port, self._consistency_level
)
def check_config(self, *, check_write: bool) -> bool:
self._cql_runner.check_read()
return True
def _content_get_from_hash(self, algo, hash_) -> Iterable:
"""From the name of a hash algorithm and a value of that hash,
looks up the "hash -> token" secondary table (content_by_{algo})
to get tokens.
Then, looks up the main table (content) to get all contents with
that token, and filters out contents whose hash doesn't match."""
found_tokens = self._cql_runner.content_get_tokens_from_single_hash(algo, hash_)
for token in found_tokens:
assert isinstance(token, int), found_tokens
# Query the main table ('content').
res = self._cql_runner.content_get_from_token(token)
for row in res:
# re-check the hash (in case of a murmur3 collision)
if getattr(row, algo) == hash_:
yield row
def _content_add(self, contents: List[Content], with_data: bool) -> Dict[str, int]:
# Filter-out content already in the database.
if not self._allow_overwrite:
contents = [
c
for c in contents
if not self._cql_runner.content_get_from_pk(c.to_dict())
]
if with_data:
# First insert to the objstorage, if the endpoint is
# `content_add` (as opposed to `content_add_metadata`).
# Must add to the objstorage before the DB and journal. Otherwise:
# 1. in case of a crash the DB may "believe" we have the content, but
# we didn't have time to write to the objstorage before the crash
# 2. the objstorage mirroring, which reads from the journal, may attempt to
# read from the objstorage before we finished writing it
summary = self.objstorage.content_add(
c for c in contents if c.status != "absent"
)
content_add_bytes = summary["content:add:bytes"]
self.journal_writer.content_add(contents)
content_add = 0
for content in contents:
content_add += 1
# Check for sha1 or sha1_git collisions. This test is not atomic
# with the insertion, so it won't detect a collision if both
# contents are inserted at the same time, but it's good enough.
#
# The proper way to do it would probably be a BATCH, but this
# would be inefficient because of the number of partitions we
# need to affect (len(HASH_ALGORITHMS)+1, which is currently 5)
if not self._allow_overwrite:
for algo in {"sha1", "sha1_git"}:
collisions = []
# Get tokens of 'content' rows with the same value for
# sha1/sha1_git
rows = self._content_get_from_hash(algo, content.get_hash(algo))
for row in rows:
if getattr(row, algo) != content.get_hash(algo):
# collision of token(partition key), ignore this
# row
continue
for other_algo in HASH_ALGORITHMS:
if getattr(row, other_algo) != content.get_hash(other_algo):
# Another object shares this sha1/sha1_git but differs on
# another hash: record the collision.
collisions.append(
{k: getattr(row, k) for k in HASH_ALGORITHMS}
)
if collisions:
collisions.append(content.hashes())
raise HashCollision(algo, content.get_hash(algo), collisions)
(token, insertion_finalizer) = self._cql_runner.content_add_prepare(
ContentRow(**remove_keys(content.to_dict(), ("data",)))
)
# Then add to index tables
for algo in HASH_ALGORITHMS:
self._cql_runner.content_index_add_one(algo, content, token)
# Then to the main table
insertion_finalizer()
summary = {
"content:add": content_add,
}
if with_data:
summary["content:add:bytes"] = content_add_bytes
return summary
def content_add(self, content: List[Content]) -> Dict[str, int]:
to_add = {
(c.sha1, c.sha1_git, c.sha256, c.blake2s256): c for c in content
}.values()
contents = [attr.evolve(c, ctime=now()) for c in to_add]
return self._content_add(list(contents), with_data=True)
def content_update(
self, contents: List[Dict[str, Any]], keys: List[str] = []
) -> None:
raise NotImplementedError(
"content_update is not supported by the Cassandra backend"
)
def content_add_metadata(self, content: List[Content]) -> Dict[str, int]:
return self._content_add(content, with_data=False)
def content_get_data(self, content: Sha1) -> Optional[bytes]:
# FIXME: Make this method support slicing the `data`
return self.objstorage.content_get(content)
def content_get_partition(
self,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
) -> PagedResult[Content]:
if limit is None:
raise StorageArgumentException("limit should not be None")
# Compute start and end of the range of tokens covered by the
# requested partition
partition_size = (TOKEN_END - TOKEN_BEGIN) // nb_partitions
range_start = TOKEN_BEGIN + partition_id * partition_size
range_end = TOKEN_BEGIN + (partition_id + 1) * partition_size
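# Worked example (illustrative only, assuming TOKEN_BEGIN/TOKEN_END are the
# Murmur3 ring bounds): with nb_partitions=4 and partition_id=0, this covers
# tokens in [TOKEN_BEGIN, TOKEN_BEGIN + (TOKEN_END - TOKEN_BEGIN) // 4),
# i.e. roughly the first quarter of the token ring.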
# offset the range start according to the `page_token`.
if page_token is not None:
if not (range_start <= int(page_token) <= range_end):
raise StorageArgumentException("Invalid page_token.")
range_start = int(page_token)
next_page_token: Optional[str] = None
rows = self._cql_runner.content_get_token_range(
range_start, range_end, limit + 1
)
contents = []
for counter, (tok, row) in enumerate(rows):
if row.status == "absent":
continue
row_d = row.to_dict()
if counter >= limit:
next_page_token = str(tok)
break
row_d.pop("ctime")
contents.append(Content(**row_d))
assert len(contents) <= limit
return PagedResult(results=contents, next_page_token=next_page_token)
def content_get(
self, contents: List[bytes], algo: str = "sha1"
) -> List[Optional[Content]]:
if algo not in DEFAULT_ALGORITHMS:
raise StorageArgumentException(
"algo should be one of {','.join(DEFAULT_ALGORITHMS)}"
)
key = operator.attrgetter(algo)
contents_by_hash: Dict[Sha1, Optional[Content]] = {}
for hash_ in contents:
# Get all (sha1, sha1_git, sha256, blake2s256) whose sha1/sha1_git
# matches the argument, from the index table ('content_by_*')
for row in self._content_get_from_hash(algo, hash_):
row_d = row.to_dict()
row_d.pop("ctime")
content = Content(**row_d)
contents_by_hash[key(content)] = content
return [contents_by_hash.get(hash_) for hash_ in contents]
def content_find(self, content: Dict[str, Any]) -> List[Content]:
# Find an algorithm that is common to all the requested contents.
# It will be used to do an initial filtering efficiently.
filter_algos = list(set(content).intersection(HASH_ALGORITHMS))
if not filter_algos:
raise StorageArgumentException(
"content keys must contain at least one "
f"of: {', '.join(sorted(HASH_ALGORITHMS))}"
)
common_algo = filter_algos[0]
results = []
rows = self._content_get_from_hash(common_algo, content[common_algo])
for row in rows:
# Re-check all the hashes, in case of collisions (either of the
# hash of the partition key, or the hashes in it)
for algo in HASH_ALGORITHMS:
if content.get(algo) and getattr(row, algo) != content[algo]:
# This hash didn't match; discard the row.
break
else:
# All hashes match, keep this row.
row_d = row.to_dict()
row_d["ctime"] = row.ctime.replace(tzinfo=datetime.timezone.utc)
results.append(Content(**row_d))
return results
def content_missing(
self, contents: List[Dict[str, Any]], key_hash: str = "sha1"
) -> Iterable[bytes]:
if key_hash not in DEFAULT_ALGORITHMS:
raise StorageArgumentException(
"key_hash should be one of {','.join(DEFAULT_ALGORITHMS)}"
)
for content in contents:
res = self.content_find(content)
if not res:
yield content[key_hash]
def content_missing_per_sha1(self, contents: List[bytes]) -> Iterable[bytes]:
return self.content_missing([{"sha1": c} for c in contents])
def content_missing_per_sha1_git(
self, contents: List[Sha1Git]
) -> Iterable[Sha1Git]:
return self.content_missing(
[{"sha1_git": c} for c in contents], key_hash="sha1_git"
)
def content_get_random(self) -> Sha1Git:
content = self._cql_runner.content_get_random()
assert content, "Could not find any content"
return content.sha1_git
def _skipped_content_add(self, contents: List[SkippedContent]) -> Dict[str, int]:
# Filter-out content already in the database.
if not self._allow_overwrite:
contents = [
c
for c in contents
if not self._cql_runner.skipped_content_get_from_pk(c.to_dict())
]
self.journal_writer.skipped_content_add(contents)
for content in contents:
# Compute token of the row in the main table
(token, insertion_finalizer) = self._cql_runner.skipped_content_add_prepare(
SkippedContentRow.from_dict({"origin": None, **content.to_dict()})
)
# Then add to index tables
for algo in HASH_ALGORITHMS:
self._cql_runner.skipped_content_index_add_one(algo, content, token)
# Then to the main table
insertion_finalizer()
return {"skipped_content:add": len(contents)}
def skipped_content_add(self, content: List[SkippedContent]) -> Dict[str, int]:
contents = [attr.evolve(c, ctime=now()) for c in content]
return self._skipped_content_add(contents)
def skipped_content_missing(
self, contents: List[Dict[str, Any]]
) -> Iterable[Dict[str, Any]]:
for content in contents:
if not self._cql_runner.skipped_content_get_from_pk(content):
yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS}
def directory_add(self, directories: List[Directory]) -> Dict[str, int]:
to_add = {d.id: d for d in directories}.values()
if not self._allow_overwrite:
# Filter out directories that are already inserted.
missing = self.directory_missing([dir_.id for dir_ in to_add])
directories = [dir_ for dir_ in directories if dir_.id in missing]
self.journal_writer.directory_add(directories)
for directory in directories:
# Add directory entries to the 'directory_entry' table
for entry in directory.entries:
self._cql_runner.directory_entry_add_one(
DirectoryEntryRow(directory_id=directory.id, **entry.to_dict())
)
# Add the directory *after* adding all the entries, so someone
# listing the directory in the meantime won't end up
# with half the entries.
self._cql_runner.directory_add_one(DirectoryRow(id=directory.id))
return {"directory:add": len(directories)}
def directory_missing(self, directories: List[Sha1Git]) -> Iterable[Sha1Git]:
return self._cql_runner.directory_missing(directories)
def _join_dentry_to_content(self, dentry: DirectoryEntry) -> Dict[str, Any]:
contents: Union[List[Content], List[SkippedContentRow]]
keys = (
"status",
"sha1",
"sha1_git",
"sha256",
"length",
)
ret = dict.fromkeys(keys)
ret.update(dentry.to_dict())
if ret["type"] == "file":
contents = self.content_find({"sha1_git": ret["target"]})
if not contents:
tokens = list(
self._cql_runner.skipped_content_get_tokens_from_single_hash(
"sha1_git", ret["target"]
)
)
if tokens:
contents = list(
self._cql_runner.skipped_content_get_from_token(tokens[0])
)
if contents:
content = contents[0]
for key in keys:
ret[key] = getattr(content, key)
return ret
def _directory_ls(
self, directory_id: Sha1Git, recursive: bool, prefix: bytes = b""
) -> Iterable[Dict[str, Any]]:
if self.directory_missing([directory_id]):
return
rows = list(self._cql_runner.directory_entry_get([directory_id]))
for row in rows:
entry_d = row.to_dict()
# Build and yield the directory entry dict
del entry_d["directory_id"]
entry = DirectoryEntry.from_dict(entry_d)
ret = self._join_dentry_to_content(entry)
ret["name"] = prefix + ret["name"]
ret["dir_id"] = directory_id
yield ret
if recursive and ret["type"] == "dir":
yield from self._directory_ls(
ret["target"], True, prefix + ret["name"] + b"/"
)
def directory_entry_get_by_path(
self, directory: Sha1Git, paths: List[bytes]
) -> Optional[Dict[str, Any]]:
return self._directory_entry_get_by_path(directory, paths, b"")
def _directory_entry_get_by_path(
self, directory: Sha1Git, paths: List[bytes], prefix: bytes
) -> Optional[Dict[str, Any]]:
if not paths:
return None
contents = list(self.directory_ls(directory))
if not contents:
return None
def _get_entry(entries, name):
"""Finds the entry with the requested name, prepends the
prefix (to get its full path), and returns it.
If no entry has that name, returns None."""
for entry in entries:
if entry["name"] == name:
entry = entry.copy()
entry["name"] = prefix + entry["name"]
return entry
first_item = _get_entry(contents, paths[0])
if len(paths) == 1:
return first_item
if not first_item or first_item["type"] != "dir":
return None
return self._directory_entry_get_by_path(
first_item["target"], paths[1:], prefix + paths[0] + b"/"
)
def directory_ls(
self, directory: Sha1Git, recursive: bool = False
) -> Iterable[Dict[str, Any]]:
yield from self._directory_ls(directory, recursive)
def directory_get_entries(
self,
directory_id: Sha1Git,
page_token: Optional[bytes] = None,
limit: int = 1000,
) -> Optional[PagedResult[DirectoryEntry]]:
if self.directory_missing([directory_id]):
return None
entries_from: bytes = page_token or b""
rows = self._cql_runner.directory_entry_get_from_name(
directory_id, entries_from, limit + 1
)
entries = [
DirectoryEntry.from_dict(remove_keys(row.to_dict(), ("directory_id",)))
for row in rows
]
if len(entries) > limit:
last_entry = entries.pop()
next_page_token = last_entry.name
else:
next_page_token = None
return PagedResult(results=entries, next_page_token=next_page_token)
def directory_get_random(self) -> Sha1Git:
directory = self._cql_runner.directory_get_random()
assert directory, "Could not find any directory"
return directory.id
def revision_add(self, revisions: List[Revision]) -> Dict[str, int]:
# Filter-out revisions already in the database
if not self._allow_overwrite:
to_add = {r.id: r for r in revisions}.values()
missing = self.revision_missing([rev.id for rev in to_add])
revisions = [rev for rev in revisions if rev.id in missing]
self.journal_writer.revision_add(revisions)
for revision in revisions:
revobject = converters.revision_to_db(revision)
if revobject:
# Add parents first
for (rank, parent) in enumerate(revision.parents):
self._cql_runner.revision_parent_add_one(
RevisionParentRow(
id=revobject.id, parent_rank=rank, parent_id=parent
)
)
# Then write the main revision row.
# Writing this after all parents were written ensures that
# read endpoints don't return a partial view while writing
# the parents
self._cql_runner.revision_add_one(revobject)
return {"revision:add": len(revisions)}
def revision_missing(self, revisions: List[Sha1Git]) -> Iterable[Sha1Git]:
return self._cql_runner.revision_missing(revisions)
def revision_get(self, revision_ids: List[Sha1Git]) -> List[Optional[Revision]]:
rows = self._cql_runner.revision_get(revision_ids)
revisions: Dict[Sha1Git, Revision] = {}
for row in rows:
# TODO: use a single query to get all parents?
# (it might have lower latency, but requires more code and more
# bandwidth, because revision id would be part of each returned
# row)
parents = tuple(self._cql_runner.revision_parent_get(row.id))
# parent_rank is the clustering key, so results are already
# sorted by rank.
rev = converters.revision_from_db(row, parents=parents)
revisions[rev.id] = rev
return [revisions.get(rev_id) for rev_id in revision_ids]
def _get_parent_revs(
self,
rev_ids: Iterable[Sha1Git],
seen: Set[Sha1Git],
limit: Optional[int],
short: bool,
) -> Union[
Iterable[Dict[str, Any]], Iterable[Tuple[Sha1Git, Tuple[Sha1Git, ...]]],
]:
if limit and len(seen) >= limit:
return
rev_ids = [id_ for id_ in rev_ids if id_ not in seen]
if not rev_ids:
return
seen |= set(rev_ids)
# We need this query, even if short=True, to return consistent
# results (ie. not return only a subset of a revision's parents
# if it is being written)
if short:
ids = self._cql_runner.revision_get_ids(rev_ids)
for id_ in ids:
# TODO: use a single query to get all parents?
# (it might have lower latency, but requires more code and more
# bandwidth, because revision id would be part of each returned
# row)
parents = tuple(self._cql_runner.revision_parent_get(id_))
# parent_rank is the clustering key, so results are already
# sorted by rank.
yield (id_, parents)
yield from self._get_parent_revs(parents, seen, limit, short)
else:
rows = self._cql_runner.revision_get(rev_ids)
for row in rows:
# TODO: use a single query to get all parents?
# (it might have lower latency, but requires more code and more
# bandwidth, because revision id would be part of each returned
# row)
parents = tuple(self._cql_runner.revision_parent_get(row.id))
# parent_rank is the clustering key, so results are already
# sorted by rank.
rev = converters.revision_from_db(row, parents=parents)
yield rev.to_dict()
yield from self._get_parent_revs(parents, seen, limit, short)
def revision_log(
self, revisions: List[Sha1Git], limit: Optional[int] = None
) -> Iterable[Optional[Dict[str, Any]]]:
seen: Set[Sha1Git] = set()
yield from self._get_parent_revs(revisions, seen, limit, False)
def revision_shortlog(
self, revisions: List[Sha1Git], limit: Optional[int] = None
) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]:
seen: Set[Sha1Git] = set()
yield from self._get_parent_revs(revisions, seen, limit, True)
def revision_get_random(self) -> Sha1Git:
revision = self._cql_runner.revision_get_random()
assert revision, "Could not find any revision"
return revision.id
def release_add(self, releases: List[Release]) -> Dict[str, int]:
if not self._allow_overwrite:
to_add = {r.id: r for r in releases}.values()
missing = set(self.release_missing([rel.id for rel in to_add]))
releases = [rel for rel in to_add if rel.id in missing]
self.journal_writer.release_add(releases)
for release in releases:
if release:
self._cql_runner.release_add_one(converters.release_to_db(release))
return {"release:add": len(releases)}
def release_missing(self, releases: List[Sha1Git]) -> Iterable[Sha1Git]:
return self._cql_runner.release_missing(releases)
def release_get(self, releases: List[Sha1Git]) -> List[Optional[Release]]:
rows = self._cql_runner.release_get(releases)
rels: Dict[Sha1Git, Release] = {}
for row in rows:
release = converters.release_from_db(row)
rels[row.id] = release
return [rels.get(rel_id) for rel_id in releases]
def release_get_random(self) -> Sha1Git:
release = self._cql_runner.release_get_random()
assert release, "Could not find any release"
return release.id
def snapshot_add(self, snapshots: List[Snapshot]) -> Dict[str, int]:
if not self._allow_overwrite:
to_add = {s.id: s for s in snapshots}.values()
missing = self._cql_runner.snapshot_missing([snp.id for snp in to_add])
snapshots = [snp for snp in snapshots if snp.id in missing]
for snapshot in snapshots:
self.journal_writer.snapshot_add([snapshot])
# Add branches
for (branch_name, branch) in snapshot.branches.items():
if branch is None:
target_type: Optional[str] = None
target: Optional[bytes] = None
else:
target_type = branch.target_type.value
target = branch.target
self._cql_runner.snapshot_branch_add_one(
SnapshotBranchRow(
snapshot_id=snapshot.id,
name=branch_name,
target_type=target_type,
target=target,
)
)
# Add the snapshot *after* adding all the branches, so someone
# calling snapshot_get_branch in the meantime won't end up
# with half the branches.
self._cql_runner.snapshot_add_one(SnapshotRow(id=snapshot.id))
return {"snapshot:add": len(snapshots)}
def snapshot_missing(self, snapshots: List[Sha1Git]) -> Iterable[Sha1Git]:
return self._cql_runner.snapshot_missing(snapshots)
def snapshot_get(self, snapshot_id: Sha1Git) -> Optional[Dict[str, Any]]:
d = self.snapshot_get_branches(snapshot_id)
if d is None:
return None
return {
"id": d["id"],
"branches": {
name: branch.to_dict() if branch else None
for (name, branch) in d["branches"].items()
},
"next_branch": d["next_branch"],
}
def snapshot_count_branches(
self, snapshot_id: Sha1Git, branch_name_exclude_prefix: Optional[bytes] = None,
) -> Optional[Dict[Optional[str], int]]:
if self._cql_runner.snapshot_missing([snapshot_id]):
# Makes sure we don't fetch branches for a snapshot that is
# being added.
return None
return self._cql_runner.snapshot_count_branches(
snapshot_id, branch_name_exclude_prefix
)
def snapshot_get_branches(
self,
snapshot_id: Sha1Git,
branches_from: bytes = b"",
branches_count: int = 1000,
target_types: Optional[List[str]] = None,
branch_name_include_substring: Optional[bytes] = None,
branch_name_exclude_prefix: Optional[bytes] = None,
) -> Optional[PartialBranches]:
if self._cql_runner.snapshot_missing([snapshot_id]):
# Makes sure we don't fetch branches for a snapshot that is
# being added.
return None
branches: List = []
while len(branches) < branches_count + 1:
new_branches = list(
self._cql_runner.snapshot_branch_get(
snapshot_id,
branches_from,
branches_count + 1,
branch_name_exclude_prefix,
)
)
if not new_branches:
break
branches_from = new_branches[-1].name
new_branches_filtered = new_branches
# Filter by target_type
if target_types:
new_branches_filtered = [
branch
for branch in new_branches_filtered
if branch.target is not None and branch.target_type in target_types
]
# Filter by branch name substring
if branch_name_include_substring:
new_branches_filtered = [
branch
for branch in new_branches_filtered
if branch.name is not None
and (
branch_name_include_substring is None
or branch_name_include_substring in branch.name
)
]
branches.extend(new_branches_filtered)
if len(new_branches) < branches_count + 1:
break
if len(branches) > branches_count:
last_branch = branches.pop(-1).name
else:
last_branch = None
return PartialBranches(
id=snapshot_id,
branches={
branch.name: None
if branch.target is None
else SnapshotBranch(
target=branch.target, target_type=TargetType(branch.target_type)
)
for branch in branches
},
next_branch=last_branch,
)
def snapshot_get_random(self) -> Sha1Git:
snapshot = self._cql_runner.snapshot_get_random()
assert snapshot, "Could not find any snapshot"
return snapshot.id
def object_find_by_sha1_git(self, ids: List[Sha1Git]) -> Dict[Sha1Git, List[Dict]]:
results: Dict[Sha1Git, List[Dict]] = {id_: [] for id_ in ids}
missing_ids = set(ids)
# Mind the order, revision is the most likely one for a given ID,
# so we check revisions first.
queries: List[Tuple[str, Callable[[List[Sha1Git]], List[Sha1Git]]]] = [
("revision", self._cql_runner.revision_missing),
("release", self._cql_runner.release_missing),
("content", self._cql_runner.content_missing_by_sha1_git),
("directory", self._cql_runner.directory_missing),
]
for (object_type, query_fn) in queries:
found_ids = missing_ids - set(query_fn(list(missing_ids)))
for sha1_git in found_ids:
results[sha1_git].append(
{"sha1_git": sha1_git, "type": object_type,}
)
missing_ids.remove(sha1_git)
if not missing_ids:
# We found everything, skipping the next queries.
break
return results
def origin_get(self, origins: List[str]) -> Iterable[Optional[Origin]]:
return [self.origin_get_one(origin) for origin in origins]
def origin_get_one(self, origin_url: str) -> Optional[Origin]:
"""Given an origin url, return the origin if it exists, None otherwise
"""
rows = list(self._cql_runner.origin_get_by_url(origin_url))
if rows:
assert len(rows) == 1
return Origin(url=rows[0].url)
else:
return None
def origin_get_by_sha1(self, sha1s: List[bytes]) -> List[Optional[Dict[str, Any]]]:
results = []
for sha1 in sha1s:
rows = list(self._cql_runner.origin_get_by_sha1(sha1))
origin = {"url": rows[0].url} if rows else None
results.append(origin)
return results
def origin_list(
self, page_token: Optional[str] = None, limit: int = 100
) -> PagedResult[Origin]:
# Compute what token to begin the listing from
start_token = TOKEN_BEGIN
if page_token:
start_token = int(page_token)
if not (TOKEN_BEGIN <= start_token <= TOKEN_END):
raise StorageArgumentException("Invalid page_token.")
next_page_token = None
origins = []
# Take one more origin so we can reuse it as the next page token if any
for (tok, row) in self._cql_runner.origin_list(start_token, limit + 1):
origins.append(Origin(url=row.url))
# keep reference of the last id for pagination purposes
last_id = tok
if len(origins) > limit:
# last origin id is the next page token
next_page_token = str(last_id)
# excluding that origin from the result to respect the limit size
origins = origins[:limit]
assert len(origins) <= limit
return PagedResult(results=origins, next_page_token=next_page_token)
def origin_search(
self,
url_pattern: str,
page_token: Optional[str] = None,
limit: int = 50,
regexp: bool = False,
with_visit: bool = False,
visit_types: Optional[List[str]] = None,
) -> PagedResult[Origin]:
# TODO: remove this endpoint, swh-search should be used instead.
next_page_token = None
offset = int(page_token) if page_token else 0
origin_rows = [row for row in self._cql_runner.origin_iter_all()]
if regexp:
pat = re.compile(url_pattern)
origin_rows = [row for row in origin_rows if pat.search(row.url)]
else:
origin_rows = [row for row in origin_rows if url_pattern in row.url]
if with_visit:
origin_rows = [row for row in origin_rows if row.next_visit_id > 1]
if visit_types:
def _has_visit_types(origin, visit_types):
for origin_visit in stream_results(self.origin_visit_get, origin):
if origin_visit.type in visit_types:
return True
return False
origin_rows = [
row for row in origin_rows if _has_visit_types(row.url, visit_types)
]
origins = [Origin(url=row.url) for row in origin_rows]
origins = origins[offset : offset + limit + 1]
if len(origins) > limit:
# next offset
next_page_token = str(offset + limit)
# excluding that origin from the result to respect the limit size
origins = origins[:limit]
assert len(origins) <= limit
return PagedResult(results=origins, next_page_token=next_page_token)
def origin_count(
self, url_pattern: str, regexp: bool = False, with_visit: bool = False
) -> int:
raise NotImplementedError(
"The Cassandra backend does not implement origin_count"
)
def origin_add(self, origins: List[Origin]) -> Dict[str, int]:
if not self._allow_overwrite:
to_add = {o.url: o for o in origins}.values()
origins = [ori for ori in to_add if self.origin_get_one(ori.url) is None]
self.journal_writer.origin_add(origins)
for origin in origins:
self._cql_runner.origin_add_one(
OriginRow(sha1=hash_url(origin.url), url=origin.url, next_visit_id=1)
)
return {"origin:add": len(origins)}
def origin_visit_add(self, visits: List[OriginVisit]) -> Iterable[OriginVisit]:
for visit in visits:
origin = self.origin_get_one(visit.origin)
if not origin: # Cannot add a visit without an origin
raise StorageArgumentException("Unknown origin %s", visit.origin)
all_visits = []
nb_visits = 0
for visit in visits:
nb_visits += 1
if not visit.visit:
visit_id = self._cql_runner.origin_generate_unique_visit_id(
visit.origin
)
visit = attr.evolve(visit, visit=visit_id)
self.journal_writer.origin_visit_add([visit])
self._cql_runner.origin_visit_add_one(OriginVisitRow(**visit.to_dict()))
assert visit.visit is not None
all_visits.append(visit)
self._origin_visit_status_add(
OriginVisitStatus(
origin=visit.origin,
visit=visit.visit,
date=visit.date,
type=visit.type,
status="created",
snapshot=None,
)
)
return all_visits
def _origin_visit_status_add(self, visit_status: OriginVisitStatus) -> None:
"""Add an origin visit status"""
if visit_status.type is None:
visit_row = self._cql_runner.origin_visit_get_one(
visit_status.origin, visit_status.visit
)
if visit_row is None:
raise StorageArgumentException(
f"Unknown origin visit {visit_status.visit} "
f"of origin {visit_status.origin}"
)
visit_status = attr.evolve(visit_status, type=visit_row.type)
self.journal_writer.origin_visit_status_add([visit_status])
self._cql_runner.origin_visit_status_add_one(
converters.visit_status_to_row(visit_status)
)
def origin_visit_status_add(
self, visit_statuses: List[OriginVisitStatus]
) -> Dict[str, int]:
# First round to check existence (fail early if any is ko)
for visit_status in visit_statuses:
origin_url = self.origin_get_one(visit_status.origin)
if not origin_url:
raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
for visit_status in visit_statuses:
self._origin_visit_status_add(visit_status)
return {"origin_visit_status:add": len(visit_statuses)}
def _origin_visit_apply_status(
self, visit: Dict[str, Any], visit_status: OriginVisitStatusRow
) -> Dict[str, Any]:
"""Retrieve the latest visit status information for the origin visit.
Then merge it with the visit and return it.
"""
return {
# default to the values in visit
**visit,
# override with the last update
**visit_status.to_dict(),
# visit['origin'] is the URL (via a join), while
# visit_status['origin'] is only an id.
"origin": visit["origin"],
# but keep the date of the creation of the origin visit
"date": visit["date"],
# We use the visit type from origin visit
# if it's not present on the origin visit status
"type": visit_status.type or visit["type"],
}
def _origin_visit_get_latest_status(self, visit: OriginVisit) -> OriginVisitStatus:
"""Retrieve the latest visit status information for the origin visit object.
"""
assert visit.visit
row = self._cql_runner.origin_visit_status_get_latest(visit.origin, visit.visit)
assert row is not None
visit_status = converters.row_to_visit_status(row)
return attr.evolve(visit_status, origin=visit.origin)
@staticmethod
def _format_origin_visit_row(visit):
return {
**visit.to_dict(),
"origin": visit.origin,
"date": visit.date.replace(tzinfo=datetime.timezone.utc),
}
def origin_visit_get(
self,
origin: str,
page_token: Optional[str] = None,
order: ListOrder = ListOrder.ASC,
limit: int = 10,
) -> PagedResult[OriginVisit]:
if not isinstance(order, ListOrder):
raise StorageArgumentException("order must be a ListOrder value")
if page_token and not isinstance(page_token, str):
raise StorageArgumentException("page_token must be a string.")
next_page_token = None
visit_from = None if page_token is None else int(page_token)
visits: List[OriginVisit] = []
extra_limit = limit + 1
rows = self._cql_runner.origin_visit_get(origin, visit_from, extra_limit, order)
for row in rows:
visits.append(converters.row_to_visit(row))
assert len(visits) <= extra_limit
if len(visits) == extra_limit:
visits = visits[:limit]
next_page_token = str(visits[-1].visit)
return PagedResult(results=visits, next_page_token=next_page_token)
def origin_visit_status_get(
self,
origin: str,
visit: int,
page_token: Optional[str] = None,
order: ListOrder = ListOrder.ASC,
limit: int = 10,
) -> PagedResult[OriginVisitStatus]:
next_page_token = None
date_from = None
if page_token is not None:
date_from = datetime.datetime.fromisoformat(page_token)
# Take one more visit status so we can reuse it as the next page token if any
rows = self._cql_runner.origin_visit_status_get_range(
origin, visit, date_from, limit + 1, order
)
visit_statuses = [converters.row_to_visit_status(row) for row in rows]
if len(visit_statuses) > limit:
# last visit status date is the next page token
next_page_token = str(visit_statuses[-1].date)
# excluding that visit status from the result to respect the limit size
visit_statuses = visit_statuses[:limit]
return PagedResult(results=visit_statuses, next_page_token=next_page_token)
def origin_visit_find_by_date(
self, origin: str, visit_date: datetime.datetime
) -> Optional[OriginVisit]:
# Iterator over all the visits of the origin
# This should be ok for now, as there aren't too many visits
# per origin.
rows = list(self._cql_runner.origin_visit_get_all(origin))
def key(visit):
dt = visit.date.replace(tzinfo=datetime.timezone.utc) - visit_date
return (abs(dt), -visit.visit)
if rows:
return converters.row_to_visit(min(rows, key=key))
return None
def origin_visit_get_by(self, origin: str, visit: int) -> Optional[OriginVisit]:
row = self._cql_runner.origin_visit_get_one(origin, visit)
if row:
return converters.row_to_visit(row)
return None
def origin_visit_get_latest(
self,
origin: str,
type: Optional[str] = None,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisit]:
if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
raise StorageArgumentException(
f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
f"{','.join(VISIT_STATUSES)} authorized"
)
# TODO: Do not fetch all visits
rows = self._cql_runner.origin_visit_get_all(origin)
latest_visit = None
for row in rows:
visit = self._format_origin_visit_row(row)
for status_row in self._cql_runner.origin_visit_status_get(
origin, visit["visit"]
):
updated_visit = self._origin_visit_apply_status(visit, status_row)
if type is not None and updated_visit["type"] != type:
continue
if allowed_statuses and updated_visit["status"] not in allowed_statuses:
continue
if require_snapshot and updated_visit["snapshot"] is None:
continue
# updated_visit is a candidate
if latest_visit is not None:
if updated_visit["date"] < latest_visit["date"]:
continue
if updated_visit["visit"] < latest_visit["visit"]:
continue
latest_visit = updated_visit
if latest_visit is None:
return None
return OriginVisit(
origin=latest_visit["origin"],
visit=latest_visit["visit"],
date=latest_visit["date"],
type=latest_visit["type"],
)
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisitStatus]:
if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
raise StorageArgumentException(
f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
f"{','.join(VISIT_STATUSES)} authorized"
)
rows = list(self._cql_runner.origin_visit_status_get(origin_url, visit))
# filtering is done python side as we cannot do it server side
if allowed_statuses:
rows = [row for row in rows if row.status in allowed_statuses]
if require_snapshot:
rows = [row for row in rows if row.snapshot is not None]
if not rows:
return None
return converters.row_to_visit_status(rows[0])
def origin_visit_status_get_random(self, type: str) -> Optional[OriginVisitStatus]:
back_in_the_day = now() - datetime.timedelta(weeks=12) # 3 months back
# Random position to start iteration at
start_token = random.randint(TOKEN_BEGIN, TOKEN_END)
# Iterator over all visits, ordered by token(origins) then visit_id
rows = self._cql_runner.origin_visit_iter(start_token)
for row in rows:
visit = converters.row_to_visit(row)
visit_status = self._origin_visit_get_latest_status(visit)
if visit.date > back_in_the_day and visit_status.status == "full":
return visit_status
return None
def stat_counters(self):
rows = self._cql_runner.stat_counters()
keys = (
"content",
"directory",
"origin",
"origin_visit",
"release",
"revision",
"skipped_content",
"snapshot",
)
stats = {key: 0 for key in keys}
stats.update({row.object_type: row.count for row in rows})
return stats
def refresh_stat_counters(self):
pass
def raw_extrinsic_metadata_add(
self, metadata: List[RawExtrinsicMetadata]
) -> Dict[str, int]:
self.journal_writer.raw_extrinsic_metadata_add(metadata)
counter = Counter[ExtendedObjectType]()
for metadata_entry in metadata:
if not self._cql_runner.metadata_authority_get(
metadata_entry.authority.type.value, metadata_entry.authority.url
):
raise StorageArgumentException(
f"Unknown authority {metadata_entry.authority}"
)
if not self._cql_runner.metadata_fetcher_get(
metadata_entry.fetcher.name, metadata_entry.fetcher.version
):
raise StorageArgumentException(
f"Unknown fetcher {metadata_entry.fetcher}"
)
try:
row = RawExtrinsicMetadataRow(
id=metadata_entry.id,
type=metadata_entry.target.object_type.name.lower(),
target=str(metadata_entry.target),
authority_type=metadata_entry.authority.type.value,
authority_url=metadata_entry.authority.url,
discovery_date=metadata_entry.discovery_date,
fetcher_name=metadata_entry.fetcher.name,
fetcher_version=metadata_entry.fetcher.version,
format=metadata_entry.format,
metadata=metadata_entry.metadata,
origin=metadata_entry.origin,
visit=metadata_entry.visit,
snapshot=map_optional(str, metadata_entry.snapshot),
release=map_optional(str, metadata_entry.release),
revision=map_optional(str, metadata_entry.revision),
path=metadata_entry.path,
directory=map_optional(str, metadata_entry.directory),
)
except TypeError as e:
raise StorageArgumentException(*e.args)
# Add to the index first
self._cql_runner.raw_extrinsic_metadata_by_id_add(
RawExtrinsicMetadataByIdRow(
id=row.id,
target=row.target,
authority_type=row.authority_type,
authority_url=row.authority_url,
)
)
# Then to the main table
self._cql_runner.raw_extrinsic_metadata_add(row)
counter[metadata_entry.target.object_type] += 1
return {
f"{type.value}_metadata:add": count for (type, count) in counter.items()
}
def raw_extrinsic_metadata_get(
self,
target: ExtendedSWHID,
authority: MetadataAuthority,
after: Optional[datetime.datetime] = None,
page_token: Optional[bytes] = None,
limit: int = 1000,
) -> PagedResult[RawExtrinsicMetadata]:
if page_token is not None:
(after_date, id_) = msgpack_loads(base64.b64decode(page_token))
if after and after_date < after:
raise StorageArgumentException(
"page_token is inconsistent with the value of 'after'."
)
entries = self._cql_runner.raw_extrinsic_metadata_get_after_date_and_id(
str(target), authority.type.value, authority.url, after_date, id_,
)
elif after is not None:
entries = self._cql_runner.raw_extrinsic_metadata_get_after_date(
str(target), authority.type.value, authority.url, after
)
else:
entries = self._cql_runner.raw_extrinsic_metadata_get(
str(target), authority.type.value, authority.url
)
if limit:
entries = itertools.islice(entries, 0, limit + 1)
results = []
for entry in entries:
assert str(target) == entry.target
results.append(converters.row_to_raw_extrinsic_metadata(entry))
if len(results) > limit:
results.pop()
assert len(results) == limit
last_result = results[-1]
next_page_token: Optional[str] = base64.b64encode(
msgpack_dumps((last_result.discovery_date, last_result.id,))
).decode()
else:
next_page_token = None
return PagedResult(next_page_token=next_page_token, results=results,)
def raw_extrinsic_metadata_get_by_ids(
self, ids: List[Sha1Git]
) -> List[RawExtrinsicMetadata]:
keys = self._cql_runner.raw_extrinsic_metadata_get_by_ids(ids)
results: Set[RawExtrinsicMetadata] = set()
for key in keys:
candidates = self._cql_runner.raw_extrinsic_metadata_get(
key.target, key.authority_type, key.authority_url
)
candidates = [
candidate for candidate in candidates if candidate.id == key.id
]
if len(candidates) > 1:
raise Exception(
"Found multiple RawExtrinsicMetadata objects with the same id: "
+ hash_to_hex(key.id)
)
results.update(map(converters.row_to_raw_extrinsic_metadata, candidates))
return list(results)
def raw_extrinsic_metadata_get_authorities(
self, target: ExtendedSWHID
) -> List[MetadataAuthority]:
return [
MetadataAuthority(
type=MetadataAuthorityType(authority_type), url=authority_url
)
for (authority_type, authority_url) in set(
self._cql_runner.raw_extrinsic_metadata_get_authorities(str(target))
)
]
def metadata_fetcher_add(self, fetchers: List[MetadataFetcher]) -> Dict[str, int]:
self.journal_writer.metadata_fetcher_add(fetchers)
for fetcher in fetchers:
self._cql_runner.metadata_fetcher_add(
MetadataFetcherRow(name=fetcher.name, version=fetcher.version,)
)
return {"metadata_fetcher:add": len(fetchers)}
def metadata_fetcher_get(
self, name: str, version: str
) -> Optional[MetadataFetcher]:
fetcher = self._cql_runner.metadata_fetcher_get(name, version)
if fetcher:
return MetadataFetcher(name=fetcher.name, version=fetcher.version,)
else:
return None
def metadata_authority_add(
self, authorities: List[MetadataAuthority]
) -> Dict[str, int]:
self.journal_writer.metadata_authority_add(authorities)
for authority in authorities:
self._cql_runner.metadata_authority_add(
MetadataAuthorityRow(url=authority.url, type=authority.type.value,)
)
return {"metadata_authority:add": len(authorities)}
def metadata_authority_get(
self, type: MetadataAuthorityType, url: str
) -> Optional[MetadataAuthority]:
authority = self._cql_runner.metadata_authority_get(type.value, url)
if authority:
return MetadataAuthority(
type=MetadataAuthorityType(authority.type), url=authority.url,
)
else:
return None
# ExtID tables
def extid_add(self, ids: List[ExtID]) -> Dict[str, int]:
if not self._allow_overwrite:
extids = [
extid
for extid in ids
if not self._cql_runner.extid_get_from_pk(
- extid_type=extid.extid_type, extid=extid.extid, target=extid.target,
+ extid_type=extid.extid_type,
+ extid_version=extid.extid_version,
+ extid=extid.extid,
+ target=extid.target,
)
]
else:
extids = list(ids)
self.journal_writer.extid_add(extids)
inserted = 0
for extid in extids:
target_type = extid.target.object_type.value
target = extid.target.object_id
extidrow = ExtIDRow(
extid_type=extid.extid_type,
+ extid_version=extid.extid_version,
extid=extid.extid,
target_type=target_type,
target=target,
)
(token, insertion_finalizer) = self._cql_runner.extid_add_prepare(extidrow)
indexrow = ExtIDByTargetRow(
target_type=target_type, target=target, target_token=token,
)
self._cql_runner.extid_index_add_one(indexrow)
insertion_finalizer()
inserted += 1
return {"extid:add": inserted}
def extid_get_from_extid(self, id_type: str, ids: List[bytes]) -> List[ExtID]:
result: List[ExtID] = []
for extid in ids:
extidrows = list(self._cql_runner.extid_get_from_extid(id_type, extid))
result.extend(
ExtID(
extid_type=extidrow.extid_type,
+ extid_version=extidrow.extid_version,
extid=extidrow.extid,
target=CoreSWHID(
object_type=extidrow.target_type, object_id=extidrow.target,
),
)
for extidrow in extidrows
)
return result
def extid_get_from_target(
self, target_type: SwhidObjectType, ids: List[Sha1Git]
) -> List[ExtID]:
result: List[ExtID] = []
for target in ids:
extidrows = list(
self._cql_runner.extid_get_from_target(target_type.value, target)
)
result.extend(
ExtID(
extid_type=extidrow.extid_type,
+ extid_version=extidrow.extid_version,
extid=extidrow.extid,
target=CoreSWHID(
object_type=SwhidObjectType(extidrow.target_type),
object_id=extidrow.target,
),
)
for extidrow in extidrows
)
return result
# Misc
def clear_buffers(self, object_types: Sequence[str] = ()) -> None:
"""Do nothing
"""
return None
def flush(self, object_types: Sequence[str] = ()) -> Dict[str, int]:
return {}
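# Illustration (not part of the patch; the helper name and identifier values are
# made up, and `storage` is assumed to be an already-configured CassandraStorage
# or the in-memory variant): a minimal sketch of storing an ExtID that carries
# the new extid_version field.
def _extid_add_example(storage):
    from swh.model.identifiers import CoreSWHID
    from swh.model.identifiers import ObjectType as SwhidObjectType
    from swh.model.model import ExtID

    extid = ExtID(
        extid_type="git",
        extid_version=1,
        extid=b"\x01" * 20,
        target=CoreSWHID(
            object_type=SwhidObjectType.REVISION, object_id=b"\x02" * 20,
        ),
    )
    # Returns {"extid:add": 1} the first time; on a second call the
    # (extid_type, extid_version, extid, target) tuple is found by
    # extid_get_from_pk and skipped unless overwriting is allowed.
    return storage.extid_add([extid])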
diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py
index bf3ab717..284534e2 100644
--- a/swh/storage/in_memory.py
+++ b/swh/storage/in_memory.py
@@ -1,726 +1,727 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import defaultdict
import datetime
import functools
import itertools
import random
from typing import (
Any,
Dict,
Generic,
Iterable,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from swh.model.identifiers import ExtendedSWHID
from swh.model.model import Content, Sha1Git, SkippedContent
from swh.storage.cassandra import CassandraStorage
from swh.storage.cassandra.model import (
BaseRow,
ContentRow,
DirectoryEntryRow,
DirectoryRow,
ExtIDByTargetRow,
ExtIDRow,
MetadataAuthorityRow,
MetadataFetcherRow,
ObjectCountRow,
OriginRow,
OriginVisitRow,
OriginVisitStatusRow,
RawExtrinsicMetadataByIdRow,
RawExtrinsicMetadataRow,
ReleaseRow,
RevisionParentRow,
RevisionRow,
SkippedContentRow,
SnapshotBranchRow,
SnapshotRow,
)
from swh.storage.interface import ListOrder
from swh.storage.objstorage import ObjStorage
from .common import origin_url_to_sha1
from .writer import JournalWriter
TRow = TypeVar("TRow", bound=BaseRow)
class Table(Generic[TRow]):
def __init__(self, row_class: Type[TRow]):
self.row_class = row_class
self.primary_key_cols = row_class.PARTITION_KEY + row_class.CLUSTERING_KEY
# Map from tokens to clustering keys to rows
# These are not actually partitions (or rather, there is one partition
# for each token) and they aren't sorted.
        # But it is good enough as long as we don't care about performance,
        # and it makes the code a lot simpler.
self.data: Dict[int, Dict[Tuple, TRow]] = defaultdict(dict)
def __repr__(self):
return f"<__module__.Table[{self.row_class.__name__}] object>"
def partition_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple:
"""Returns the partition key of a row (ie. the cells which get hashed
        into the token)."""
if isinstance(row, dict):
row_d = row
else:
row_d = row.to_dict()
return tuple(row_d[col] for col in self.row_class.PARTITION_KEY)
def clustering_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple:
"""Returns the clustering key of a row (ie. the cells which are used
        for sorting rows within a partition)."""
if isinstance(row, dict):
row_d = row
else:
row_d = row.to_dict()
return tuple(row_d[col] for col in self.row_class.CLUSTERING_KEY)
def primary_key(self, row):
return self.partition_key(row) + self.clustering_key(row)
def primary_key_from_dict(self, d: Dict[str, Any]) -> Tuple:
"""Returns the primary key (ie. concatenation of partition key and
clustering key) of the given dictionary interpreted as a row."""
return tuple(d[col] for col in self.primary_key_cols)
def token(self, key: Tuple):
"""Returns the token of a row (ie. the hash of its partition key)."""
return hash(key)
def get_partition(self, token: int) -> Dict[Tuple, TRow]:
"""Returns the partition that contains this token."""
return self.data[token]
def insert(self, row: TRow):
partition = self.data[self.token(self.partition_key(row))]
partition[self.clustering_key(row)] = row
def split_primary_key(self, key: Tuple) -> Tuple[Tuple, Tuple]:
"""Returns (partition_key, clustering_key) from a partition key"""
assert len(key) == len(self.primary_key_cols)
partition_key = key[0 : len(self.row_class.PARTITION_KEY)]
clustering_key = key[len(self.row_class.PARTITION_KEY) :]
return (partition_key, clustering_key)
def get_from_partition_key(self, partition_key: Tuple) -> Iterable[TRow]:
"""Returns at most one row, from its partition key."""
token = self.token(partition_key)
for row in self.get_from_token(token):
if self.partition_key(row) == partition_key:
yield row
def get_from_primary_key(self, primary_key: Tuple) -> Optional[TRow]:
"""Returns at most one row, from its primary key."""
(partition_key, clustering_key) = self.split_primary_key(primary_key)
token = self.token(partition_key)
partition = self.get_partition(token)
return partition.get(clustering_key)
def get_from_token(self, token: int) -> Iterable[TRow]:
"""Returns all rows whose token (ie. non-cryptographic hash of the
partition key) is the one passed as argument."""
return (v for (k, v) in sorted(self.get_partition(token).items()))
def iter_all(self) -> Iterator[Tuple[Tuple, TRow]]:
return (
(self.primary_key(row), row)
for (token, partition) in self.data.items()
for (clustering_key, row) in partition.items()
)
def get_random(self) -> Optional[TRow]:
return random.choice([row for (pk, row) in self.iter_all()])
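# Illustration only (the _DemoRow class and helper below are hypothetical, not
# part of swh.storage.cassandra.model): Table hashes the PARTITION_KEY cells of
# a row into a token, then keys the rows sharing that token by their
# CLUSTERING_KEY cells, which is what get_from_partition_key() and
# get_from_primary_key() rely on.
def _table_example():
    import dataclasses

    @dataclasses.dataclass
    class _DemoRow:
        PARTITION_KEY = ("id",)
        CLUSTERING_KEY = ("rank",)
        id: bytes
        rank: int

        def to_dict(self):
            return dataclasses.asdict(self)

    table = Table(_DemoRow)
    table.insert(_DemoRow(id=b"abc", rank=0))
    table.insert(_DemoRow(id=b"abc", rank=1))
    # both rows live in the partition keyed by the token of (b"abc",)
    assert len(list(table.get_from_partition_key((b"abc",)))) == 2
    # primary key = partition key + clustering key
    assert table.get_from_primary_key((b"abc", 1)).rank == 1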
class InMemoryCqlRunner:
def __init__(self):
self._contents = Table(ContentRow)
self._content_indexes = defaultdict(lambda: defaultdict(set))
self._skipped_contents = Table(ContentRow)
self._skipped_content_indexes = defaultdict(lambda: defaultdict(set))
self._directories = Table(DirectoryRow)
self._directory_entries = Table(DirectoryEntryRow)
self._revisions = Table(RevisionRow)
self._revision_parents = Table(RevisionParentRow)
self._releases = Table(ReleaseRow)
self._snapshots = Table(SnapshotRow)
self._snapshot_branches = Table(SnapshotBranchRow)
self._origins = Table(OriginRow)
self._origin_visits = Table(OriginVisitRow)
self._origin_visit_statuses = Table(OriginVisitStatusRow)
self._metadata_authorities = Table(MetadataAuthorityRow)
self._metadata_fetchers = Table(MetadataFetcherRow)
self._raw_extrinsic_metadata = Table(RawExtrinsicMetadataRow)
self._raw_extrinsic_metadata_by_id = Table(RawExtrinsicMetadataByIdRow)
self._extid = Table(ExtIDRow)
self._stat_counters = defaultdict(int)
def increment_counter(self, object_type: str, nb: int):
self._stat_counters[object_type] += nb
def stat_counters(self) -> Iterable[ObjectCountRow]:
for (object_type, count) in self._stat_counters.items():
yield ObjectCountRow(partition_key=0, object_type=object_type, count=count)
##########################
# 'content' table
##########################
def _content_add_finalize(self, content: ContentRow) -> None:
self._contents.insert(content)
self.increment_counter("content", 1)
def content_add_prepare(self, content: ContentRow):
finalizer = functools.partial(self._content_add_finalize, content)
return (self._contents.token(self._contents.partition_key(content)), finalizer)
def content_get_from_pk(
self, content_hashes: Dict[str, bytes]
) -> Optional[ContentRow]:
primary_key = self._contents.primary_key_from_dict(content_hashes)
return self._contents.get_from_primary_key(primary_key)
def content_get_from_token(self, token: int) -> Iterable[ContentRow]:
return self._contents.get_from_token(token)
def content_get_random(self) -> Optional[ContentRow]:
return self._contents.get_random()
def content_get_token_range(
self, start: int, end: int, limit: int,
) -> Iterable[Tuple[int, ContentRow]]:
matches = [
(token, row)
for (token, partition) in self._contents.data.items()
for (clustering_key, row) in partition.items()
if start <= token <= end
]
matches.sort()
return matches[0:limit]
##########################
# 'content_by_*' tables
##########################
def content_missing_by_sha1_git(self, ids: List[bytes]) -> List[bytes]:
missing = []
for id_ in ids:
if id_ not in self._content_indexes["sha1_git"]:
missing.append(id_)
return missing
def content_index_add_one(self, algo: str, content: Content, token: int) -> None:
self._content_indexes[algo][content.get_hash(algo)].add(token)
def content_get_tokens_from_single_hash(
self, algo: str, hash_: bytes
) -> Iterable[int]:
return self._content_indexes[algo][hash_]
##########################
# 'skipped_content' table
##########################
def _skipped_content_add_finalize(self, content: SkippedContentRow) -> None:
self._skipped_contents.insert(content)
self.increment_counter("skipped_content", 1)
def skipped_content_add_prepare(self, content: SkippedContentRow):
finalizer = functools.partial(self._skipped_content_add_finalize, content)
return (
self._skipped_contents.token(self._contents.partition_key(content)),
finalizer,
)
def skipped_content_get_from_pk(
self, content_hashes: Dict[str, bytes]
) -> Optional[SkippedContentRow]:
primary_key = self._skipped_contents.primary_key_from_dict(content_hashes)
return self._skipped_contents.get_from_primary_key(primary_key)
def skipped_content_get_from_token(self, token: int) -> Iterable[SkippedContentRow]:
return self._skipped_contents.get_from_token(token)
##########################
# 'skipped_content_by_*' tables
##########################
def skipped_content_index_add_one(
self, algo: str, content: SkippedContent, token: int
) -> None:
self._skipped_content_indexes[algo][content.get_hash(algo)].add(token)
def skipped_content_get_tokens_from_single_hash(
self, algo: str, hash_: bytes
) -> Iterable[int]:
return self._skipped_content_indexes[algo][hash_]
##########################
# 'directory' table
##########################
def directory_missing(self, ids: List[bytes]) -> List[bytes]:
missing = []
for id_ in ids:
if self._directories.get_from_primary_key((id_,)) is None:
missing.append(id_)
return missing
def directory_add_one(self, directory: DirectoryRow) -> None:
self._directories.insert(directory)
self.increment_counter("directory", 1)
def directory_get_random(self) -> Optional[DirectoryRow]:
return self._directories.get_random()
##########################
# 'directory_entry' table
##########################
def directory_entry_add_one(self, entry: DirectoryEntryRow) -> None:
self._directory_entries.insert(entry)
def directory_entry_get(
self, directory_ids: List[Sha1Git]
) -> Iterable[DirectoryEntryRow]:
for id_ in directory_ids:
yield from self._directory_entries.get_from_partition_key((id_,))
def directory_entry_get_from_name(
self, directory_id: Sha1Git, from_: bytes, limit: int
) -> Iterable[DirectoryEntryRow]:
# Get all entries
entries = self._directory_entries.get_from_partition_key((directory_id,))
# Filter out the ones before from_
entries = itertools.dropwhile(lambda entry: entry.name < from_, entries)
# Apply limit
return itertools.islice(entries, limit)
##########################
# 'revision' table
##########################
def revision_missing(self, ids: List[bytes]) -> Iterable[bytes]:
missing = []
for id_ in ids:
if self._revisions.get_from_primary_key((id_,)) is None:
missing.append(id_)
return missing
def revision_add_one(self, revision: RevisionRow) -> None:
self._revisions.insert(revision)
self.increment_counter("revision", 1)
def revision_get_ids(self, revision_ids) -> Iterable[int]:
for id_ in revision_ids:
if self._revisions.get_from_primary_key((id_,)) is not None:
yield id_
def revision_get(self, revision_ids: List[Sha1Git]) -> Iterable[RevisionRow]:
for id_ in revision_ids:
row = self._revisions.get_from_primary_key((id_,))
if row:
yield row
def revision_get_random(self) -> Optional[RevisionRow]:
return self._revisions.get_random()
##########################
# 'revision_parent' table
##########################
def revision_parent_add_one(self, revision_parent: RevisionParentRow) -> None:
self._revision_parents.insert(revision_parent)
def revision_parent_get(self, revision_id: Sha1Git) -> Iterable[bytes]:
for parent in self._revision_parents.get_from_partition_key((revision_id,)):
yield parent.parent_id
##########################
# 'release' table
##########################
def release_missing(self, ids: List[bytes]) -> List[bytes]:
missing = []
for id_ in ids:
if self._releases.get_from_primary_key((id_,)) is None:
missing.append(id_)
return missing
def release_add_one(self, release: ReleaseRow) -> None:
self._releases.insert(release)
self.increment_counter("release", 1)
def release_get(self, release_ids: List[str]) -> Iterable[ReleaseRow]:
for id_ in release_ids:
row = self._releases.get_from_primary_key((id_,))
if row:
yield row
def release_get_random(self) -> Optional[ReleaseRow]:
return self._releases.get_random()
##########################
# 'snapshot' table
##########################
def snapshot_missing(self, ids: List[bytes]) -> List[bytes]:
missing = []
for id_ in ids:
if self._snapshots.get_from_primary_key((id_,)) is None:
missing.append(id_)
return missing
def snapshot_add_one(self, snapshot: SnapshotRow) -> None:
self._snapshots.insert(snapshot)
self.increment_counter("snapshot", 1)
def snapshot_get_random(self) -> Optional[SnapshotRow]:
return self._snapshots.get_random()
##########################
# 'snapshot_branch' table
##########################
def snapshot_branch_add_one(self, branch: SnapshotBranchRow) -> None:
self._snapshot_branches.insert(branch)
def snapshot_count_branches(
self, snapshot_id: Sha1Git, branch_name_exclude_prefix: Optional[bytes] = None,
) -> Dict[Optional[str], int]:
"""Returns a dictionary from type names to the number of branches
of that type."""
counts: Dict[Optional[str], int] = defaultdict(int)
for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)):
if branch_name_exclude_prefix and branch.name.startswith(
branch_name_exclude_prefix
):
continue
if branch.target_type is None:
target_type = None
else:
target_type = branch.target_type
counts[target_type] += 1
return counts
def snapshot_branch_get(
self,
snapshot_id: Sha1Git,
from_: bytes,
limit: int,
branch_name_exclude_prefix: Optional[bytes] = None,
) -> Iterable[SnapshotBranchRow]:
count = 0
for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)):
prefix = branch_name_exclude_prefix
if branch.name >= from_ and (
prefix is None or not branch.name.startswith(prefix)
):
count += 1
yield branch
if count >= limit:
break
##########################
# 'origin' table
##########################
def origin_add_one(self, origin: OriginRow) -> None:
self._origins.insert(origin)
self.increment_counter("origin", 1)
def origin_get_by_sha1(self, sha1: bytes) -> Iterable[OriginRow]:
return self._origins.get_from_partition_key((sha1,))
def origin_get_by_url(self, url: str) -> Iterable[OriginRow]:
return self.origin_get_by_sha1(origin_url_to_sha1(url))
def origin_list(
self, start_token: int, limit: int
) -> Iterable[Tuple[int, OriginRow]]:
"""Returns an iterable of (token, origin)"""
matches = [
(token, row)
for (token, partition) in self._origins.data.items()
for (clustering_key, row) in partition.items()
if token >= start_token
]
matches.sort()
return matches[0:limit]
def origin_iter_all(self) -> Iterable[OriginRow]:
return (
row
for (token, partition) in self._origins.data.items()
for (clustering_key, row) in partition.items()
)
def origin_generate_unique_visit_id(self, origin_url: str) -> int:
origin = list(self.origin_get_by_url(origin_url))[0]
visit_id = origin.next_visit_id
origin.next_visit_id += 1
return visit_id
##########################
# 'origin_visit' table
##########################
def origin_visit_get(
self, origin_url: str, last_visit: Optional[int], limit: int, order: ListOrder,
) -> Iterable[OriginVisitRow]:
visits = list(self._origin_visits.get_from_partition_key((origin_url,)))
if last_visit is not None:
if order == ListOrder.ASC:
visits = [v for v in visits if v.visit > last_visit]
else:
visits = [v for v in visits if v.visit < last_visit]
visits.sort(key=lambda v: v.visit, reverse=order == ListOrder.DESC)
visits = visits[0:limit]
return visits
def origin_visit_add_one(self, visit: OriginVisitRow) -> None:
self._origin_visits.insert(visit)
self.increment_counter("origin_visit", 1)
def origin_visit_get_one(
self, origin_url: str, visit_id: int
) -> Optional[OriginVisitRow]:
return self._origin_visits.get_from_primary_key((origin_url, visit_id))
def origin_visit_get_all(self, origin_url: str) -> Iterable[OriginVisitRow]:
return self._origin_visits.get_from_partition_key((origin_url,))
def origin_visit_iter(self, start_token: int) -> Iterator[OriginVisitRow]:
"""Returns all origin visits in order from this token,
and wraps around the token space."""
return (
row
for (token, partition) in self._origin_visits.data.items()
for (clustering_key, row) in partition.items()
)
##########################
# 'origin_visit_status' table
##########################
def origin_visit_status_get_range(
self,
origin: str,
visit: int,
date_from: Optional[datetime.datetime],
limit: int,
order: ListOrder,
) -> Iterable[OriginVisitStatusRow]:
statuses = list(self.origin_visit_status_get(origin, visit))
if date_from is not None:
if order == ListOrder.ASC:
statuses = [s for s in statuses if s.date >= date_from]
else:
statuses = [s for s in statuses if s.date <= date_from]
statuses.sort(key=lambda s: s.date, reverse=order == ListOrder.DESC)
return statuses[0:limit]
def origin_visit_status_add_one(self, visit_update: OriginVisitStatusRow) -> None:
self._origin_visit_statuses.insert(visit_update)
self.increment_counter("origin_visit_status", 1)
def origin_visit_status_get_latest(
self, origin: str, visit: int,
) -> Optional[OriginVisitStatusRow]:
"""Given an origin visit id, return its latest origin_visit_status
"""
return next(self.origin_visit_status_get(origin, visit), None)
def origin_visit_status_get(
self, origin: str, visit: int,
) -> Iterator[OriginVisitStatusRow]:
"""Return all origin visit statuses for a given visit
"""
statuses = [
s
for s in self._origin_visit_statuses.get_from_partition_key((origin,))
if s.visit == visit
]
statuses.sort(key=lambda s: s.date, reverse=True)
return iter(statuses)
##########################
# 'metadata_authority' table
##########################
def metadata_authority_add(self, authority: MetadataAuthorityRow):
self._metadata_authorities.insert(authority)
self.increment_counter("metadata_authority", 1)
def metadata_authority_get(self, type, url) -> Optional[MetadataAuthorityRow]:
return self._metadata_authorities.get_from_primary_key((url, type))
##########################
# 'metadata_fetcher' table
##########################
def metadata_fetcher_add(self, fetcher: MetadataFetcherRow):
self._metadata_fetchers.insert(fetcher)
self.increment_counter("metadata_fetcher", 1)
def metadata_fetcher_get(self, name, version) -> Optional[MetadataAuthorityRow]:
return self._metadata_fetchers.get_from_primary_key((name, version))
#########################
# 'raw_extrinsic_metadata_by_id' table
#########################
def raw_extrinsic_metadata_by_id_add(
self, row: RawExtrinsicMetadataByIdRow
) -> None:
self._raw_extrinsic_metadata_by_id.insert(row)
def raw_extrinsic_metadata_get_by_ids(
self, ids
) -> List[RawExtrinsicMetadataByIdRow]:
results = []
for id_ in ids:
result = self._raw_extrinsic_metadata_by_id.get_from_primary_key((id_,))
if result:
results.append(result)
return results
#########################
# 'raw_extrinsic_metadata' table
#########################
def raw_extrinsic_metadata_add(self, raw_extrinsic_metadata):
self._raw_extrinsic_metadata.insert(raw_extrinsic_metadata)
self.increment_counter("raw_extrinsic_metadata", 1)
def raw_extrinsic_metadata_get_after_date(
self,
target: str,
authority_type: str,
authority_url: str,
after: datetime.datetime,
) -> Iterable[RawExtrinsicMetadataRow]:
metadata = self.raw_extrinsic_metadata_get(
target, authority_type, authority_url
)
return (m for m in metadata if m.discovery_date > after)
def raw_extrinsic_metadata_get_after_date_and_id(
self,
target: str,
authority_type: str,
authority_url: str,
after_date: datetime.datetime,
after_id: bytes,
) -> Iterable[RawExtrinsicMetadataRow]:
metadata = self._raw_extrinsic_metadata.get_from_partition_key((target,))
after_tuple = (after_date, after_id)
return (
m
for m in metadata
if m.authority_type == authority_type
and m.authority_url == authority_url
and (m.discovery_date, m.id) > after_tuple
)
def raw_extrinsic_metadata_get(
self, target: str, authority_type: str, authority_url: str
) -> Iterable[RawExtrinsicMetadataRow]:
metadata = self._raw_extrinsic_metadata.get_from_partition_key((target,))
return (
m
for m in metadata
if m.authority_type == authority_type and m.authority_url == authority_url
)
def raw_extrinsic_metadata_get_authorities(
self, target: str
) -> Iterable[Tuple[str, str]]:
metadata = self._raw_extrinsic_metadata.get_from_partition_key((target,))
return ((m.authority_type, m.authority_url) for m in metadata)
#########################
# 'extid' table
#########################
def _extid_add_finalize(self, extid: ExtIDRow) -> None:
self._extid.insert(extid)
self.increment_counter("extid", 1)
def extid_add_prepare(self, extid: ExtIDRow):
finalizer = functools.partial(self._extid_add_finalize, extid)
return (self._extid.token(self._extid.partition_key(extid)), finalizer)
def extid_index_add_one(self, row: ExtIDByTargetRow) -> None:
pass
def extid_get_from_pk(
- self, extid_type: str, extid: bytes, target: ExtendedSWHID,
+ self, extid_type: str, extid: bytes, extid_version: int, target: ExtendedSWHID,
) -> Optional[ExtIDRow]:
primary_key = self._extid.primary_key_from_dict(
dict(
extid_type=extid_type,
extid=extid,
+ extid_version=extid_version,
target_type=target.object_type.value,
target=target.object_id,
)
)
return self._extid.get_from_primary_key(primary_key)
def extid_get_from_extid(self, extid_type: str, extid: bytes) -> Iterable[ExtIDRow]:
return (
row
for pk, row in self._extid.iter_all()
if row.extid_type == extid_type and row.extid == extid
)
def extid_get_from_target(
self, target_type: str, target: bytes
) -> Iterable[ExtIDRow]:
return (
row
for pk, row in self._extid.iter_all()
if row.target_type == target_type and row.target == target
)
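# Illustration (hypothetical helper, placeholder identifiers): with the new
# signature, a primary-key lookup also has to provide the extid_version.
# A CoreSWHID is used as `target` here because that is what
# CassandraStorage.extid_add passes down to extid_get_from_pk.
def _extid_get_from_pk_example():
    from swh.model.identifiers import CoreSWHID
    from swh.model.identifiers import ObjectType as SwhidObjectType

    runner = InMemoryCqlRunner()
    target = CoreSWHID(object_type=SwhidObjectType.REVISION, object_id=b"\x02" * 20)
    row = ExtIDRow(
        extid_type="git",
        extid_version=1,
        extid=b"\x01" * 20,
        target_type=target.object_type.value,
        target=target.object_id,
    )
    _, finalize = runner.extid_add_prepare(row)
    finalize()
    # the stored row is found when the matching version is given
    assert (
        runner.extid_get_from_pk(
            extid_type="git", extid=b"\x01" * 20, extid_version=1, target=target
        )
        is not None
    )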
class InMemoryStorage(CassandraStorage):
_cql_runner: InMemoryCqlRunner # type: ignore
def __init__(self, journal_writer=None):
self.reset()
self.journal_writer = JournalWriter(journal_writer)
self._allow_overwrite = False
def reset(self):
self._cql_runner = InMemoryCqlRunner()
self.objstorage = ObjStorage({"cls": "memory"})
def check_config(self, *, check_write: bool) -> bool:
return True
diff --git a/swh/storage/postgresql/converters.py b/swh/storage/postgresql/converters.py
index c9f306b8..e1c64a92 100644
--- a/swh/storage/postgresql/converters.py
+++ b/swh/storage/postgresql/converters.py
@@ -1,338 +1,339 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
from typing import Any, Dict, Optional
import warnings
from swh.core.utils import encode_with_unescape
from swh.model.identifiers import CoreSWHID, ExtendedSWHID
from swh.model.identifiers import ObjectType as SwhidObjectType
from swh.model.identifiers import origin_identifier
from swh.model.model import (
ExtID,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
ObjectType,
Person,
RawExtrinsicMetadata,
Release,
Revision,
RevisionType,
Timestamp,
TimestampWithTimezone,
)
from ..utils import map_optional
DEFAULT_AUTHOR = {
"fullname": None,
"name": None,
"email": None,
}
DEFAULT_DATE = {
"timestamp": None,
"offset": 0,
"neg_utc_offset": None,
}
def author_to_db(author: Optional[Person]) -> Dict[str, Any]:
"""Convert a swh-model author to its DB representation.
Args:
author: a :mod:`swh.model` compatible author
Returns:
        dict: a dictionary with three keys: fullname, name and email
"""
if author is None:
return DEFAULT_AUTHOR
return author.to_dict()
def db_to_author(
fullname: Optional[bytes], name: Optional[bytes], email: Optional[bytes]
) -> Optional[Person]:
"""Convert the DB representation of an author to a swh-model author.
Args:
fullname (bytes): the author's fullname
name (bytes): the author's name
email (bytes): the author's email
Returns:
a Person object, or None if 'fullname' is None.
"""
if fullname is None:
return None
return Person(fullname=fullname, name=name, email=email,)
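# A small round-trip illustration (the helper name and author values are made
# up): author_to_db() flattens a Person into the three author columns and
# db_to_author() rebuilds it, returning None when the fullname column is NULL.
def _author_round_trip_example():
    from swh.model.model import Person

    person = Person(
        fullname=b"Jane Doe <jane@example.org>",
        name=b"Jane Doe",
        email=b"jane@example.org",
    )
    columns = author_to_db(person)
    assert set(columns) == {"fullname", "name", "email"}
    assert (
        db_to_author(columns["fullname"], columns["name"], columns["email"]) == person
    )
    assert db_to_author(None, None, None) is None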
def db_to_git_headers(db_git_headers):
ret = []
for key, value in db_git_headers:
ret.append([key.encode("utf-8"), encode_with_unescape(value)])
return ret
def db_to_date(
date: Optional[datetime.datetime], offset: int, neg_utc_offset: Optional[bool]
) -> Optional[TimestampWithTimezone]:
"""Convert the DB representation of a date to a swh-model compatible date.
Args:
date: a date pulled out of the database
        offset: an integer number of minutes representing a UTC offset
        neg_utc_offset: whether the UTC offset is negative
Returns:
a TimestampWithTimezone, or None if the date is None.
"""
if date is None:
return None
if neg_utc_offset is None:
# For older versions of the database that were not migrated to schema v160
neg_utc_offset = False
return TimestampWithTimezone(
timestamp=Timestamp(
seconds=int(date.timestamp()), microseconds=date.microsecond,
),
offset=offset,
negative_utc=neg_utc_offset,
)
def date_to_db(ts_with_tz: Optional[TimestampWithTimezone]) -> Dict[str, Any]:
"""Convert a swh-model date_offset to its DB representation.
Args:
ts_with_tz: a TimestampWithTimezone object
Returns:
dict: a dictionary with three keys:
- timestamp: a date in ISO format
- offset: the UTC offset in minutes
- neg_utc_offset: a boolean indicating whether a null offset is
negative or positive.
"""
if ts_with_tz is None:
return DEFAULT_DATE
ts = ts_with_tz.timestamp
timestamp = datetime.datetime.fromtimestamp(ts.seconds, datetime.timezone.utc)
timestamp = timestamp.replace(microsecond=ts.microseconds)
return {
# PostgreSQL supports isoformatted timestamps
"timestamp": timestamp.isoformat(),
"offset": ts_with_tz.offset,
"neg_utc_offset": ts_with_tz.negative_utc,
}
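# A worked illustration (hypothetical helper, arbitrary date): date_to_db()
# stores the timestamp as an ISO-formatted string plus the offset in minutes
# and the negative-utc flag; db_to_date() reads those columns back into a
# TimestampWithTimezone.
def _date_round_trip_example():
    db_date = date_to_db(
        TimestampWithTimezone(
            timestamp=Timestamp(seconds=1577836800, microseconds=0),
            offset=120,
            negative_utc=False,
        )
    )
    assert db_date == {
        "timestamp": "2020-01-01T00:00:00+00:00",
        "offset": 120,
        "neg_utc_offset": False,
    }
    parsed = db_to_date(
        datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc),
        db_date["offset"],
        db_date["neg_utc_offset"],
    )
    assert parsed.offset == 120 and parsed.timestamp.seconds == 1577836800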
def revision_to_db(revision: Revision) -> Dict[str, Any]:
"""Convert a swh-model revision to its database representation.
"""
author = author_to_db(revision.author)
date = date_to_db(revision.date)
committer = author_to_db(revision.committer)
committer_date = date_to_db(revision.committer_date)
return {
"id": revision.id,
"author_fullname": author["fullname"],
"author_name": author["name"],
"author_email": author["email"],
"date": date["timestamp"],
"date_offset": date["offset"],
"date_neg_utc_offset": date["neg_utc_offset"],
"committer_fullname": committer["fullname"],
"committer_name": committer["name"],
"committer_email": committer["email"],
"committer_date": committer_date["timestamp"],
"committer_date_offset": committer_date["offset"],
"committer_date_neg_utc_offset": committer_date["neg_utc_offset"],
"type": revision.type.value,
"directory": revision.directory,
"message": revision.message,
"metadata": None if revision.metadata is None else dict(revision.metadata),
"synthetic": revision.synthetic,
"extra_headers": revision.extra_headers,
"parents": [
{"id": revision.id, "parent_id": parent, "parent_rank": i,}
for i, parent in enumerate(revision.parents)
],
}
def db_to_revision(db_revision: Dict[str, Any]) -> Optional[Revision]:
"""Convert a database representation of a revision to its swh-model
representation."""
if db_revision["type"] is None:
assert all(
v is None for (k, v) in db_revision.items() if k not in ("id", "parents")
)
return None
author = db_to_author(
db_revision["author_fullname"],
db_revision["author_name"],
db_revision["author_email"],
)
date = db_to_date(
db_revision["date"],
db_revision["date_offset"],
db_revision["date_neg_utc_offset"],
)
committer = db_to_author(
db_revision["committer_fullname"],
db_revision["committer_name"],
db_revision["committer_email"],
)
committer_date = db_to_date(
db_revision["committer_date"],
db_revision["committer_date_offset"],
db_revision["committer_date_neg_utc_offset"],
)
assert author, "author is None"
assert committer, "committer is None"
parents = []
if "parents" in db_revision:
for parent in db_revision["parents"]:
if parent:
parents.append(parent)
metadata = db_revision["metadata"]
extra_headers = db_revision["extra_headers"]
if not extra_headers:
if metadata and "extra_headers" in metadata:
extra_headers = db_to_git_headers(metadata.pop("extra_headers"))
else:
# For older versions of the database that were not migrated to schema v161
extra_headers = ()
return Revision(
id=db_revision["id"],
author=author,
date=date,
committer=committer,
committer_date=committer_date,
type=RevisionType(db_revision["type"]),
directory=db_revision["directory"],
message=db_revision["message"],
metadata=metadata,
synthetic=db_revision["synthetic"],
extra_headers=extra_headers,
parents=tuple(parents),
)
def release_to_db(release: Release) -> Dict[str, Any]:
"""Convert a swh-model release to its database representation.
"""
author = author_to_db(release.author)
date = date_to_db(release.date)
return {
"id": release.id,
"author_fullname": author["fullname"],
"author_name": author["name"],
"author_email": author["email"],
"date": date["timestamp"],
"date_offset": date["offset"],
"date_neg_utc_offset": date["neg_utc_offset"],
"name": release.name,
"target": release.target,
"target_type": release.target_type.value,
"comment": release.message,
"synthetic": release.synthetic,
}
def db_to_release(db_release: Dict[str, Any]) -> Optional[Release]:
"""Convert a database representation of a release to its swh-model
representation.
"""
if db_release["target_type"] is None:
assert all(v is None for (k, v) in db_release.items() if k != "id")
return None
author = db_to_author(
db_release["author_fullname"],
db_release["author_name"],
db_release["author_email"],
)
date = db_to_date(
db_release["date"], db_release["date_offset"], db_release["date_neg_utc_offset"]
)
return Release(
author=author,
date=date,
id=db_release["id"],
name=db_release["name"],
message=db_release["comment"],
synthetic=db_release["synthetic"],
target=db_release["target"],
target_type=ObjectType(db_release["target_type"]),
)
def db_to_raw_extrinsic_metadata(row) -> RawExtrinsicMetadata:
target = row["raw_extrinsic_metadata.target"]
if not target.startswith("swh:1:"):
warnings.warn(
"Fetching raw_extrinsic_metadata row with URL target", DeprecationWarning
)
target = "swh:1:ori:" + origin_identifier({"url": target})
return RawExtrinsicMetadata(
target=ExtendedSWHID.from_string(target),
authority=MetadataAuthority(
type=MetadataAuthorityType(row["metadata_authority.type"]),
url=row["metadata_authority.url"],
),
fetcher=MetadataFetcher(
name=row["metadata_fetcher.name"], version=row["metadata_fetcher.version"],
),
discovery_date=row["discovery_date"],
format=row["format"],
metadata=row["raw_extrinsic_metadata.metadata"],
origin=row["origin"],
visit=row["visit"],
snapshot=map_optional(CoreSWHID.from_string, row["snapshot"]),
release=map_optional(CoreSWHID.from_string, row["release"]),
revision=map_optional(CoreSWHID.from_string, row["revision"]),
path=row["path"],
directory=map_optional(CoreSWHID.from_string, row["directory"]),
)
def db_to_extid(row) -> ExtID:
return ExtID(
extid=row["extid"],
extid_type=row["extid_type"],
+ extid_version=row.get("extid_version", 0),
target=CoreSWHID(
object_id=row["target"],
object_type=SwhidObjectType[row["target_type"].upper()],
),
)
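# A minimal sketch (hypothetical helper, placeholder hashes): rows coming from
# a query that does not select the new column fall back to extid_version=0, so
# the converter stays compatible with older query results, while rows that do
# carry the column keep their version.
def _db_to_extid_example():
    row = {
        "extid_type": "git",
        "extid": b"\x01" * 20,
        "target_type": "revision",
        "target": b"\x02" * 20,
    }
    assert db_to_extid(row).extid_version == 0
    assert db_to_extid({**row, "extid_version": 3}).extid_version == 3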
diff --git a/swh/storage/postgresql/db.py b/swh/storage/postgresql/db.py
index a9281a8c..fc51f3d0 100644
--- a/swh/storage/postgresql/db.py
+++ b/swh/storage/postgresql/db.py
@@ -1,1473 +1,1473 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import logging
import random
import select
from typing import Any, Dict, Iterable, List, Optional, Tuple
from swh.core.db import BaseDb
from swh.core.db.db_utils import execute_values_generator
from swh.core.db.db_utils import jsonize as _jsonize
from swh.core.db.db_utils import stored_procedure
from swh.model.hashutil import DEFAULT_ALGORITHMS
from swh.model.identifiers import ObjectType
from swh.model.model import SHA1_SIZE, OriginVisit, OriginVisitStatus, Sha1Git
from swh.storage.interface import ListOrder
logger = logging.getLogger(__name__)
def jsonize(d):
return _jsonize(dict(d) if d is not None else None)
class Db(BaseDb):
"""Proxy to the SWH DB, with wrappers around stored procedures
"""
- current_version = 175
+ current_version = 176
def mktemp_dir_entry(self, entry_type, cur=None):
self._cursor(cur).execute(
"SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),)
)
@stored_procedure("swh_mktemp_revision")
def mktemp_revision(self, cur=None):
pass
@stored_procedure("swh_mktemp_release")
def mktemp_release(self, cur=None):
pass
@stored_procedure("swh_mktemp_snapshot_branch")
def mktemp_snapshot_branch(self, cur=None):
pass
def register_listener(self, notify_queue, cur=None):
"""Register a listener for NOTIFY queue `notify_queue`"""
self._cursor(cur).execute("LISTEN %s" % notify_queue)
def listen_notifies(self, timeout):
"""Listen to notifications for `timeout` seconds"""
if select.select([self.conn], [], [], timeout) == ([], [], []):
return
else:
self.conn.poll()
while self.conn.notifies:
yield self.conn.notifies.pop(0)
@stored_procedure("swh_content_add")
def content_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_directory_add")
def directory_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_skipped_content_add")
def skipped_content_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_revision_add")
def revision_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_extid_add")
def extid_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_release_add")
def release_add_from_temp(self, cur=None):
pass
def content_update_from_temp(self, keys_to_update, cur=None):
cur = self._cursor(cur)
cur.execute(
"""select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update
)
content_get_metadata_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"status",
]
content_add_keys = content_get_metadata_keys + ["ctime"]
skipped_content_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"reason",
"status",
"origin",
]
def content_get_metadata_from_hashes(
self, hashes: List[bytes], algo: str, cur=None
):
cur = self._cursor(cur)
assert algo in DEFAULT_ALGORITHMS
query = f"""
select {", ".join(self.content_get_metadata_keys)}
from (values %s) as t (hash)
inner join content on (content.{algo}=hash)
"""
yield from execute_values_generator(
cur, query, ((hash_,) for hash_ in hashes),
)
def content_get_range(self, start, end, limit=None, cur=None):
"""Retrieve contents within range [start, end].
"""
cur = self._cursor(cur)
query = """select %s from content
where %%s <= sha1 and sha1 <= %%s
order by sha1
limit %%s""" % ", ".join(
self.content_get_metadata_keys
)
cur.execute(query, (start, end, limit))
yield from cur
content_hash_keys = ["sha1", "sha1_git", "sha256", "blake2s256"]
def content_missing_from_list(self, contents, cur=None):
cur = self._cursor(cur)
keys = ", ".join(self.content_hash_keys)
equality = " AND ".join(
("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys
)
yield from execute_values_generator(
cur,
"""
SELECT %s
FROM (VALUES %%s) as t(%s)
WHERE NOT EXISTS (
SELECT 1 FROM content c
WHERE %s
)
"""
% (keys, keys, equality),
(tuple(c[key] for key in self.content_hash_keys) for c in contents),
)
def content_missing_per_sha1(self, sha1s, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT t.sha1 FROM (VALUES %s) AS t(sha1)
WHERE NOT EXISTS (
SELECT 1 FROM content c WHERE c.sha1 = t.sha1
)""",
((sha1,) for sha1 in sha1s),
)
def content_missing_per_sha1_git(self, contents, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git)
WHERE NOT EXISTS (
SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git
)""",
((sha1,) for sha1 in contents),
)
def skipped_content_missing(self, contents, cur=None):
if not contents:
return []
cur = self._cursor(cur)
query = """SELECT * FROM (VALUES %s) AS t (%s)
WHERE not exists
(SELECT 1 FROM skipped_content s WHERE
s.sha1 is not distinct from t.sha1::sha1 and
s.sha1_git is not distinct from t.sha1_git::sha1 and
s.sha256 is not distinct from t.sha256::bytea);""" % (
(", ".join("%s" for _ in contents)),
", ".join(self.content_hash_keys),
)
cur.execute(
query,
[tuple(cont[key] for key in self.content_hash_keys) for cont in contents],
)
yield from cur
def snapshot_exists(self, snapshot_id, cur=None):
"""Check whether a snapshot with the given id exists"""
cur = self._cursor(cur)
cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,))
return bool(cur.fetchone())
def snapshot_missing_from_list(self, snapshots, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM snapshot d WHERE d.id = t.id
)
""",
((id,) for id in snapshots),
)
def snapshot_add(self, snapshot_id, cur=None):
"""Add a snapshot from the temporary table"""
cur = self._cursor(cur)
cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,))
snapshot_count_cols = ["target_type", "count"]
def snapshot_count_branches(
self, snapshot_id, branch_name_exclude_prefix=None, cur=None,
):
cur = self._cursor(cur)
query = """\
SELECT %s FROM swh_snapshot_count_branches(%%s, %%s)
""" % ", ".join(
self.snapshot_count_cols
)
cur.execute(query, (snapshot_id, branch_name_exclude_prefix))
yield from cur
snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"]
def snapshot_get_by_id(
self,
snapshot_id,
branches_from=b"",
branches_count=None,
target_types=None,
branch_name_include_substring=None,
branch_name_exclude_prefix=None,
cur=None,
):
cur = self._cursor(cur)
query = """\
SELECT %s
FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[], %%s, %%s)
""" % ", ".join(
self.snapshot_get_cols
)
cur.execute(
query,
(
snapshot_id,
branches_from,
branches_count,
target_types,
branch_name_include_substring,
branch_name_exclude_prefix,
),
)
yield from cur
def snapshot_get_random(self, cur=None):
return self._get_random_row_from_table("snapshot", ["id"], "id", cur)
content_find_cols = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
]
def content_find(
self,
sha1: Optional[bytes] = None,
sha1_git: Optional[bytes] = None,
sha256: Optional[bytes] = None,
blake2s256: Optional[bytes] = None,
cur=None,
):
"""Find the content optionally on a combination of the following
checksums sha1, sha1_git, sha256 or blake2s256.
Args:
sha1: sha1 content
git_sha1: the sha1 computed `a la git` sha1 of the content
sha256: sha256 content
blake2s256: blake2s256 content
Returns:
The tuple (sha1, sha1_git, sha256, blake2s256) if found or None.
"""
cur = self._cursor(cur)
checksum_dict = {
"sha1": sha1,
"sha1_git": sha1_git,
"sha256": sha256,
"blake2s256": blake2s256,
}
query_parts = [f"SELECT {','.join(self.content_find_cols)} FROM content WHERE "]
query_params = []
where_parts = []
        # Add only the checksums for which a value was provided
for algorithm in checksum_dict:
if checksum_dict[algorithm] is not None:
where_parts.append(f"{algorithm} = %s")
query_params.append(checksum_dict[algorithm])
query_parts.append(" AND ".join(where_parts))
query = "\n".join(query_parts)
cur.execute(query, query_params)
content = cur.fetchall()
return content
def content_get_random(self, cur=None):
return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur)
def directory_missing_from_list(self, directories, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM directory d WHERE d.id = t.id
)
""",
((id,) for id in directories),
)
directory_ls_cols = [
"dir_id",
"type",
"target",
"name",
"perms",
"status",
"sha1",
"sha1_git",
"sha256",
"length",
]
def directory_walk_one(self, directory, cur=None):
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols
cur.execute(query, (directory,))
yield from cur
def directory_walk(self, directory, cur=None):
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_directory_walk(%%s)" % cols
cur.execute(query, (directory,))
yield from cur
def directory_entry_get_by_path(self, directory, paths, cur=None):
"""Retrieve a directory entry by path.
"""
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols
cur.execute(query, (directory, paths))
data = cur.fetchone()
if set(data) == {None}:
return None
return data
directory_get_entries_cols = ["type", "target", "name", "perms"]
def directory_get_entries(self, directory: Sha1Git, cur=None) -> List[Tuple]:
cur = self._cursor(cur)
cur.execute(
"SELECT * FROM swh_directory_get_entries(%s::sha1_git)", (directory,)
)
return list(cur)
def directory_get_random(self, cur=None):
return self._get_random_row_from_table("directory", ["id"], "id", cur)
def revision_missing_from_list(self, revisions, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM revision r WHERE r.id = t.id
)
""",
((id,) for id in revisions),
)
revision_add_cols = [
"id",
"date",
"date_offset",
"date_neg_utc_offset",
"committer_date",
"committer_date_offset",
"committer_date_neg_utc_offset",
"type",
"directory",
"message",
"author_fullname",
"author_name",
"author_email",
"committer_fullname",
"committer_name",
"committer_email",
"metadata",
"synthetic",
"extra_headers",
]
revision_get_cols = revision_add_cols + ["parents"]
def origin_visit_add(self, origin, ts, type, cur=None):
"""Add a new origin_visit for origin origin at timestamp ts.
Args:
origin: origin concerned by the visit
ts: the date of the visit
type: type of loader for the visit
Returns:
            The id of the new visit for that origin
"""
cur = self._cursor(cur)
self._cursor(cur).execute(
"SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type)
)
return cur.fetchone()[0]
origin_visit_status_cols = [
"origin",
"visit",
"date",
"type",
"status",
"snapshot",
"metadata",
]
def origin_visit_status_add(
self, visit_status: OriginVisitStatus, cur=None
) -> None:
"""Add new origin visit status
"""
assert self.origin_visit_status_cols[0] == "origin"
assert self.origin_visit_status_cols[-1] == "metadata"
cols = self.origin_visit_status_cols[1:-1]
cur = self._cursor(cur)
cur.execute(
f"WITH origin_id as (select id from origin where url=%s) "
f"INSERT INTO origin_visit_status "
f"(origin, {', '.join(cols)}, metadata) "
f"VALUES ((select id from origin_id), "
f"{', '.join(['%s']*len(cols))}, %s) "
f"ON CONFLICT (origin, visit, date) do nothing",
[visit_status.origin]
+ [getattr(visit_status, key) for key in cols]
+ [jsonize(visit_status.metadata)],
)
origin_visit_cols = ["origin", "visit", "date", "type"]
def origin_visit_add_with_id(self, origin_visit: OriginVisit, cur=None) -> None:
"""Insert origin visit when id are already set
"""
ov = origin_visit
assert ov.visit is not None
cur = self._cursor(cur)
query = """INSERT INTO origin_visit ({cols})
VALUES ((select id from origin where url=%s), {values})
ON CONFLICT (origin, visit) DO NOTHING""".format(
cols=", ".join(self.origin_visit_cols),
values=", ".join("%s" for col in self.origin_visit_cols[1:]),
)
cur.execute(query, (ov.origin, ov.visit, ov.date, ov.type))
origin_visit_get_cols = [
"origin",
"visit",
"date",
"type",
"status",
"metadata",
"snapshot",
]
origin_visit_select_cols = [
"o.url AS origin",
"ov.visit",
"ov.date",
"ov.type AS type",
"ovs.status",
"ovs.metadata",
"ovs.snapshot",
]
origin_visit_status_select_cols = [
"o.url AS origin",
"ovs.visit",
"ovs.date",
"ovs.type AS type",
"ovs.status",
"ovs.snapshot",
"ovs.metadata",
]
def _make_origin_visit_status(
self, row: Optional[Tuple[Any]]
) -> Optional[Dict[str, Any]]:
"""Make an origin_visit_status dict out of a row
"""
if not row:
return None
return dict(zip(self.origin_visit_status_cols, row))
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
cur=None,
) -> Optional[Dict[str, Any]]:
"""Given an origin visit id, return its latest origin_visit_status
"""
cur = self._cursor(cur)
query_parts = [
"SELECT %s" % ", ".join(self.origin_visit_status_select_cols),
"FROM origin_visit_status ovs ",
"INNER JOIN origin o ON o.id = ovs.origin",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin_url]
query_parts.append("AND ovs.visit = %s")
query_params.append(visit)
if require_snapshot:
query_parts.append("AND ovs.snapshot is not null")
if allowed_statuses:
query_parts.append("AND ovs.status IN %s")
query_params.append(tuple(allowed_statuses))
query_parts.append("ORDER BY ovs.date DESC LIMIT 1")
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
row = cur.fetchone()
return self._make_origin_visit_status(row)
def origin_visit_status_get_range(
self,
origin: str,
visit: int,
date_from: Optional[datetime.datetime],
order: ListOrder,
limit: int,
cur=None,
):
"""Retrieve visit_status rows for visit (origin, visit) in a paginated way.
"""
cur = self._cursor(cur)
query_parts = [
f"SELECT {', '.join(self.origin_visit_status_select_cols)} "
"FROM origin_visit_status ovs ",
"INNER JOIN origin o ON o.id = ovs.origin ",
]
query_parts.append("WHERE o.url = %s AND ovs.visit = %s ")
query_params: List[Any] = [origin, visit]
if date_from is not None:
op_comparison = ">=" if order == ListOrder.ASC else "<="
query_parts.append(f"and ovs.date {op_comparison} %s ")
query_params.append(date_from)
if order == ListOrder.ASC:
query_parts.append("ORDER BY ovs.date ASC ")
elif order == ListOrder.DESC:
query_parts.append("ORDER BY ovs.date DESC ")
else:
assert False
query_parts.append("LIMIT %s")
query_params.append(limit)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
yield from cur
def origin_visit_get_range(
self, origin: str, visit_from: int, order: ListOrder, limit: int, cur=None,
):
cur = self._cursor(cur)
origin_visit_cols = ["o.url as origin", "ov.visit", "ov.date", "ov.type"]
query_parts = [
f"SELECT {', '.join(origin_visit_cols)} FROM origin_visit ov ",
"INNER JOIN origin o ON o.id = ov.origin ",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin]
if visit_from > 0:
op_comparison = ">" if order == ListOrder.ASC else "<"
query_parts.append(f"and ov.visit {op_comparison} %s")
query_params.append(visit_from)
if order == ListOrder.ASC:
query_parts.append("ORDER BY ov.visit ASC")
elif order == ListOrder.DESC:
query_parts.append("ORDER BY ov.visit DESC")
query_parts.append("LIMIT %s")
query_params.append(limit)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
yield from cur
def origin_visit_get(self, origin_id, visit_id, cur=None):
"""Retrieve information on visit visit_id of origin origin_id.
Args:
origin_id: the origin concerned
            visit_id: the visit identifier for that origin
Returns:
The origin_visit information
"""
cur = self._cursor(cur)
query = """\
SELECT %s
FROM origin_visit ov
INNER JOIN origin o ON o.id = ov.origin
INNER JOIN origin_visit_status ovs USING (origin, visit)
WHERE o.url = %%s AND ov.visit = %%s
ORDER BY ovs.date DESC
LIMIT 1
""" % (
", ".join(self.origin_visit_select_cols)
)
cur.execute(query, (origin_id, visit_id))
r = cur.fetchall()
if not r:
return None
return r[0]
def origin_visit_find_by_date(self, origin, visit_date, cur=None):
cur = self._cursor(cur)
cur.execute(
"SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date)
)
rows = cur.fetchall()
if rows:
visit = dict(zip(self.origin_visit_get_cols, rows[0]))
visit["origin"] = origin
return visit
def origin_visit_exists(self, origin_id, visit_id, cur=None):
"""Check whether an origin visit with the given ids exists"""
cur = self._cursor(cur)
query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s"
cur.execute(query, (origin_id, visit_id))
return bool(cur.fetchone())
def origin_visit_get_latest(
self,
origin_id: str,
type: Optional[str],
allowed_statuses: Optional[Iterable[str]],
require_snapshot: bool,
cur=None,
):
"""Retrieve the most recent origin_visit of the given origin,
with optional filters.
Args:
origin_id: the origin concerned
type: Optional visit type to filter on
allowed_statuses: the visit statuses allowed for the returned visit
require_snapshot (bool): If True, only a visit with a known
snapshot will be returned.
Returns:
The origin_visit information, or None if no visit matches.
"""
cur = self._cursor(cur)
query_parts = [
"SELECT %s" % ", ".join(self.origin_visit_select_cols),
"FROM origin_visit ov ",
"INNER JOIN origin o ON o.id = ov.origin",
"INNER JOIN origin_visit_status ovs USING (origin, visit)",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin_id]
if type is not None:
query_parts.append("AND ov.type = %s")
query_params.append(type)
if require_snapshot:
query_parts.append("AND ovs.snapshot is not null")
if allowed_statuses:
query_parts.append("AND ovs.status IN %s")
query_params.append(tuple(allowed_statuses))
query_parts.append(
"ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1"
)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
r = cur.fetchone()
if not r:
return None
return r
def origin_visit_get_random(self, type, cur=None):
"""Randomly select one origin visit that was full and in the last 3
months
"""
cur = self._cursor(cur)
columns = ",".join(self.origin_visit_select_cols)
query = f"""select {columns}
from origin_visit ov
inner join origin o on ov.origin=o.id
inner join origin_visit_status ovs using (origin, visit)
where ovs.status='full'
and ov.type=%s
and ov.date > now() - '3 months'::interval
and random() < 0.1
limit 1
"""
cur.execute(query, (type,))
return cur.fetchone()
@staticmethod
def mangle_query_key(key, main_table):
if key == "id":
return "t.id"
if key == "parents":
return """
ARRAY(
SELECT rh.parent_id::bytea
FROM revision_history rh
WHERE rh.id = t.id
ORDER BY rh.parent_rank
)"""
if "_" not in key:
return "%s.%s" % (main_table, key)
head, tail = key.split("_", 1)
if head in ("author", "committer") and tail in (
"name",
"email",
"id",
"fullname",
):
return "%s.%s" % (head, tail)
return "%s.%s" % (main_table, key)
def revision_get_from_list(self, revisions, cur=None):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "revision") for k in self.revision_get_cols
)
yield from execute_values_generator(
cur,
"""
SELECT %s FROM (VALUES %%s) as t(sortkey, id)
LEFT JOIN revision ON t.id = revision.id
LEFT JOIN person author ON revision.author = author.id
LEFT JOIN person committer ON revision.committer = committer.id
ORDER BY sortkey
"""
% query_keys,
((sortkey, id) for sortkey, id in enumerate(revisions)),
)
- extid_cols = ["extid", "extid_type", "target", "target_type"]
+ extid_cols = ["extid", "extid_version", "extid_type", "target", "target_type"]
def extid_get_from_extid_list(self, extid_type, ids, cur=None):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "extid") for k in self.extid_cols
)
sql = """
SELECT %s
FROM (VALUES %%s) as t(sortkey, extid, extid_type)
LEFT JOIN extid USING (extid, extid_type)
ORDER BY sortkey
""" % (
query_keys,
)
yield from execute_values_generator(
cur,
sql,
(((sortkey, extid, extid_type) for sortkey, extid in enumerate(ids))),
)
def extid_get_from_swhid_list(self, target_type, ids, cur=None):
cur = self._cursor(cur)
target_type = ObjectType(
target_type
).name.lower() # aka "rev" -> "revision", ...
query_keys = ", ".join(
self.mangle_query_key(k, "extid") for k in self.extid_cols
)
sql = """
SELECT %s
FROM (VALUES %%s) as t(sortkey, target, target_type)
LEFT JOIN extid USING (target, target_type)
ORDER BY sortkey
""" % (
query_keys,
)
yield from execute_values_generator(
cur,
sql,
(((sortkey, target, target_type) for sortkey, target in enumerate(ids))),
template=b"(%s,%s,%s::object_type)",
)
def revision_log(self, root_revisions, limit=None, cur=None):
cur = self._cursor(cur)
query = """SELECT %s
FROM swh_revision_log(%%s, %%s)
""" % ", ".join(
self.revision_get_cols
)
cur.execute(query, (root_revisions, limit))
yield from cur
revision_shortlog_cols = ["id", "parents"]
def revision_shortlog(self, root_revisions, limit=None, cur=None):
cur = self._cursor(cur)
query = """SELECT %s
FROM swh_revision_list(%%s, %%s)
""" % ", ".join(
self.revision_shortlog_cols
)
cur.execute(query, (root_revisions, limit))
yield from cur
def revision_get_random(self, cur=None):
return self._get_random_row_from_table("revision", ["id"], "id", cur)
def release_missing_from_list(self, releases, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM release r WHERE r.id = t.id
)
""",
((id,) for id in releases),
)
object_find_by_sha1_git_cols = ["sha1_git", "type"]
def object_find_by_sha1_git(self, ids, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
WITH t (sha1_git) AS (VALUES %s),
known_objects as ((
select
id as sha1_git,
'release'::object_type as type,
object_id
from release r
where exists (select 1 from t where t.sha1_git = r.id)
) union all (
select
id as sha1_git,
'revision'::object_type as type,
object_id
from revision r
where exists (select 1 from t where t.sha1_git = r.id)
) union all (
select
id as sha1_git,
'directory'::object_type as type,
object_id
from directory d
where exists (select 1 from t where t.sha1_git = d.id)
) union all (
select
sha1_git as sha1_git,
'content'::object_type as type,
object_id
from content c
where exists (select 1 from t where t.sha1_git = c.sha1_git)
))
select t.sha1_git as sha1_git, k.type
from t
left join known_objects k on t.sha1_git = k.sha1_git
""",
((id,) for id in ids),
)
def stat_counters(self, cur=None):
cur = self._cursor(cur)
cur.execute("SELECT * FROM swh_stat_counters()")
yield from cur
def origin_add(self, url, cur=None):
"""Insert a new origin and return the new identifier."""
insert = """INSERT INTO origin (url) values (%s)
ON CONFLICT DO NOTHING
"""
cur.execute(insert, (url,))
return cur.rowcount
origin_cols = ["url"]
def origin_get_by_url(self, origins, cur=None):
"""Retrieve origin `(type, url)` from urls if found."""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(url)
LEFT JOIN origin ON t.url = origin.url
""" % ",".join(
"origin." + col for col in self.origin_cols
)
yield from execute_values_generator(cur, query, ((url,) for url in origins))
def origin_get_by_sha1(self, sha1s, cur=None):
"""Retrieve origin urls from sha1s if found."""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(sha1)
LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1')
""" % ",".join(
"origin." + col for col in self.origin_cols
)
yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s))
def origin_id_get_by_url(self, origins, cur=None):
"""Retrieve origin `(type, url)` from urls if found."""
cur = self._cursor(cur)
query = """SELECT id FROM (VALUES %s) as t(url)
LEFT JOIN origin ON t.url = origin.url
"""
for row in execute_values_generator(cur, query, ((url,) for url in origins)):
yield row[0]
origin_get_range_cols = ["id", "url"]
def origin_get_range(self, origin_from: int = 1, origin_count: int = 100, cur=None):
"""Retrieve ``origin_count`` origins whose ids are greater
or equal than ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from: the minimum id of origins to retrieve
origin_count: the maximum number of origins to retrieve
"""
cur = self._cursor(cur)
query = """SELECT %s
FROM origin WHERE id >= %%s
ORDER BY id LIMIT %%s
""" % ",".join(
self.origin_get_range_cols
)
cur.execute(query, (origin_from, origin_count))
yield from cur
def _origin_query(
self,
url_pattern,
count=False,
offset=0,
limit=50,
regexp=False,
with_visit=False,
visit_types=None,
cur=None,
):
"""
        Factor out the query creation shared by origin search and origin count.
"""
cur = self._cursor(cur)
if count:
origin_cols = "COUNT(*)"
order_clause = ""
else:
origin_cols = ",".join(self.origin_cols)
order_clause = "ORDER BY id"
if not regexp:
operator = "ILIKE"
query_params = [f"%{url_pattern}%"]
else:
operator = "~*"
query_params = [url_pattern]
query = f"""
WITH filtered_origins AS (
SELECT *
FROM origin
WHERE url {operator} %s
{order_clause}
)
SELECT {origin_cols}
FROM filtered_origins AS o
"""
if with_visit or visit_types:
visit_predicat = (
"""
INNER JOIN origin_visit_status ovs USING (origin, visit)
INNER JOIN snapshot ON ovs.snapshot=snapshot.id
"""
if with_visit
else ""
)
type_predicat = (
f"AND ov.type=any(ARRAY{visit_types})" if visit_types else ""
)
query += f"""
WHERE EXISTS (
SELECT 1
FROM origin_visit ov
{visit_predicat}
WHERE ov.origin=o.id {type_predicat}
)
"""
if not count:
query += "OFFSET %s LIMIT %s"
query_params.extend([offset, limit])
cur.execute(query, query_params)
def origin_search(
self,
url_pattern: str,
offset: int = 0,
limit: int = 50,
regexp: bool = False,
with_visit: bool = False,
visit_types: Optional[List[str]] = None,
cur=None,
):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern: the string pattern to search for in origin urls
offset: number of found origins to skip before returning
results
limit: the maximum number of found origins to return
regexp: if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit: if True, filter out origins with no visit
visit_types: if provided, only keep origins having at least one
visit with one of the provided types
"""
self._origin_query(
url_pattern,
offset=offset,
limit=limit,
regexp=regexp,
with_visit=with_visit,
visit_types=visit_types,
cur=cur,
)
yield from cur
def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
"""
self._origin_query(
url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur
)
return cur.fetchone()[0]
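# The _origin_query helper above factorizes one SQL statement that serves both
# origin_search (row retrieval with OFFSET/LIMIT) and origin_count (a single
# COUNT(*)). Below is a minimal, self-contained sketch of the same factorization
# over an in-memory list; names here are illustrative, not part of swh.storage.
import re
from typing import List

def _query(urls: List[str], pattern: str, regexp: bool = False,
           count: bool = False, offset: int = 0, limit: int = 50):
    # Shared filtering logic, mirroring _origin_query's ILIKE vs regexp operator.
    if regexp:
        matches = [u for u in urls if re.search(pattern, u, re.IGNORECASE)]
    else:
        matches = [u for u in urls if pattern.lower() in u.lower()]
    if count:
        return len(matches)                    # origin_count consumes a scalar
    return matches[offset:offset + limit]      # origin_search consumes rows

urls = ["https://github.com/a/b", "https://gitlab.com/c/d"]
assert _query(urls, "github", count=True) == 1
assert _query(urls, r"git(hub|lab)", regexp=True) == urls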
release_add_cols = [
"id",
"target",
"target_type",
"date",
"date_offset",
"date_neg_utc_offset",
"name",
"comment",
"synthetic",
"author_fullname",
"author_name",
"author_email",
]
release_get_cols = release_add_cols
def release_get_from_list(self, releases, cur=None):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "release") for k in self.release_get_cols
)
yield from execute_values_generator(
cur,
"""
SELECT %s FROM (VALUES %%s) as t(sortkey, id)
LEFT JOIN release ON t.id = release.id
LEFT JOIN person author ON release.author = author.id
ORDER BY sortkey
"""
% query_keys,
((sortkey, id) for sortkey, id in enumerate(releases)),
)
def release_get_random(self, cur=None):
return self._get_random_row_from_table("release", ["id"], "id", cur)
_raw_extrinsic_metadata_context_cols = [
"origin",
"visit",
"snapshot",
"release",
"revision",
"path",
"directory",
]
"""The list of context columns for all artifact types."""
_raw_extrinsic_metadata_insert_cols = [
"id",
"type",
"target",
"authority_id",
"fetcher_id",
"discovery_date",
"format",
"metadata",
*_raw_extrinsic_metadata_context_cols,
]
"""List of columns of the raw_extrinsic_metadata table, used when writing
metadata."""
_raw_extrinsic_metadata_insert_query = f"""
INSERT INTO raw_extrinsic_metadata
({', '.join(_raw_extrinsic_metadata_insert_cols)})
VALUES ({', '.join('%s' for _ in _raw_extrinsic_metadata_insert_cols)})
ON CONFLICT (id)
DO NOTHING
"""
raw_extrinsic_metadata_get_cols = [
"raw_extrinsic_metadata.target",
"raw_extrinsic_metadata.type",
"discovery_date",
"metadata_authority.type",
"metadata_authority.url",
"metadata_fetcher.id",
"metadata_fetcher.name",
"metadata_fetcher.version",
*_raw_extrinsic_metadata_context_cols,
"format",
"raw_extrinsic_metadata.metadata",
]
"""List of columns of the raw_extrinsic_metadata, metadata_authority,
and metadata_fetcher tables, used when reading object metadata."""
_raw_extrinsic_metadata_select_query = f"""
SELECT
{', '.join(raw_extrinsic_metadata_get_cols)}
FROM raw_extrinsic_metadata
INNER JOIN metadata_authority
ON (metadata_authority.id=authority_id)
INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id)
"""
def raw_extrinsic_metadata_add(
self,
id: bytes,
type: str,
target: str,
discovery_date: datetime.datetime,
authority_id: int,
fetcher_id: int,
format: str,
metadata: bytes,
origin: Optional[str],
visit: Optional[int],
snapshot: Optional[str],
release: Optional[str],
revision: Optional[str],
path: Optional[bytes],
directory: Optional[str],
cur,
):
query = self._raw_extrinsic_metadata_insert_query
args: Dict[str, Any] = dict(
id=id,
type=type,
target=target,
authority_id=authority_id,
fetcher_id=fetcher_id,
discovery_date=discovery_date,
format=format,
metadata=metadata,
origin=origin,
visit=visit,
snapshot=snapshot,
release=release,
revision=revision,
path=path,
directory=directory,
)
params = [args[col] for col in self._raw_extrinsic_metadata_insert_cols]
cur.execute(query, params)
def raw_extrinsic_metadata_get(
self,
target: str,
authority_id: int,
after_time: Optional[datetime.datetime],
after_fetcher: Optional[int],
limit: int,
cur,
):
query_parts = [self._raw_extrinsic_metadata_select_query]
query_parts.append("WHERE raw_extrinsic_metadata.target=%s AND authority_id=%s")
args = [target, authority_id]
if after_fetcher is not None:
assert after_time
query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)")
args.extend([after_time, after_fetcher])
elif after_time is not None:
query_parts.append("AND discovery_date > %s")
args.append(after_time)
query_parts.append("ORDER BY discovery_date, fetcher_id")
if limit:
query_parts.append("LIMIT %s")
args.append(limit)
cur.execute(" ".join(query_parts), args)
yield from cur
def raw_extrinsic_metadata_get_by_ids(self, ids: List[Sha1Git], cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
self._raw_extrinsic_metadata_select_query
+ "INNER JOIN (VALUES %s) AS t(id) ON t.id = raw_extrinsic_metadata.id",
[(id_,) for id_ in ids],
)
def raw_extrinsic_metadata_get_authorities(self, id: str, cur=None):
cur = self._cursor(cur)
cur.execute(
"""
SELECT
DISTINCT metadata_authority.type, metadata_authority.url
FROM raw_extrinsic_metadata
INNER JOIN metadata_authority
ON (metadata_authority.id=authority_id)
WHERE raw_extrinsic_metadata.target = %s
""",
(id,),
)
yield from cur
metadata_fetcher_cols = ["name", "version"]
def metadata_fetcher_add(self, name: str, version: str, cur=None) -> None:
cur = self._cursor(cur)
cur.execute(
"INSERT INTO metadata_fetcher (name, version) "
"VALUES (%s, %s) ON CONFLICT DO NOTHING",
(name, version),
)
def metadata_fetcher_get(self, name: str, version: str, cur=None):
cur = self._cursor(cur)
cur.execute(
f"SELECT {', '.join(self.metadata_fetcher_cols)} "
f"FROM metadata_fetcher "
f"WHERE name=%s AND version=%s",
(name, version),
)
return cur.fetchone()
def metadata_fetcher_get_id(
self, name: str, version: str, cur=None
) -> Optional[int]:
cur = self._cursor(cur)
cur.execute(
"SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s",
(name, version),
)
row = cur.fetchone()
if row:
return row[0]
else:
return None
metadata_authority_cols = ["type", "url"]
def metadata_authority_add(self, type: str, url: str, cur=None) -> None:
cur = self._cursor(cur)
cur.execute(
"INSERT INTO metadata_authority (type, url) "
"VALUES (%s, %s) ON CONFLICT DO NOTHING",
(type, url),
)
def metadata_authority_get(self, type: str, url: str, cur=None):
cur = self._cursor(cur)
cur.execute(
f"SELECT {', '.join(self.metadata_authority_cols)} "
f"FROM metadata_authority "
f"WHERE type=%s AND url=%s",
(type, url),
)
return cur.fetchone()
def metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]:
cur = self._cursor(cur)
cur.execute(
"SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url)
)
row = cur.fetchone()
if row:
return row[0]
else:
return None
def _get_random_row_from_table(self, table_name, cols, id_col, cur=None):
random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE))
cur = self._cursor(cur)
query = """
(SELECT {cols} FROM {table} WHERE {id_col} >= %s
ORDER BY {id_col} LIMIT 1)
UNION
(SELECT {cols} FROM {table} WHERE {id_col} < %s
ORDER BY {id_col} DESC LIMIT 1)
LIMIT 1
""".format(
cols=", ".join(cols), table=table_name, id_col=id_col
)
cur.execute(query, (random_sha1, random_sha1))
row = cur.fetchone()
if row:
return row[0]
dbversion_cols = ["version", "release", "description"]
def dbversion(self):
with self.transaction() as cur:
cur.execute(
f"""
SELECT {', '.join(self.dbversion_cols)}
FROM dbversion
ORDER BY version DESC
LIMIT 1
"""
)
return dict(zip(self.dbversion_cols, cur.fetchone()))
def check_dbversion(self):
dbversion = self.dbversion()["version"]
if dbversion != self.current_version:
logger.warning(
"database dbversion (%s) != %s current_version (%s)",
dbversion,
__name__,
self.current_version,
)
return dbversion == self.current_version
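The _get_random_row_from_table helper above draws a random 20-byte value and selects the first row at or after it, falling back to the last row before it, so a single query always lands on an existing row. Below is a minimal, self-contained sketch of that wrap-around selection in plain Python; the names and data are illustrative and not part of swh.storage.

import bisect
import random

def random_row(ids):
    # ids: a sorted list of 20-byte identifiers, as in the sha1-keyed tables.
    pivot = bytes(random.randint(0, 255) for _ in range(20))
    i = bisect.bisect_left(ids, pivot)
    # First id >= pivot, else wrap around to the largest id overall.
    return ids[i] if i < len(ids) else ids[-1]

ids = sorted(bytes([n]) * 20 for n in (3, 7, 250))
assert random_row(ids) in ids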
diff --git a/swh/storage/postgresql/storage.py b/swh/storage/postgresql/storage.py
index 5f3f0f38..bb3ae398 100644
--- a/swh/storage/postgresql/storage.py
+++ b/swh/storage/postgresql/storage.py
@@ -1,1547 +1,1548 @@
# Copyright (C) 2015-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import base64
from collections import defaultdict
import contextlib
from contextlib import contextmanager
import datetime
import itertools
import operator
from typing import Any, Counter, Dict, Iterable, List, Optional, Sequence, Tuple
import attr
import psycopg2
import psycopg2.errors
import psycopg2.pool
from swh.core.api.serializers import msgpack_dumps, msgpack_loads
from swh.core.db.common import db_transaction, db_transaction_generator
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex
from swh.model.identifiers import ExtendedObjectType, ExtendedSWHID, ObjectType
from swh.model.model import (
SHA1_SIZE,
Content,
Directory,
DirectoryEntry,
ExtID,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
Origin,
OriginVisit,
OriginVisitStatus,
RawExtrinsicMetadata,
Release,
Revision,
Sha1,
Sha1Git,
SkippedContent,
Snapshot,
SnapshotBranch,
TargetType,
)
from swh.storage.exc import HashCollision, StorageArgumentException, StorageDBError
from swh.storage.interface import (
VISIT_STATUSES,
ListOrder,
PagedResult,
PartialBranches,
)
from swh.storage.metrics import process_metrics, send_metric, timed
from swh.storage.objstorage import ObjStorage
from swh.storage.utils import (
extract_collision_hash,
get_partition_bounds_bytes,
map_optional,
now,
)
from swh.storage.writer import JournalWriter
from . import converters
from .db import Db
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
EMPTY_SNAPSHOT_ID = hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e")
"""Identifier for the empty snapshot"""
VALIDATION_EXCEPTIONS = (
KeyError,
TypeError,
ValueError,
psycopg2.errors.CheckViolation,
psycopg2.errors.IntegrityError,
psycopg2.errors.InvalidTextRepresentation,
psycopg2.errors.NotNullViolation,
psycopg2.errors.NumericValueOutOfRange,
psycopg2.errors.UndefinedFunction, # (raised on wrong argument types)
)
"""Exceptions raised by postgresql when validation of the arguments
failed."""
@contextlib.contextmanager
def convert_validation_exceptions():
"""Catches postgresql errors related to invalid arguments, and
re-raises a StorageArgumentException."""
try:
yield
except tuple(VALIDATION_EXCEPTIONS) as e:
raise StorageArgumentException(str(e))
class Storage:
"""SWH storage proxy, encompassing DB and object storage
"""
def __init__(
self, db, objstorage, min_pool_conns=1, max_pool_conns=10, journal_writer=None
):
"""
Args:
db: either a libpq connection string, or a psycopg2 connection
objstorage: configuration of the object storage backend
"""
try:
if isinstance(db, psycopg2.extensions.connection):
self._pool = None
self._db = Db(db)
else:
self._pool = psycopg2.pool.ThreadedConnectionPool(
min_pool_conns, max_pool_conns, db
)
self._db = None
except psycopg2.OperationalError as e:
raise StorageDBError(e)
self.journal_writer = JournalWriter(journal_writer)
self.objstorage = ObjStorage(objstorage)
def get_db(self):
if self._db:
return self._db
else:
return Db.from_pool(self._pool)
def put_db(self, db):
if db is not self._db:
db.put_conn()
@contextmanager
def db(self):
db = None
try:
db = self.get_db()
yield db
finally:
if db:
self.put_db(db)
@timed
@db_transaction()
def check_config(self, *, check_write: bool, db: Db, cur=None) -> bool:
if not self.objstorage.check_config(check_write=check_write):
return False
if not db.check_dbversion():
return False
# Check permissions on one of the tables
if check_write:
check = "INSERT"
else:
check = "SELECT"
cur.execute("select has_table_privilege(current_user, 'content', %s)", (check,))
return cur.fetchone()[0]
def _content_unique_key(self, hash, db):
"""Given a hash (tuple or dict), return a unique key from the
aggregation of keys.
"""
keys = db.content_hash_keys
if isinstance(hash, tuple):
return hash
return tuple([hash[k] for k in keys])
def _content_add_metadata(self, db, cur, content):
"""Add content to the postgresql database but not the object storage.
"""
# create temporary table for metadata injection
db.mktemp("content", cur)
db.copy_to(
(c.to_dict() for c in content), "tmp_content", db.content_add_keys, cur
)
# move metadata in place
try:
db.content_add_from_temp(cur)
except psycopg2.IntegrityError as e:
if e.diag.sqlstate == "23505" and e.diag.table_name == "content":
message_detail = e.diag.message_detail
if message_detail:
hash_name, hash_id = extract_collision_hash(message_detail)
collision_contents_hashes = [
c.hashes() for c in content if c.get_hash(hash_name) == hash_id
]
else:
constraint_to_hash_name = {
"content_pkey": "sha1",
"content_sha1_git_idx": "sha1_git",
"content_sha256_idx": "sha256",
}
hash_name = constraint_to_hash_name.get(e.diag.constraint_name)
hash_id = None
collision_contents_hashes = None
raise HashCollision(
hash_name, hash_id, collision_contents_hashes
) from None
else:
raise
@timed
@process_metrics
def content_add(self, content: List[Content]) -> Dict[str, int]:
ctime = now()
contents = [attr.evolve(c, ctime=ctime) for c in content]
# Must add to the objstorage before the DB and journal. Otherwise:
# 1. in case of a crash the DB may "believe" we have the content, but
# we didn't have time to write to the objstorage before the crash
# 2. the objstorage mirroring, which reads from the journal, may attempt to
# read from the objstorage before we finished writing it
objstorage_summary = self.objstorage.content_add(contents)
with self.db() as db:
with db.transaction() as cur:
missing = list(
self.content_missing(
map(Content.to_dict, contents),
key_hash="sha1_git",
db=db,
cur=cur,
)
)
contents = [c for c in contents if c.sha1_git in missing]
self.journal_writer.content_add(contents)
self._content_add_metadata(db, cur, contents)
return {
"content:add": len(contents),
"content:add:bytes": objstorage_summary["content:add:bytes"],
}
@timed
@db_transaction()
def content_update(
self, contents: List[Dict[str, Any]], keys: List[str] = [], *, db: Db, cur=None
) -> None:
# TODO: Add a check on input keys. How to properly implement
# this? We don't know yet the new columns.
self.journal_writer.content_update(contents)
db.mktemp("content", cur)
select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
with convert_validation_exceptions():
db.copy_to(contents, "tmp_content", select_keys, cur)
db.content_update_from_temp(keys_to_update=keys, cur=cur)
@timed
@process_metrics
@db_transaction()
def content_add_metadata(
self, content: List[Content], *, db: Db, cur=None
) -> Dict[str, int]:
missing = self.content_missing(
(c.to_dict() for c in content), key_hash="sha1_git", db=db, cur=cur,
)
contents = [c for c in content if c.sha1_git in missing]
self.journal_writer.content_add_metadata(contents)
self._content_add_metadata(db, cur, contents)
return {
"content:add": len(contents),
}
@timed
def content_get_data(self, content: Sha1) -> Optional[bytes]:
# FIXME: Make this method support slicing the `data`
return self.objstorage.content_get(content)
@timed
@db_transaction()
def content_get_partition(
self,
partition_id: int,
nb_partitions: int,
page_token: Optional[str] = None,
limit: int = 1000,
*,
db: Db,
cur=None,
) -> PagedResult[Content]:
if limit is None:
raise StorageArgumentException("limit should not be None")
(start, end) = get_partition_bounds_bytes(
partition_id, nb_partitions, SHA1_SIZE
)
if page_token:
start = hash_to_bytes(page_token)
if end is None:
end = b"\xff" * SHA1_SIZE
next_page_token: Optional[str] = None
contents = []
for counter, row in enumerate(db.content_get_range(start, end, limit + 1, cur)):
row_d = dict(zip(db.content_get_metadata_keys, row))
content = Content(**row_d)
if counter >= limit:
# take the last content for the next page starting from this
next_page_token = hash_to_hex(content.sha1)
break
contents.append(content)
assert len(contents) <= limit
return PagedResult(results=contents, next_page_token=next_page_token)
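# content_get_partition maps (partition_id, nb_partitions) onto a byte range of
# the sha1 space via get_partition_bounds_bytes, then pages within that range
# using a sha1 page token. The sketch below shows one plausible way such bounds
# can be computed; it is an assumption for illustration, not the
# swh.storage.utils implementation.
SHA1_SIZE = 20

def partition_bounds(partition_id: int, nb_partitions: int, nb_bytes: int = SHA1_SIZE):
    # Split the [0, 256**nb_bytes) key space into nb_partitions contiguous ranges.
    space = 256 ** nb_bytes
    start = partition_id * space // nb_partitions
    end = (partition_id + 1) * space // nb_partitions
    if partition_id == nb_partitions - 1:
        # Open-ended last partition, like the `end is None` case handled above.
        return start.to_bytes(nb_bytes, "big"), None
    return start.to_bytes(nb_bytes, "big"), end.to_bytes(nb_bytes, "big")

start, end = partition_bounds(1, 4)
assert start == b"\x40" + b"\x00" * 19
assert end == b"\x80" + b"\x00" * 19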
@timed
@db_transaction(statement_timeout=500)
def content_get(
self, contents: List[bytes], algo: str = "sha1", *, db: Db, cur=None
) -> List[Optional[Content]]:
contents_by_hash: Dict[bytes, Optional[Content]] = {}
if algo not in DEFAULT_ALGORITHMS:
raise StorageArgumentException(
"algo should be one of {','.join(DEFAULT_ALGORITHMS)}"
)
rows = db.content_get_metadata_from_hashes(contents, algo, cur)
key = operator.attrgetter(algo)
for row in rows:
row_d = dict(zip(db.content_get_metadata_keys, row))
content = Content(**row_d)
contents_by_hash[key(content)] = content
return [contents_by_hash.get(sha1) for sha1 in contents]
@timed
@db_transaction_generator()
def content_missing(
self,
contents: List[Dict[str, Any]],
key_hash: str = "sha1",
*,
db: Db,
cur=None,
) -> Iterable[bytes]:
if key_hash not in DEFAULT_ALGORITHMS:
raise StorageArgumentException(
"key_hash should be one of {','.join(DEFAULT_ALGORITHMS)}"
)
keys = db.content_hash_keys
key_hash_idx = keys.index(key_hash)
for obj in db.content_missing_from_list(contents, cur):
yield obj[key_hash_idx]
@timed
@db_transaction_generator()
def content_missing_per_sha1(
self, contents: List[bytes], *, db: Db, cur=None
) -> Iterable[bytes]:
for obj in db.content_missing_per_sha1(contents, cur):
yield obj[0]
@timed
@db_transaction_generator()
def content_missing_per_sha1_git(
self, contents: List[bytes], *, db: Db, cur=None
) -> Iterable[Sha1Git]:
for obj in db.content_missing_per_sha1_git(contents, cur):
yield obj[0]
@timed
@db_transaction()
def content_find(
self, content: Dict[str, Any], *, db: Db, cur=None
) -> List[Content]:
if not set(content).intersection(DEFAULT_ALGORITHMS):
raise StorageArgumentException(
"content keys must contain at least one "
f"of: {', '.join(sorted(DEFAULT_ALGORITHMS))}"
)
rows = db.content_find(
sha1=content.get("sha1"),
sha1_git=content.get("sha1_git"),
sha256=content.get("sha256"),
blake2s256=content.get("blake2s256"),
cur=cur,
)
contents = []
for row in rows:
row_d = dict(zip(db.content_find_cols, row))
contents.append(Content(**row_d))
return contents
@timed
@db_transaction()
def content_get_random(self, *, db: Db, cur=None) -> Sha1Git:
return db.content_get_random(cur)
@staticmethod
def _skipped_content_normalize(d):
d = d.copy()
if d.get("status") is None:
d["status"] = "absent"
if d.get("length") is None:
d["length"] = -1
return d
def _skipped_content_add_metadata(self, db, cur, content: List[SkippedContent]):
origin_ids = db.origin_id_get_by_url([cont.origin for cont in content], cur=cur)
content = [
attr.evolve(c, origin=origin_id)
for (c, origin_id) in zip(content, origin_ids)
]
db.mktemp("skipped_content", cur)
db.copy_to(
[c.to_dict() for c in content],
"tmp_skipped_content",
db.skipped_content_keys,
cur,
)
# move metadata in place
db.skipped_content_add_from_temp(cur)
@timed
@process_metrics
@db_transaction()
def skipped_content_add(
self, content: List[SkippedContent], *, db: Db, cur=None
) -> Dict[str, int]:
ctime = now()
content = [attr.evolve(c, ctime=ctime) for c in content]
missing_contents = self.skipped_content_missing(
(c.to_dict() for c in content), db=db, cur=cur,
)
content = [
c
for c in content
if any(
all(
c.get_hash(algo) == missing_content.get(algo)
for algo in DEFAULT_ALGORITHMS
)
for missing_content in missing_contents
)
]
self.journal_writer.skipped_content_add(content)
self._skipped_content_add_metadata(db, cur, content)
return {
"skipped_content:add": len(content),
}
@timed
@db_transaction_generator()
def skipped_content_missing(
self, contents: List[Dict[str, Any]], *, db: Db, cur=None
) -> Iterable[Dict[str, Any]]:
contents = list(contents)
for content in db.skipped_content_missing(contents, cur):
yield dict(zip(db.content_hash_keys, content))
@timed
@process_metrics
@db_transaction()
def directory_add(
self, directories: List[Directory], *, db: Db, cur=None
) -> Dict[str, int]:
summary = {"directory:add": 0}
dirs = set()
dir_entries: Dict[str, defaultdict] = {
"file": defaultdict(list),
"dir": defaultdict(list),
"rev": defaultdict(list),
}
for cur_dir in directories:
dir_id = cur_dir.id
dirs.add(dir_id)
for src_entry in cur_dir.entries:
entry = src_entry.to_dict()
entry["dir_id"] = dir_id
dir_entries[entry["type"]][dir_id].append(entry)
dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur))
if not dirs_missing:
return summary
self.journal_writer.directory_add(
dir_ for dir_ in directories if dir_.id in dirs_missing
)
# Copy directory ids
dirs_missing_dict = ({"id": dir} for dir in dirs_missing)
db.mktemp("directory", cur)
db.copy_to(dirs_missing_dict, "tmp_directory", ["id"], cur)
# Copy entries
for entry_type, entry_list in dir_entries.items():
entries = itertools.chain.from_iterable(
entries_for_dir
for dir_id, entries_for_dir in entry_list.items()
if dir_id in dirs_missing
)
db.mktemp_dir_entry(entry_type)
db.copy_to(
entries,
"tmp_directory_entry_%s" % entry_type,
["target", "name", "perms", "dir_id"],
cur,
)
# Do the final copy
db.directory_add_from_temp(cur)
summary["directory:add"] = len(dirs_missing)
return summary
@timed
@db_transaction_generator()
def directory_missing(
self, directories: List[Sha1Git], *, db: Db, cur=None
) -> Iterable[Sha1Git]:
for obj in db.directory_missing_from_list(directories, cur):
yield obj[0]
@timed
@db_transaction_generator(statement_timeout=20000)
def directory_ls(
self, directory: Sha1Git, recursive: bool = False, *, db: Db, cur=None
) -> Iterable[Dict[str, Any]]:
if recursive:
res_gen = db.directory_walk(directory, cur=cur)
else:
res_gen = db.directory_walk_one(directory, cur=cur)
for line in res_gen:
yield dict(zip(db.directory_ls_cols, line))
@timed
@db_transaction(statement_timeout=2000)
def directory_entry_get_by_path(
self, directory: Sha1Git, paths: List[bytes], *, db: Db, cur=None
) -> Optional[Dict[str, Any]]:
res = db.directory_entry_get_by_path(directory, paths, cur)
return dict(zip(db.directory_ls_cols, res)) if res else None
@timed
@db_transaction()
def directory_get_random(self, *, db: Db, cur=None) -> Sha1Git:
return db.directory_get_random(cur)
@db_transaction()
def directory_get_entries(
self,
directory_id: Sha1Git,
page_token: Optional[bytes] = None,
limit: int = 1000,
*,
db: Db,
cur=None,
) -> Optional[PagedResult[DirectoryEntry]]:
if list(self.directory_missing([directory_id], db=db, cur=cur)):
return None
if page_token is not None:
raise StorageArgumentException("Unsupported page token")
# TODO: actually paginate
rows = db.directory_get_entries(directory_id, cur=cur)
return PagedResult(
results=[
DirectoryEntry(**dict(zip(db.directory_get_entries_cols, row)))
for row in rows
],
next_page_token=None,
)
@timed
@process_metrics
@db_transaction()
def revision_add(
self, revisions: List[Revision], *, db: Db, cur=None
) -> Dict[str, int]:
summary = {"revision:add": 0}
revisions_missing = set(
self.revision_missing(
set(revision.id for revision in revisions), db=db, cur=cur
)
)
if not revisions_missing:
return summary
db.mktemp_revision(cur)
revisions_filtered = [
revision for revision in revisions if revision.id in revisions_missing
]
self.journal_writer.revision_add(revisions_filtered)
db_revisions_filtered = list(map(converters.revision_to_db, revisions_filtered))
parents_filtered: List[Dict[str, Any]] = []
with convert_validation_exceptions():
db.copy_to(
db_revisions_filtered,
"tmp_revision",
db.revision_add_cols,
cur,
lambda rev: parents_filtered.extend(rev["parents"]),
)
db.revision_add_from_temp(cur)
db.copy_to(
parents_filtered,
"revision_history",
["id", "parent_id", "parent_rank"],
cur,
)
return {"revision:add": len(revisions_missing)}
@timed
@db_transaction_generator()
def revision_missing(
self, revisions: List[Sha1Git], *, db: Db, cur=None
) -> Iterable[Sha1Git]:
if not revisions:
return None
for obj in db.revision_missing_from_list(revisions, cur):
yield obj[0]
@timed
@db_transaction(statement_timeout=1000)
def revision_get(
self, revision_ids: List[Sha1Git], *, db: Db, cur=None
) -> List[Optional[Revision]]:
revisions = []
for line in db.revision_get_from_list(revision_ids, cur):
revision = converters.db_to_revision(dict(zip(db.revision_get_cols, line)))
revisions.append(revision)
return revisions
@timed
@db_transaction_generator(statement_timeout=2000)
def revision_log(
self, revisions: List[Sha1Git], limit: Optional[int] = None, *, db: Db, cur=None
) -> Iterable[Optional[Dict[str, Any]]]:
for line in db.revision_log(revisions, limit, cur):
data = converters.db_to_revision(dict(zip(db.revision_get_cols, line)))
if not data:
yield None
continue
yield data.to_dict()
@timed
@db_transaction_generator(statement_timeout=2000)
def revision_shortlog(
self, revisions: List[Sha1Git], limit: Optional[int] = None, *, db: Db, cur=None
) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]:
yield from db.revision_shortlog(revisions, limit, cur)
@timed
@db_transaction()
def revision_get_random(self, *, db: Db, cur=None) -> Sha1Git:
return db.revision_get_random(cur)
@timed
@db_transaction()
def extid_get_from_extid(
self, id_type: str, ids: List[bytes], *, db: Db, cur=None
) -> List[ExtID]:
extids = []
for row in db.extid_get_from_extid_list(id_type, ids, cur):
if row[0] is not None:
extids.append(converters.db_to_extid(dict(zip(db.extid_cols, row))))
return extids
@timed
@db_transaction()
def extid_get_from_target(
self, target_type: ObjectType, ids: List[Sha1Git], *, db: Db, cur=None
) -> List[ExtID]:
extids = []
for row in db.extid_get_from_swhid_list(target_type.value, ids, cur):
if row[0] is not None:
extids.append(converters.db_to_extid(dict(zip(db.extid_cols, row))))
return extids
@timed
@db_transaction()
def extid_add(self, ids: List[ExtID], *, db: Db, cur=None) -> Dict[str, int]:
extid = [
{
"extid": extid.extid,
"extid_type": extid.extid_type,
+ "extid_version": getattr(extid, "extid_version", 0),
"target": extid.target.object_id,
"target_type": extid.target.object_type.name.lower(), # arghh
}
for extid in ids
]
db.mktemp("extid", cur)
self.journal_writer.extid_add(ids)
db.copy_to(extid, "tmp_extid", db.extid_cols, cur)
# move metadata in place
db.extid_add_from_temp(cur)
return {"extid:add": len(extid)}
@timed
@process_metrics
@db_transaction()
def release_add(
self, releases: List[Release], *, db: Db, cur=None
) -> Dict[str, int]:
summary = {"release:add": 0}
release_ids = set(release.id for release in releases)
releases_missing = set(self.release_missing(release_ids, db=db, cur=cur))
if not releases_missing:
return summary
db.mktemp_release(cur)
releases_filtered = [
release for release in releases if release.id in releases_missing
]
self.journal_writer.release_add(releases_filtered)
db_releases_filtered = list(map(converters.release_to_db, releases_filtered))
with convert_validation_exceptions():
db.copy_to(db_releases_filtered, "tmp_release", db.release_add_cols, cur)
db.release_add_from_temp(cur)
return {"release:add": len(releases_missing)}
@timed
@db_transaction_generator()
def release_missing(
self, releases: List[Sha1Git], *, db: Db, cur=None
) -> Iterable[Sha1Git]:
if not releases:
return
for obj in db.release_missing_from_list(releases, cur):
yield obj[0]
@timed
@db_transaction(statement_timeout=500)
def release_get(
self, releases: List[Sha1Git], *, db: Db, cur=None
) -> List[Optional[Release]]:
rels = []
for release in db.release_get_from_list(releases, cur):
data = converters.db_to_release(dict(zip(db.release_get_cols, release)))
rels.append(data if data else None)
return rels
@timed
@db_transaction()
def release_get_random(self, *, db: Db, cur=None) -> Sha1Git:
return db.release_get_random(cur)
@timed
@process_metrics
@db_transaction()
def snapshot_add(
self, snapshots: List[Snapshot], *, db: Db, cur=None
) -> Dict[str, int]:
created_temp_table = False
count = 0
for snapshot in snapshots:
if not db.snapshot_exists(snapshot.id, cur):
if not created_temp_table:
db.mktemp_snapshot_branch(cur)
created_temp_table = True
with convert_validation_exceptions():
db.copy_to(
(
{
"name": name,
"target": info.target if info else None,
"target_type": (
info.target_type.value if info else None
),
}
for name, info in snapshot.branches.items()
),
"tmp_snapshot_branch",
["name", "target", "target_type"],
cur,
)
self.journal_writer.snapshot_add([snapshot])
db.snapshot_add(snapshot.id, cur)
count += 1
return {"snapshot:add": count}
@timed
@db_transaction_generator()
def snapshot_missing(
self, snapshots: List[Sha1Git], *, db: Db, cur=None
) -> Iterable[Sha1Git]:
for obj in db.snapshot_missing_from_list(snapshots, cur):
yield obj[0]
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get(
self, snapshot_id: Sha1Git, *, db: Db, cur=None
) -> Optional[Dict[str, Any]]:
d = self.snapshot_get_branches(snapshot_id)
if d is None:
return d
return {
"id": d["id"],
"branches": {
name: branch.to_dict() if branch else None
for (name, branch) in d["branches"].items()
},
"next_branch": d["next_branch"],
}
@timed
@db_transaction(statement_timeout=2000)
def snapshot_count_branches(
self,
snapshot_id: Sha1Git,
branch_name_exclude_prefix: Optional[bytes] = None,
*,
db: Db,
cur=None,
) -> Optional[Dict[Optional[str], int]]:
return dict(
[
bc
for bc in db.snapshot_count_branches(
snapshot_id, branch_name_exclude_prefix, cur,
)
]
)
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get_branches(
self,
snapshot_id: Sha1Git,
branches_from: bytes = b"",
branches_count: int = 1000,
target_types: Optional[List[str]] = None,
branch_name_include_substring: Optional[bytes] = None,
branch_name_exclude_prefix: Optional[bytes] = None,
*,
db: Db,
cur=None,
) -> Optional[PartialBranches]:
if snapshot_id == EMPTY_SNAPSHOT_ID:
return PartialBranches(id=snapshot_id, branches={}, next_branch=None,)
branches = {}
next_branch = None
fetched_branches = list(
db.snapshot_get_by_id(
snapshot_id,
branches_from=branches_from,
# the underlying SQL query can be quite expensive to execute for small
# branches_count values, so we enforce a minimum limit of 10 branches
# for better performance
branches_count=max(branches_count + 1, 10),
target_types=target_types,
branch_name_include_substring=branch_name_include_substring,
branch_name_exclude_prefix=branch_name_exclude_prefix,
cur=cur,
)
)
for row in fetched_branches[:branches_count]:
branch_d = dict(zip(db.snapshot_get_cols, row))
del branch_d["snapshot_id"]
name = branch_d.pop("name")
if branch_d["target"] is None and branch_d["target_type"] is None:
branch = None
else:
assert branch_d["target_type"] is not None
branch = SnapshotBranch(
target=branch_d["target"],
target_type=TargetType(branch_d["target_type"]),
)
branches[name] = branch
if len(fetched_branches) > branches_count:
next_branch = dict(
zip(db.snapshot_get_cols, fetched_branches[branches_count])
)["name"]
if branches:
return PartialBranches(
id=snapshot_id, branches=branches, next_branch=next_branch,
)
return None
@timed
@db_transaction()
def snapshot_get_random(self, *, db: Db, cur=None) -> Sha1Git:
return db.snapshot_get_random(cur)
@timed
@db_transaction()
def origin_visit_add(
self, visits: List[OriginVisit], *, db: Db, cur=None
) -> Iterable[OriginVisit]:
for visit in visits:
origin = self.origin_get([visit.origin], db=db, cur=cur)[0]
if not origin: # Cannot add a visit without an origin
raise StorageArgumentException("Unknown origin %s", visit.origin)
all_visits = []
nb_visits = 0
for visit in visits:
nb_visits += 1
if not visit.visit:
with convert_validation_exceptions():
visit_id = db.origin_visit_add(
visit.origin, visit.date, visit.type, cur=cur
)
visit = attr.evolve(visit, visit=visit_id)
else:
db.origin_visit_add_with_id(visit, cur=cur)
assert visit.visit is not None
all_visits.append(visit)
# The journal write happens only now, after the visit has been assigned an id if it had none
self.journal_writer.origin_visit_add([visit])
visit_status = OriginVisitStatus(
origin=visit.origin,
visit=visit.visit,
date=visit.date,
type=visit.type,
status="created",
snapshot=None,
)
self._origin_visit_status_add(visit_status, db=db, cur=cur)
send_metric("origin_visit:add", count=nb_visits, method_name="origin_visit")
return all_visits
def _origin_visit_status_add(
self, visit_status: OriginVisitStatus, db, cur
) -> None:
"""Add an origin visit status"""
self.journal_writer.origin_visit_status_add([visit_status])
db.origin_visit_status_add(visit_status, cur=cur)
@timed
@process_metrics
@db_transaction()
def origin_visit_status_add(
self, visit_statuses: List[OriginVisitStatus], *, db: Db, cur=None,
) -> Dict[str, int]:
visit_statuses_ = []
# First pass to check origin existence (fail early if any origin is unknown)
for visit_status in visit_statuses:
origin_url = self.origin_get([visit_status.origin], db=db, cur=cur)[0]
if not origin_url:
raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
if visit_status.type is None:
origin_visit = self.origin_visit_get_by(
visit_status.origin, visit_status.visit, db=db, cur=cur
)
if origin_visit is None:
raise StorageArgumentException(
f"Unknown origin visit {visit_status.visit} "
f"of origin {visit_status.origin}"
)
origin_visit_status = attr.evolve(visit_status, type=origin_visit.type)
else:
origin_visit_status = visit_status
visit_statuses_.append(origin_visit_status)
for visit_status in visit_statuses_:
self._origin_visit_status_add(visit_status, db, cur)
return {"origin_visit_status:add": len(visit_statuses_)}
@timed
@db_transaction()
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
*,
db: Db,
cur=None,
) -> Optional[OriginVisitStatus]:
if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
raise StorageArgumentException(
f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
f"{','.join(VISIT_STATUSES)} authorized"
)
row_d = db.origin_visit_status_get_latest(
origin_url, visit, allowed_statuses, require_snapshot, cur=cur
)
if not row_d:
return None
return OriginVisitStatus(**row_d)
@timed
@db_transaction(statement_timeout=500)
def origin_visit_get(
self,
origin: str,
page_token: Optional[str] = None,
order: ListOrder = ListOrder.ASC,
limit: int = 10,
*,
db: Db,
cur=None,
) -> PagedResult[OriginVisit]:
page_token = page_token or "0"
if not isinstance(order, ListOrder):
raise StorageArgumentException("order must be a ListOrder value")
if not isinstance(page_token, str):
raise StorageArgumentException("page_token must be a string.")
next_page_token = None
visit_from = int(page_token)
visits: List[OriginVisit] = []
extra_limit = limit + 1
for row in db.origin_visit_get_range(
origin, visit_from=visit_from, order=order, limit=extra_limit, cur=cur
):
row_d = dict(zip(db.origin_visit_cols, row))
visits.append(
OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
)
assert len(visits) <= extra_limit
if len(visits) == extra_limit:
visits = visits[:limit]
next_page_token = str(visits[-1].visit)
return PagedResult(results=visits, next_page_token=next_page_token)
@timed
@db_transaction(statement_timeout=500)
def origin_visit_find_by_date(
self, origin: str, visit_date: datetime.datetime, *, db: Db, cur=None
) -> Optional[OriginVisit]:
row_d = db.origin_visit_find_by_date(origin, visit_date, cur=cur)
if not row_d:
return None
return OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
@timed
@db_transaction(statement_timeout=500)
def origin_visit_get_by(
self, origin: str, visit: int, *, db: Db, cur=None
) -> Optional[OriginVisit]:
row = db.origin_visit_get(origin, visit, cur)
if row:
row_d = dict(zip(db.origin_visit_get_cols, row))
return OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
return None
@timed
@db_transaction(statement_timeout=4000)
def origin_visit_get_latest(
self,
origin: str,
type: Optional[str] = None,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
*,
db: Db,
cur=None,
) -> Optional[OriginVisit]:
if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
raise StorageArgumentException(
f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
f"{','.join(VISIT_STATUSES)} authorized"
)
row = db.origin_visit_get_latest(
origin,
type=type,
allowed_statuses=allowed_statuses,
require_snapshot=require_snapshot,
cur=cur,
)
if row:
row_d = dict(zip(db.origin_visit_get_cols, row))
visit = OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
return visit
return None
@timed
@db_transaction(statement_timeout=500)
def origin_visit_status_get(
self,
origin: str,
visit: int,
page_token: Optional[str] = None,
order: ListOrder = ListOrder.ASC,
limit: int = 10,
*,
db: Db,
cur=None,
) -> PagedResult[OriginVisitStatus]:
next_page_token = None
date_from = None
if page_token is not None:
date_from = datetime.datetime.fromisoformat(page_token)
visit_statuses: List[OriginVisitStatus] = []
# Take one more visit status so we can reuse it as the next page token if any
for row in db.origin_visit_status_get_range(
origin, visit, date_from=date_from, order=order, limit=limit + 1, cur=cur,
):
row_d = dict(zip(db.origin_visit_status_cols, row))
visit_statuses.append(OriginVisitStatus(**row_d))
if len(visit_statuses) > limit:
# last visit status date is the next page token
next_page_token = str(visit_statuses[-1].date)
# excluding that visit status from the result to respect the limit size
visit_statuses = visit_statuses[:limit]
return PagedResult(results=visit_statuses, next_page_token=next_page_token)
@timed
@db_transaction()
def origin_visit_status_get_random(
self, type: str, *, db: Db, cur=None
) -> Optional[OriginVisitStatus]:
row = db.origin_visit_get_random(type, cur)
if row is not None:
row_d = dict(zip(db.origin_visit_status_cols, row))
return OriginVisitStatus(**row_d)
return None
@timed
@db_transaction(statement_timeout=2000)
def object_find_by_sha1_git(
self, ids: List[Sha1Git], *, db: Db, cur=None
) -> Dict[Sha1Git, List[Dict]]:
ret: Dict[Sha1Git, List[Dict]] = {id: [] for id in ids}
for retval in db.object_find_by_sha1_git(ids, cur=cur):
if retval[1]:
ret[retval[0]].append(
dict(zip(db.object_find_by_sha1_git_cols, retval))
)
return ret
@timed
@db_transaction(statement_timeout=500)
def origin_get(
self, origins: List[str], *, db: Db, cur=None
) -> Iterable[Optional[Origin]]:
rows = db.origin_get_by_url(origins, cur)
result: List[Optional[Origin]] = []
for row in rows:
origin_d = dict(zip(db.origin_cols, row))
url = origin_d["url"]
result.append(None if url is None else Origin(url=url))
return result
@timed
@db_transaction(statement_timeout=500)
def origin_get_by_sha1(
self, sha1s: List[bytes], *, db: Db, cur=None
) -> List[Optional[Dict[str, Any]]]:
return [
dict(zip(db.origin_cols, row)) if row[0] else None
for row in db.origin_get_by_sha1(sha1s, cur)
]
@timed
@db_transaction_generator()
def origin_get_range(self, origin_from=1, origin_count=100, *, db: Db, cur=None):
for origin in db.origin_get_range(origin_from, origin_count, cur):
yield dict(zip(db.origin_get_range_cols, origin))
@timed
@db_transaction()
def origin_list(
self, page_token: Optional[str] = None, limit: int = 100, *, db: Db, cur=None
) -> PagedResult[Origin]:
page_token = page_token or "0"
if not isinstance(page_token, str):
raise StorageArgumentException("page_token must be a string.")
origin_from = int(page_token)
next_page_token = None
origins: List[Origin] = []
# Take one more origin so we can reuse it as the next page token if any
for row_d in self.origin_get_range(origin_from, limit + 1, db=db, cur=cur):
origins.append(Origin(url=row_d["url"]))
# keep the last_id for the pagination if needed
last_id = row_d["id"]
if len(origins) > limit: # data left for subsequent call
# last origin id is the next page token
next_page_token = str(last_id)
# excluding that origin from the result to respect the limit size
origins = origins[:limit]
assert len(origins) <= limit
return PagedResult(results=origins, next_page_token=next_page_token)
@timed
@db_transaction()
def origin_search(
self,
url_pattern: str,
page_token: Optional[str] = None,
limit: int = 50,
regexp: bool = False,
with_visit: bool = False,
visit_types: Optional[List[str]] = None,
*,
db: Db,
cur=None,
) -> PagedResult[Origin]:
next_page_token = None
offset = int(page_token) if page_token else 0
origins = []
# Take one more origin so we can reuse it as the next page token if any
for origin in db.origin_search(
url_pattern, offset, limit + 1, regexp, with_visit, visit_types, cur
):
row_d = dict(zip(db.origin_cols, origin))
origins.append(Origin(url=row_d["url"]))
if len(origins) > limit:
# next offset
next_page_token = str(offset + limit)
# excluding that origin from the result to respect the limit size
origins = origins[:limit]
assert len(origins) <= limit
return PagedResult(results=origins, next_page_token=next_page_token)
@timed
@db_transaction()
def origin_count(
self,
url_pattern: str,
regexp: bool = False,
with_visit: bool = False,
*,
db: Db,
cur=None,
) -> int:
return db.origin_count(url_pattern, regexp, with_visit, cur)
@timed
@process_metrics
@db_transaction()
def origin_add(self, origins: List[Origin], *, db: Db, cur=None) -> Dict[str, int]:
urls = [o.url for o in origins]
known_origins = set(url for (url,) in db.origin_get_by_url(urls, cur))
# keep only one occurrence of each given origin while keeping the list
# sorted as originally given
to_add = sorted(set(urls) - known_origins, key=urls.index)
self.journal_writer.origin_add([Origin(url=url) for url in to_add])
added = 0
for url in to_add:
if db.origin_add(url, cur):
added += 1
return {"origin:add": added}
@db_transaction(statement_timeout=500)
def stat_counters(self, *, db: Db, cur=None):
return {k: v for (k, v) in db.stat_counters()}
@db_transaction()
def refresh_stat_counters(self, *, db: Db, cur=None):
keys = [
"content",
"directory",
"directory_entry_dir",
"directory_entry_file",
"directory_entry_rev",
"origin",
"origin_visit",
"person",
"release",
"revision",
"revision_history",
"skipped_content",
"snapshot",
]
for key in keys:
cur.execute("select * from swh_update_counter(%s)", (key,))
@timed
@process_metrics
@db_transaction()
def raw_extrinsic_metadata_add(
self, metadata: List[RawExtrinsicMetadata], db, cur,
) -> Dict[str, int]:
metadata = list(metadata)
self.journal_writer.raw_extrinsic_metadata_add(metadata)
counter = Counter[ExtendedObjectType]()
for metadata_entry in metadata:
authority_id = self._get_authority_id(metadata_entry.authority, db, cur)
fetcher_id = self._get_fetcher_id(metadata_entry.fetcher, db, cur)
db.raw_extrinsic_metadata_add(
id=metadata_entry.id,
type=metadata_entry.target.object_type.name.lower(),
target=str(metadata_entry.target),
discovery_date=metadata_entry.discovery_date,
authority_id=authority_id,
fetcher_id=fetcher_id,
format=metadata_entry.format,
metadata=metadata_entry.metadata,
origin=metadata_entry.origin,
visit=metadata_entry.visit,
snapshot=map_optional(str, metadata_entry.snapshot),
release=map_optional(str, metadata_entry.release),
revision=map_optional(str, metadata_entry.revision),
path=metadata_entry.path,
directory=map_optional(str, metadata_entry.directory),
cur=cur,
)
counter[metadata_entry.target.object_type] += 1
return {
f"{type.value}_metadata:add": count for (type, count) in counter.items()
}
@db_transaction()
def raw_extrinsic_metadata_get(
self,
target: ExtendedSWHID,
authority: MetadataAuthority,
after: Optional[datetime.datetime] = None,
page_token: Optional[bytes] = None,
limit: int = 1000,
*,
db: Db,
cur=None,
) -> PagedResult[RawExtrinsicMetadata]:
if page_token:
(after_time, after_fetcher) = msgpack_loads(base64.b64decode(page_token))
if after and after_time < after:
raise StorageArgumentException(
"page_token is inconsistent with the value of 'after'."
)
else:
after_time = after
after_fetcher = None
authority_id = self._get_authority_id(authority, db, cur)
if not authority_id:
return PagedResult(next_page_token=None, results=[],)
rows = db.raw_extrinsic_metadata_get(
str(target), authority_id, after_time, after_fetcher, limit + 1, cur,
)
rows = [dict(zip(db.raw_extrinsic_metadata_get_cols, row)) for row in rows]
results = []
for row in rows:
assert str(target) == row["raw_extrinsic_metadata.target"]
results.append(converters.db_to_raw_extrinsic_metadata(row))
if len(results) > limit:
results.pop()
assert len(results) == limit
last_returned_row = rows[-2] # rows[-1] corresponds to the popped result
next_page_token: Optional[str] = base64.b64encode(
msgpack_dumps(
(
last_returned_row["discovery_date"],
last_returned_row["metadata_fetcher.id"],
)
)
).decode()
else:
next_page_token = None
return PagedResult(next_page_token=next_page_token, results=results,)
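# The pagination above serializes (discovery_date, fetcher_id) of the last
# returned row with msgpack_dumps and base64 into an opaque page_token, decoded
# on the next call into a strict lower bound. A rough round-trip sketch, using
# json instead of the project's msgpack serializers, for illustration only:
import base64
import datetime
import json

def encode_page_token(discovery_date: datetime.datetime, fetcher_id: int) -> str:
    # The real code uses msgpack_dumps; json + isoformat keeps this sketch standalone.
    payload = json.dumps([discovery_date.isoformat(), fetcher_id]).encode()
    return base64.b64encode(payload).decode()

def decode_page_token(token: str):
    date_str, fetcher_id = json.loads(base64.b64decode(token))
    return datetime.datetime.fromisoformat(date_str), fetcher_id

token = encode_page_token(
    datetime.datetime(2021, 4, 1, tzinfo=datetime.timezone.utc), 42
)
assert decode_page_token(token) == (
    datetime.datetime(2021, 4, 1, tzinfo=datetime.timezone.utc), 42)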
@db_transaction()
def raw_extrinsic_metadata_get_by_ids(
self, ids: List[Sha1Git], *, db: Db, cur=None,
) -> List[RawExtrinsicMetadata]:
return [
converters.db_to_raw_extrinsic_metadata(
dict(zip(db.raw_extrinsic_metadata_get_cols, row))
)
for row in db.raw_extrinsic_metadata_get_by_ids(ids)
]
@db_transaction()
def raw_extrinsic_metadata_get_authorities(
self, target: ExtendedSWHID, *, db: Db, cur=None,
) -> List[MetadataAuthority]:
return [
MetadataAuthority(
type=MetadataAuthorityType(authority_type), url=authority_url
)
for (
authority_type,
authority_url,
) in db.raw_extrinsic_metadata_get_authorities(str(target), cur)
]
@timed
@process_metrics
@db_transaction()
def metadata_fetcher_add(
self, fetchers: List[MetadataFetcher], *, db: Db, cur=None
) -> Dict[str, int]:
fetchers = list(fetchers)
self.journal_writer.metadata_fetcher_add(fetchers)
count = 0
for fetcher in fetchers:
db.metadata_fetcher_add(fetcher.name, fetcher.version, cur=cur)
count += 1
return {"metadata_fetcher:add": count}
@timed
@db_transaction(statement_timeout=500)
def metadata_fetcher_get(
self, name: str, version: str, *, db: Db, cur=None
) -> Optional[MetadataFetcher]:
row = db.metadata_fetcher_get(name, version, cur=cur)
if not row:
return None
return MetadataFetcher.from_dict(dict(zip(db.metadata_fetcher_cols, row)))
@timed
@process_metrics
@db_transaction()
def metadata_authority_add(
self, authorities: List[MetadataAuthority], *, db: Db, cur=None
) -> Dict[str, int]:
authorities = list(authorities)
self.journal_writer.metadata_authority_add(authorities)
count = 0
for authority in authorities:
db.metadata_authority_add(authority.type.value, authority.url, cur=cur)
count += 1
return {"metadata_authority:add": count}
@timed
@db_transaction()
def metadata_authority_get(
self, type: MetadataAuthorityType, url: str, *, db: Db, cur=None
) -> Optional[MetadataAuthority]:
row = db.metadata_authority_get(type.value, url, cur=cur)
if not row:
return None
return MetadataAuthority.from_dict(dict(zip(db.metadata_authority_cols, row)))
def clear_buffers(self, object_types: Sequence[str] = ()) -> None:
"""Do nothing
"""
return None
def flush(self, object_types: Sequence[str] = ()) -> Dict[str, int]:
return {}
def _get_authority_id(self, authority: MetadataAuthority, db, cur):
authority_id = db.metadata_authority_get_id(
authority.type.value, authority.url, cur
)
if not authority_id:
raise StorageArgumentException(f"Unknown authority {authority}")
return authority_id
def _get_fetcher_id(self, fetcher: MetadataFetcher, db, cur):
fetcher_id = db.metadata_fetcher_get_id(fetcher.name, fetcher.version, cur)
if not fetcher_id:
raise StorageArgumentException(f"Unknown fetcher {fetcher}")
return fetcher_id
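Several read endpoints in this file (origin_list, origin_search, origin_visit_get, origin_visit_status_get) fetch limit + 1 rows and return a PagedResult whose next_page_token is derived from the extra row. The following sketch shows how a caller would typically drain such an endpoint; it assumes an already-configured Storage instance and is illustrative only.

def iter_all_origins(storage, batch_size: int = 100):
    # Follow next_page_token until the backend signals the last page with None.
    page_token = None
    while True:
        page = storage.origin_list(page_token=page_token, limit=batch_size)
        yield from page.results
        if page.next_page_token is None:
            break
        page_token = page.next_page_token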
diff --git a/swh/storage/sql/30-schema.sql b/swh/storage/sql/30-schema.sql
index 5f801e58..713ee28c 100644
--- a/swh/storage/sql/30-schema.sql
+++ b/swh/storage/sql/30-schema.sql
@@ -1,515 +1,517 @@
---
--- SQL implementation of the Software Heritage data model
---
-- schema versions
create table dbversion
(
version int primary key,
release timestamptz,
description text
);
comment on table dbversion is 'Details of current db version';
comment on column dbversion.version is 'SQL schema version';
comment on column dbversion.release is 'Version deployment timestamp';
comment on column dbversion.description is 'Release description';
-- latest schema version
insert into dbversion(version, release, description)
- values(175, now(), 'Work In Progress');
+ values(176, now(), 'Work In Progress');
-- a SHA1 checksum
create domain sha1 as bytea check (length(value) = 20);
-- a Git object ID, i.e., a Git-style salted SHA1 checksum
create domain sha1_git as bytea check (length(value) = 20);
-- a SHA256 checksum
create domain sha256 as bytea check (length(value) = 32);
-- a blake2 checksum
create domain blake2s256 as bytea check (length(value) = 32);
-- UNIX path (absolute, relative, individual path component, etc.)
create domain unix_path as bytea;
-- a set of UNIX-like access permissions, as manipulated by, e.g., chmod
create domain file_perms as int;
-- an SWHID
create domain swhid as text check (value ~ '^swh:[0-9]+:.*');
-- Checksums about actual file content. Note that the content itself is not
-- stored in the DB, but on external (key-value) storage. A single checksum is
-- used as the key there, but the others can be used to verify that we do not
-- unknowingly inject content collisions.
create table content
(
sha1 sha1 not null,
sha1_git sha1_git not null,
sha256 sha256 not null,
blake2s256 blake2s256 not null,
length bigint not null,
ctime timestamptz not null default now(),
-- creation time, i.e. time of (first) injection into the storage
status content_status not null default 'visible',
object_id bigserial
);
comment on table content is 'Checksums of file content which is actually stored externally';
comment on column content.sha1 is 'Content sha1 hash';
comment on column content.sha1_git is 'Git object sha1 hash';
comment on column content.sha256 is 'Content Sha256 hash';
comment on column content.blake2s256 is 'Content blake2s hash';
comment on column content.length is 'Content length';
comment on column content.ctime is 'First seen time';
comment on column content.status is 'Content status (absent, visible, hidden)';
comment on column content.object_id is 'Content identifier';
-- An origin is a place, identified by a URL, where software source code
-- artifacts can be found. We support different kinds of origins, e.g., git and
-- other VCS repositories, web pages that list tarball URLs (e.g.,
-- http://www.kernel.org), indirect tarball URLs (e.g.,
-- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is
-- that it can be *fetched* from (wget, git clone, svn checkout, etc.) to
-- retrieve all the contained software.
create table origin
(
id bigserial not null,
url text not null
);
comment on column origin.id is 'Artifact origin id';
comment on column origin.url is 'URL of origin';
-- Content blobs observed somewhere, but not ingested into the archive for
-- whatever reason. This table is separate from the content table as we might
-- not have the sha1 checksum of skipped contents (for instance when we inject
-- git repositories, objects that are too big will be skipped here, and we will
-- only know their sha1_git). 'reason' contains the reason the content was
-- skipped. origin is a nullable column that makes it possible to find out
-- which origin contains that skipped content.
create table skipped_content
(
sha1 sha1,
sha1_git sha1_git,
sha256 sha256,
blake2s256 blake2s256,
length bigint not null,
ctime timestamptz not null default now(),
status content_status not null default 'absent',
reason text not null,
origin bigint,
object_id bigserial
);
comment on table skipped_content is 'Content blobs observed, but not ingested in the archive';
comment on column skipped_content.sha1 is 'Skipped content sha1 hash';
comment on column skipped_content.sha1_git is 'Git object sha1 hash';
comment on column skipped_content.sha256 is 'Skipped content sha256 hash';
comment on column skipped_content.blake2s256 is 'Skipped content blake2s hash';
comment on column skipped_content.length is 'Skipped content length';
comment on column skipped_content.ctime is 'First seen time';
comment on column skipped_content.status is 'Skipped content status (absent, visible, hidden)';
comment on column skipped_content.reason is 'Reason for skipping';
comment on column skipped_content.origin is 'Origin table identifier';
comment on column skipped_content.object_id is 'Skipped content identifier';
-- A file-system directory. A directory is a list of directory entries (see
-- tables: directory_entry_{dir,file}).
--
-- To list the contents of a directory:
-- 1. list the contained directory_entry_dir using array dir_entries
-- 2. list the contained directory_entry_file using array file_entries
-- 3. list the contained directory_entry_rev using array rev_entries
-- 4. UNION
--
-- Synonyms/mappings:
-- * git: tree
create table directory
(
id sha1_git not null,
dir_entries bigint[], -- sub-directories, reference directory_entry_dir
file_entries bigint[], -- contained files, reference directory_entry_file
rev_entries bigint[], -- mounted revisions, reference directory_entry_rev
object_id bigserial -- short object identifier
);
comment on table directory is 'Contents of a directory, synonymous to tree (git)';
comment on column directory.id is 'Git object sha1 hash';
comment on column directory.dir_entries is 'Sub-directories, reference directory_entry_dir';
comment on column directory.file_entries is 'Contained files, reference directory_entry_file';
comment on column directory.rev_entries is 'Mounted revisions, reference directory_entry_rev';
comment on column directory.object_id is 'Short object identifier';
-- A directory entry pointing to a (sub-)directory.
create table directory_entry_dir
(
id bigserial,
target sha1_git not null, -- id of target directory
name unix_path not null, -- path name, relative to containing dir
perms file_perms not null -- unix-like permissions
);
comment on table directory_entry_dir is 'Directory entry for directory';
comment on column directory_entry_dir.id is 'Directory identifier';
comment on column directory_entry_dir.target is 'Target directory identifier';
comment on column directory_entry_dir.name is 'Path name, relative to containing directory';
comment on column directory_entry_dir.perms is 'Unix-like permissions';
-- A directory entry pointing to a file content.
create table directory_entry_file
(
id bigserial,
target sha1_git not null, -- id of target file
name unix_path not null, -- path name, relative to containing dir
perms file_perms not null -- unix-like permissions
);
comment on table directory_entry_file is 'Directory entry for file';
comment on column directory_entry_file.id is 'File identifier';
comment on column directory_entry_file.target is 'Target file identifier';
comment on column directory_entry_file.name is 'Path name, relative to containing directory';
comment on column directory_entry_file.perms is 'Unix-like permissions';
-- A directory entry pointing to a revision.
create table directory_entry_rev
(
id bigserial,
target sha1_git not null, -- id of target revision
name unix_path not null, -- path name, relative to containing dir
perms file_perms not null -- unix-like permissions
);
comment on table directory_entry_rev is 'Directory entry for revision';
comment on column directory_entry_rev.id is 'Revision identifier';
comment on column directory_entry_rev.target is 'Target revision identifier';
comment on column directory_entry_rev.name is 'Path name, relative to containing directory';
comment on column directory_entry_rev.perms is 'Unix-like permissions';
-- A person referenced by some source code artifacts, e.g., a VCS revision or
-- release metadata.
create table person
(
id bigserial,
name bytea, -- advisory: not null if we managed to parse a name
email bytea, -- advisory: not null if we managed to parse an email
fullname bytea not null -- freeform specification; what is actually used in the checksums
                           -- will usually be of the form 'name <email>'
);
comment on table person is 'Person referenced in code artifact release metadata';
comment on column person.id is 'Person identifier';
comment on column person.name is 'Name';
comment on column person.email is 'Email';
comment on column person.fullname is 'Full name (raw name)';
-- The state of a source code tree at a specific point in time.
--
-- Synonyms/mappings:
-- * git / subversion / etc: commit
-- * tarball: a specific tarball
--
-- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in
-- case of merges) parent revisions. Each revision points to a directory, i.e.,
-- a file-system tree containing files and directories.
create table revision
(
id sha1_git not null,
date timestamptz,
date_offset smallint,
committer_date timestamptz,
committer_date_offset smallint,
type revision_type not null,
directory sha1_git, -- source code 'root' directory
message bytea,
author bigint,
committer bigint,
synthetic boolean not null default false, -- true iff revision has been created by Software Heritage
metadata jsonb, -- extra metadata (tarball checksums, extra commit information, etc...)
object_id bigserial,
date_neg_utc_offset boolean,
committer_date_neg_utc_offset boolean,
extra_headers bytea[][] not null -- extra headers (used in hash computation)
);
comment on table revision is 'A revision represents the state of a source code tree at a specific point in time';
comment on column revision.id is 'Git-style SHA1 commit identifier';
comment on column revision.date is 'Author timestamp as UNIX epoch';
comment on column revision.date_offset is 'Author timestamp timezone, as minute offsets from UTC';
comment on column revision.date_neg_utc_offset is 'True indicates a -0 UTC offset on author timestamp';
comment on column revision.committer_date is 'Committer timestamp as UNIX epoch';
comment on column revision.committer_date_offset is 'Committer timestamp timezone, as minute offsets from UTC';
comment on column revision.committer_date_neg_utc_offset is 'True indicates a -0 UTC offset on committer timestamp';
comment on column revision.type is 'Type of revision';
comment on column revision.directory is 'Directory identifier';
comment on column revision.message is 'Commit message';
comment on column revision.author is 'Author identity';
comment on column revision.committer is 'Committer identity';
comment on column revision.synthetic is 'True iff revision has been synthesized by Software Heritage';
comment on column revision.metadata is 'Extra revision metadata';
comment on column revision.object_id is 'Non-intrinsic, sequential object identifier';
comment on column revision.extra_headers is 'Extra revision headers; used in revision hash computation';
-- either this table or the sha1_git[] column on the revision table
create table revision_history
(
id sha1_git not null,
parent_id sha1_git not null,
parent_rank int not null default 0
-- parent position in merge commits, 0-based
);
comment on table revision_history is 'Sequence of revision history with parent and position in history';
comment on column revision_history.id is 'Revision history git object sha1 checksum';
comment on column revision_history.parent_id is 'Parent revision git object identifier';
comment on column revision_history.parent_rank is 'Parent position in merge commits, 0-based';
-- Crawling history of software origins visited by Software Heritage. Each
-- visit is a 3-way mapping between a software origin, a timestamp, and a
-- snapshot object capturing the full-state of the origin at visit time.
create table origin_visit
(
origin bigint not null,
visit bigint not null,
date timestamptz not null,
type text not null
);
comment on column origin_visit.origin is 'Visited origin';
comment on column origin_visit.visit is 'Sequential visit number for the origin';
comment on column origin_visit.date is 'Visit timestamp';
comment on column origin_visit.type is 'Type of loader that did the visit (hg, git, ...)';
-- Crawling history of software origin visits by Software Heritage. Each
-- visit sees its history change through new origin visit status updates.
create table origin_visit_status
(
origin bigint not null,
visit bigint not null,
date timestamptz not null,
type text not null,
status origin_visit_state not null,
metadata jsonb,
snapshot sha1_git
);
comment on column origin_visit_status.origin is 'Origin concerned by the visit update';
comment on column origin_visit_status.visit is 'Visit concerned by the visit update';
comment on column origin_visit_status.date is 'Visit update timestamp';
comment on column origin_visit_status.type is 'Type of loader that did the visit (hg, git, ...)';
comment on column origin_visit_status.status is 'Visit status (ongoing, failed, full)';
comment on column origin_visit_status.metadata is 'Optional origin visit metadata';
comment on column origin_visit_status.snapshot is 'Optional, possibly partial, snapshot of the origin visit';
-- A snapshot represents the entire state of a software origin as crawled by
-- Software Heritage. This table is a simple mapping between (public) intrinsic
-- snapshot identifiers and (private) numeric sequential identifiers.
create table snapshot
(
object_id bigserial not null, -- PK internal object identifier
id sha1_git not null -- snapshot intrinsic identifier
);
comment on table snapshot is 'State of a software origin as crawled by Software Heritage';
comment on column snapshot.object_id is 'Internal object identifier';
comment on column snapshot.id is 'Intrinsic snapshot identifier';
-- Each snapshot associates "branch" names to other objects in the Software
-- Heritage Merkle DAG. This table describes branches as mappings between names
-- and target typed objects.
create table snapshot_branch
(
object_id bigserial not null, -- PK internal object identifier
name bytea not null, -- branch name, e.g., "master" or "feature/drag-n-drop"
target bytea, -- target object identifier, e.g., a revision identifier
target_type snapshot_target -- target object type, e.g., "revision"
);
comment on table snapshot_branch is 'Associates branches with objects in Heritage Merkle DAG';
comment on column snapshot_branch.object_id is 'Internal object identifier';
comment on column snapshot_branch.name is 'Branch name';
comment on column snapshot_branch.target is 'Target object identifier';
comment on column snapshot_branch.target_type is 'Target object type';
-- Mapping between snapshots and their branches.
create table snapshot_branches
(
snapshot_id bigint not null, -- snapshot identifier, ref. snapshot.object_id
branch_id bigint not null -- branch identifier, ref. snapshot_branch.object_id
);
comment on table snapshot_branches is 'Mapping between snapshots and their branches';
comment on column snapshot_branches.snapshot_id is 'Snapshot identifier';
comment on column snapshot_branches.branch_id is 'Branch identifier';
-- A "memorable" point in time in the development history of a software
-- project.
--
-- Synonyms/mappings:
-- * git: tag (of the annotated kind, otherwise they are just references)
-- * tarball: the release version number
create table release
(
id sha1_git not null,
target sha1_git,
date timestamptz,
date_offset smallint,
name bytea,
comment bytea,
author bigint,
synthetic boolean not null default false, -- true iff release has been created by Software Heritage
object_id bigserial,
target_type object_type not null,
date_neg_utc_offset boolean
);
comment on table release is 'Details of a software release, synonymous with
a tag (git) or version number (tarball)';
comment on column release.id is 'Release git identifier';
comment on column release.target is 'Target git identifier';
comment on column release.date is 'Release timestamp';
comment on column release.date_offset is 'Timestamp offset from UTC';
comment on column release.name is 'Name';
comment on column release.comment is 'Comment';
comment on column release.author is 'Author';
comment on column release.synthetic is 'Indicates if created by Software Heritage';
comment on column release.object_id is 'Object identifier';
comment on column release.target_type is 'Object type (''content'', ''directory'', ''revision'',
''release'', ''snapshot'')';
comment on column release.date_neg_utc_offset is 'True indicates -0 UTC offset for release timestamp';
-- Tools
create table metadata_fetcher
(
id serial not null,
name text not null,
version text not null
);
comment on table metadata_fetcher is 'Tools used to retrieve metadata';
comment on column metadata_fetcher.id is 'Internal identifier of the fetcher';
comment on column metadata_fetcher.name is 'Fetcher name';
comment on column metadata_fetcher.version is 'Fetcher version';
create table metadata_authority
(
id serial not null,
type text not null,
url text not null
);
comment on table metadata_authority is 'Metadata authority information';
comment on column metadata_authority.id is 'Internal identifier of the authority';
comment on column metadata_authority.type is 'Type of authority (deposit_client/forge/registry)';
comment on column metadata_authority.url is 'Authority''s URI';
-- Extrinsic metadata on DAG objects and origins.
create table raw_extrinsic_metadata
(
id sha1_git not null,
type text not null,
target text not null,
-- metadata source
authority_id bigint not null,
fetcher_id bigint not null,
discovery_date timestamptz not null,
-- metadata itself
format text not null,
metadata bytea not null,
-- context
origin text,
visit bigint,
snapshot swhid,
release swhid,
revision swhid,
path bytea,
directory swhid
);
comment on table raw_extrinsic_metadata is 'keeps all metadata found concerning an object';
comment on column raw_extrinsic_metadata.type is 'the type of object (content/directory/revision/release/snapshot/origin) the metadata is on';
comment on column raw_extrinsic_metadata.target is 'the SWHID or origin URL for which the metadata was found';
comment on column raw_extrinsic_metadata.discovery_date is 'the date of retrieval';
comment on column raw_extrinsic_metadata.authority_id is 'the metadata provider: github, openhub, deposit, etc.';
comment on column raw_extrinsic_metadata.fetcher_id is 'the tool used for extracting metadata: loaders, crawlers, etc.';
comment on column raw_extrinsic_metadata.format is 'name of the format of metadata, used by readers to interpret it.';
comment on column raw_extrinsic_metadata.metadata is 'original metadata in opaque format';
-- Keep a cache of object counts
create table object_counts
(
object_type text, -- table for which we're counting objects (PK)
value bigint, -- count of objects in the table
last_update timestamptz, -- last update for the object count in this table
single_update boolean -- whether we update this table standalone (true) or through bucketed counts (false)
);
comment on table object_counts is 'Cache of object counts';
comment on column object_counts.object_type is 'Object type (''content'', ''directory'', ''revision'',
''release'', ''snapshot'')';
comment on column object_counts.value is 'Count of objects in the table';
comment on column object_counts.last_update is 'Last update for object count';
comment on column object_counts.single_update is 'standalone (true) or bucketed counts (false)';
create table object_counts_bucketed
(
line serial not null, -- PK
object_type text not null, -- table for which we're counting objects
identifier text not null, -- identifier across which we're bucketing objects
bucket_start bytea, -- lower bound (inclusive) for the bucket
bucket_end bytea, -- upper bound (exclusive) for the bucket
value bigint, -- count of objects in the bucket
last_update timestamptz -- last update for the object count in this bucket
);
comment on table object_counts_bucketed is 'Bucketed count for objects ordered by type';
comment on column object_counts_bucketed.line is 'Auto-incremented identifier value';
comment on column object_counts_bucketed.object_type is 'Object type (''content'', ''directory'', ''revision'',
''release'', ''snapshot'')';
comment on column object_counts_bucketed.identifier is 'Common identifier for bucketed objects';
comment on column object_counts_bucketed.bucket_start is 'Lower bound (inclusive) for the bucket';
comment on column object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket';
comment on column object_counts_bucketed.value is 'Count of objects in the bucket';
comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket';
-- The ExtID (typ. original VCS) <-> swhid relation table
create table extid
(
extid_type text not null,
extid bytea not null,
target_type object_type not null,
- target sha1_git not null
+ target sha1_git not null,
+ extid_version bigint not null default 0
);
comment on table extid is 'Correspondence SWH object (SWHID) <-> original revision id (VCS id)';
comment on column extid.extid_type is 'ExtID type';
comment on column extid.extid is 'Intrinsic identifier of the object (e.g. hg revision)';
comment on column extid.target_type is 'Type of SWHID of the referenced SWH object';
comment on column extid.target is 'Value (hash) of SWHID of the referenced SWH object';
+comment on column extid.extid_version is 'Version of the extid for the given original object';
diff --git a/swh/storage/sql/40-funcs.sql b/swh/storage/sql/40-funcs.sql
index 4d1a621e..184cf78c 100644
--- a/swh/storage/sql/40-funcs.sql
+++ b/swh/storage/sql/40-funcs.sql
@@ -1,1011 +1,1011 @@
create or replace function hash_sha1(text)
returns text
as $$
select encode(digest($1, 'sha1'), 'hex')
$$ language sql strict immutable;
comment on function hash_sha1(text) is 'Compute SHA1 hash as text';
-- create a temporary table called tmp_TBLNAME, mimicking existing table
-- TBLNAME
--
-- Args:
-- tblname: name of the table to mimic
create or replace function swh_mktemp(tblname regclass)
returns void
language plpgsql
as $$
begin
execute format('
create temporary table if not exists tmp_%1$I
(like %1$I including defaults)
on commit delete rows;
alter table tmp_%1$I drop column if exists object_id;
', tblname);
return;
end
$$;
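-- For example (illustrative), mirroring the content table before a bulk load:
--   select swh_mktemp('content');
-- creates tmp_content with the same columns as content, minus object_id.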
-- create a temporary table for directory entries called tmp_TBLNAME,
-- mimicking existing table TBLNAME with an extra dir_id (sha1_git)
-- column, and dropping the id column.
--
-- This is used to create the tmp_directory_entry_{dir,file,rev} tables.
--
-- Args:
-- tblname: name of the table to mimic
create or replace function swh_mktemp_dir_entry(tblname regclass)
returns void
language plpgsql
as $$
begin
execute format('
create temporary table if not exists tmp_%1$I
(like %1$I including defaults, dir_id sha1_git)
on commit delete rows;
alter table tmp_%1$I drop column if exists id;
', tblname);
return;
end
$$;
-- create a temporary table for revisions called tmp_revisions,
-- mimicking existing table revision, replacing the foreign keys to
-- people with an email and name field
--
create or replace function swh_mktemp_revision()
returns void
language sql
as $$
create temporary table if not exists tmp_revision (
like revision including defaults,
author_fullname bytea,
author_name bytea,
author_email bytea,
committer_fullname bytea,
committer_name bytea,
committer_email bytea
) on commit delete rows;
alter table tmp_revision drop column if exists author;
alter table tmp_revision drop column if exists committer;
alter table tmp_revision drop column if exists object_id;
$$;
-- create a temporary table for releases called tmp_release,
-- mimicking existing table release, replacing the foreign keys to
-- people with an email and name field
--
create or replace function swh_mktemp_release()
returns void
language sql
as $$
create temporary table if not exists tmp_release (
like release including defaults,
author_fullname bytea,
author_name bytea,
author_email bytea
) on commit delete rows;
alter table tmp_release drop column if exists author;
alter table tmp_release drop column if exists object_id;
$$;
-- create a temporary table for the branches of a snapshot
create or replace function swh_mktemp_snapshot_branch()
returns void
language sql
as $$
create temporary table if not exists tmp_snapshot_branch (
name bytea not null,
target bytea,
target_type snapshot_target
) on commit delete rows;
$$;
-- a content signature is a set of cryptographic checksums that we use to
-- uniquely identify content, for the purpose of verifying if we already have
-- some content or not during content injection
create type content_signature as (
sha1 sha1,
sha1_git sha1_git,
sha256 sha256,
blake2s256 blake2s256
);
-- check which entries of tmp_skipped_content are missing from skipped_content
--
-- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content,
-- 2. call this function
create or replace function swh_skipped_content_missing()
returns setof content_signature
language plpgsql
as $$
begin
return query
select sha1, sha1_git, sha256, blake2s256 from tmp_skipped_content t
where not exists
(select 1 from skipped_content s where
s.sha1 is not distinct from t.sha1 and
s.sha1_git is not distinct from t.sha1_git and
s.sha256 is not distinct from t.sha256);
return;
end
$$;
-- add tmp_content entries to content, skipping duplicates
--
-- operates in bulk: 0. swh_mktemp(content), 1. COPY to tmp_content,
-- 2. call this function
create or replace function swh_content_add()
returns void
language plpgsql
as $$
begin
insert into content (sha1, sha1_git, sha256, blake2s256, length, status, ctime)
select distinct sha1, sha1_git, sha256, blake2s256, length, status, ctime from tmp_content;
return;
end
$$;
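-- The full bulk protocol described above thus looks like (illustrative; the
-- COPY payload is up to the caller):
--   select swh_mktemp('content');
--   copy tmp_content (sha1, sha1_git, sha256, blake2s256, length, status, ctime) from stdin;
--   select swh_content_add();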
-- add tmp_skipped_content entries to skipped_content, skipping duplicates
--
-- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content,
-- 2. call this function
create or replace function swh_skipped_content_add()
returns void
language plpgsql
as $$
begin
insert into skipped_content (sha1, sha1_git, sha256, blake2s256, length, status, reason, origin)
select distinct sha1, sha1_git, sha256, blake2s256, length, status, reason, origin
from tmp_skipped_content
where (coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')) in (
select coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')
from swh_skipped_content_missing()
);
-- TODO XXX use postgres 9.5 "UPSERT" support here, when available.
-- Specifically, using "INSERT .. ON CONFLICT IGNORE" we can avoid
-- the extra swh_skipped_content_missing() query here.
return;
end
$$;
-- Update content entries from temporary table.
-- (columns are potential new columns added to the schema; the list cannot be empty)
--
create or replace function swh_content_update(columns_update text[])
returns void
language plpgsql
as $$
declare
query text;
tmp_array text[];
begin
if array_length(columns_update, 1) = 0 then
raise exception 'Please, provide the list of column names to update.';
end if;
tmp_array := array(select format('%1$s=t.%1$s', unnest) from unnest(columns_update));
query = format('update content set %s
from tmp_content t where t.sha1 = content.sha1',
array_to_string(tmp_array, ', '));
execute query;
return;
end
$$;
comment on function swh_content_update(text[]) IS 'Update existing content''s columns';
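-- Example (illustrative): after copying the new values into tmp_content,
-- refresh a single column of existing rows:
--   select swh_content_update(ARRAY['status']);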
create type directory_entry_type as enum('file', 'dir', 'rev');
-- Add tmp_directory_entry_* entries to directory_entry_* and directory,
-- skipping duplicates in directory_entry_*. This is a generic function that
-- works on all kind of directory entries.
--
-- operates in bulk: 0. swh_mktemp_dir_entry('directory_entry_*'), 1 COPY to
-- tmp_directory_entry_*, 2. call this function
--
-- Assumption: this function is used in the same transaction that inserts the
-- context directory in table "directory".
create or replace function swh_directory_entry_add(typ directory_entry_type)
returns void
language plpgsql
as $$
begin
execute format('
insert into directory_entry_%1$s (target, name, perms)
select distinct t.target, t.name, t.perms
from tmp_directory_entry_%1$s t
where not exists (
select 1
from directory_entry_%1$s i
where t.target = i.target and t.name = i.name and t.perms = i.perms)
', typ);
execute format('
with new_entries as (
select t.dir_id, array_agg(i.id) as entries
from tmp_directory_entry_%1$s t
inner join directory_entry_%1$s i
using (target, name, perms)
group by t.dir_id
)
update tmp_directory as d
set %1$s_entries = new_entries.entries
from new_entries
where d.id = new_entries.dir_id
', typ);
return;
end
$$;
-- Insert the data from tmp_directory, tmp_directory_entry_file,
-- tmp_directory_entry_dir, tmp_directory_entry_rev into their final
-- tables.
--
-- Prerequisites:
-- directory ids in tmp_directory
-- entries in tmp_directory_entry_{file,dir,rev}
--
create or replace function swh_directory_add()
returns void
language plpgsql
as $$
begin
perform swh_directory_entry_add('file');
perform swh_directory_entry_add('dir');
perform swh_directory_entry_add('rev');
insert into directory
select * from tmp_directory t
where not exists (
select 1 from directory d
where d.id = t.id);
return;
end
$$;
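-- The expected call sequence is therefore (illustrative):
--   select swh_mktemp('directory');
--   select swh_mktemp_dir_entry('directory_entry_file');
--   select swh_mktemp_dir_entry('directory_entry_dir');
--   select swh_mktemp_dir_entry('directory_entry_rev');
--   -- COPY rows into tmp_directory and tmp_directory_entry_{file,dir,rev}, then:
--   select swh_directory_add();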
-- a directory listing entry with all the metadata
--
-- can be used to list a directory, and retrieve all the data in one go.
create type directory_entry as
(
dir_id sha1_git, -- id of the parent directory
type directory_entry_type, -- type of entry
target sha1_git, -- id of target
name unix_path, -- path name, relative to containing dir
perms file_perms, -- unix-like permissions
status content_status, -- visible or absent
  sha1 sha1, -- content's sha1 if type is not dir
sha1_git sha1_git, -- content's sha1 git if type is not dir
sha256 sha256, -- content's sha256 if type is not dir
length bigint -- content length if type is not dir
);
-- List a single level of directory walked_dir_id
-- FIXME: order by name is not correct. For git, we need to order by
-- lexicographic order but as if a trailing / is present in directory
-- name
create or replace function swh_directory_walk_one(walked_dir_id sha1_git)
returns setof directory_entry
language sql
stable
as $$
with dir as (
select id as dir_id, dir_entries, file_entries, rev_entries
from directory
where id = walked_dir_id),
ls_d as (select dir_id, unnest(dir_entries) as entry_id from dir),
ls_f as (select dir_id, unnest(file_entries) as entry_id from dir),
ls_r as (select dir_id, unnest(rev_entries) as entry_id from dir)
(select dir_id, 'dir'::directory_entry_type as type,
e.target, e.name, e.perms, NULL::content_status,
NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
from ls_d
left join directory_entry_dir e on ls_d.entry_id = e.id)
union
(with known_contents as
(select dir_id, 'file'::directory_entry_type as type,
e.target, e.name, e.perms, c.status,
c.sha1, c.sha1_git, c.sha256, c.length
from ls_f
left join directory_entry_file e on ls_f.entry_id = e.id
inner join content c on e.target = c.sha1_git)
select * from known_contents
union
(select dir_id, 'file'::directory_entry_type as type,
e.target, e.name, e.perms, c.status,
c.sha1, c.sha1_git, c.sha256, c.length
from ls_f
left join directory_entry_file e on ls_f.entry_id = e.id
left join skipped_content c on e.target = c.sha1_git
where not exists (select 1 from known_contents where known_contents.sha1_git=e.target)))
union
(select dir_id, 'rev'::directory_entry_type as type,
e.target, e.name, e.perms, NULL::content_status,
NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
from ls_r
left join directory_entry_rev e on ls_r.entry_id = e.id)
order by name;
$$;
-- List recursively the revision directory arborescence
create or replace function swh_directory_walk(walked_dir_id sha1_git)
returns setof directory_entry
language sql
stable
as $$
with recursive entries as (
select dir_id, type, target, name, perms, status, sha1, sha1_git,
sha256, length
from swh_directory_walk_one(walked_dir_id)
union all
select dir_id, type, target, (dirname || '/' || name)::unix_path as name,
perms, status, sha1, sha1_git, sha256, length
from (select (swh_directory_walk_one(dirs.target)).*, dirs.name as dirname
from (select target, name from entries where type = 'dir') as dirs) as with_parent
)
select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256, length
from entries
$$;
-- Find a directory entry by its path
create or replace function swh_find_directory_entry_by_path(
walked_dir_id sha1_git,
dir_or_content_path bytea[])
returns directory_entry
language plpgsql
as $$
declare
end_index integer;
paths bytea default '';
path bytea;
res bytea[];
r record;
begin
end_index := array_upper(dir_or_content_path, 1);
res[1] := walked_dir_id;
for i in 1..end_index
loop
path := dir_or_content_path[i];
-- concatenate path for patching the name in the result record (if we found it)
if i = 1 then
paths = path;
else
paths := paths || '/' || path; -- concatenate paths
end if;
if i <> end_index then
select *
from swh_directory_walk_one(res[i] :: sha1_git)
where name=path
and type = 'dir'
limit 1 into r;
else
select *
from swh_directory_walk_one(res[i] :: sha1_git)
where name=path
limit 1 into r;
end if;
-- find the path
if r is null then
return null;
else
-- store the next dir to lookup the next local path from
res[i+1] := r.target;
end if;
end loop;
-- at this moment, r is the result. Patch its 'name' with the full path before returning it.
r.name := paths;
return r;
end
$$;
-- Returns the entries in a directory, without joining with their target tables
create or replace function swh_directory_get_entries(dir_id sha1_git)
returns table (
  type directory_entry_type, target sha1_git, name unix_path, perms file_perms
)
language sql
stable
as $$
with dir as (
select id as dir_id, dir_entries, file_entries, rev_entries
from directory
where id = dir_id),
ls_d as (select dir_id, unnest(dir_entries) as entry_id from dir),
ls_f as (select dir_id, unnest(file_entries) as entry_id from dir),
ls_r as (select dir_id, unnest(rev_entries) as entry_id from dir)
(select 'dir'::directory_entry_type, e.target, e.name, e.perms
from ls_d
left join directory_entry_dir e on ls_d.entry_id = e.id)
union
(select 'file'::directory_entry_type, e.target, e.name, e.perms
from ls_f
left join directory_entry_file e on ls_f.entry_id = e.id)
union
(select 'rev'::directory_entry_type, e.target, e.name, e.perms
from ls_r
left join directory_entry_rev e on ls_r.entry_id = e.id)
$$;
-- List all revision IDs starting from a given revision, going back in time
--
-- TODO ordering: should be breadth-first right now (what do we want?)
-- TODO ordering: ORDER BY parent_rank somewhere?
create or replace function swh_revision_list(root_revisions bytea[], num_revs bigint default NULL)
returns table (id sha1_git, parents bytea[])
language sql
stable
as $$
with recursive full_rev_list(id) as (
(select id from revision where id = ANY(root_revisions))
union
(select h.parent_id
from revision_history as h
join full_rev_list on h.id = full_rev_list.id)
),
rev_list as (select id from full_rev_list limit num_revs)
select rev_list.id as id,
array(select rh.parent_id::bytea
from revision_history rh
where rh.id = rev_list.id
order by rh.parent_rank
) as parent
from rev_list;
$$;
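-- Example (illustrative; the root hash is a placeholder):
--   select * from swh_revision_list(
--     ARRAY['\x01a7114f36fddd5ef2511b2cadda237a68adbb12'::bytea], 10);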
-- Detailed entry for a revision
create type revision_entry as
(
id sha1_git,
date timestamptz,
date_offset smallint,
date_neg_utc_offset boolean,
committer_date timestamptz,
committer_date_offset smallint,
committer_date_neg_utc_offset boolean,
type revision_type,
directory sha1_git,
message bytea,
author_id bigint,
author_fullname bytea,
author_name bytea,
author_email bytea,
committer_id bigint,
committer_fullname bytea,
committer_name bytea,
committer_email bytea,
metadata jsonb,
synthetic boolean,
parents bytea[],
object_id bigint,
extra_headers bytea[][]
);
-- "git style" revision log. Similar to swh_revision_list(), but returning all
-- information associated to each revision, and expanding authors/committers
create or replace function swh_revision_log(root_revisions bytea[], num_revs bigint default NULL)
returns setof revision_entry
language sql
stable
as $$
select t.id, r.date, r.date_offset, r.date_neg_utc_offset,
r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
r.type, r.directory, r.message,
a.id, a.fullname, a.name, a.email,
c.id, c.fullname, c.name, c.email,
r.metadata, r.synthetic, t.parents, r.object_id, r.extra_headers
from swh_revision_list(root_revisions, num_revs) as t
left join revision r on t.id = r.id
left join person a on a.id = r.author
left join person c on c.id = r.committer;
$$;
-- Detailed entry for a release
create type release_entry as
(
id sha1_git,
target sha1_git,
target_type object_type,
date timestamptz,
date_offset smallint,
date_neg_utc_offset boolean,
name bytea,
comment bytea,
synthetic boolean,
author_id bigint,
author_fullname bytea,
author_name bytea,
author_email bytea,
object_id bigint
);
-- Create entries in person from tmp_revision
create or replace function swh_person_add_from_revision()
returns void
language plpgsql
as $$
begin
with t as (
select author_fullname as fullname, author_name as name, author_email as email from tmp_revision
union
select committer_fullname as fullname, committer_name as name, committer_email as email from tmp_revision
) insert into person (fullname, name, email)
select distinct on (fullname) fullname, name, email from t
where not exists (
select 1
from person p
where t.fullname = p.fullname
);
return;
end
$$;
-- Create entries in revision from tmp_revision
create or replace function swh_revision_add()
returns void
language plpgsql
as $$
begin
perform swh_person_add_from_revision();
insert into revision (id, date, date_offset, date_neg_utc_offset, committer_date, committer_date_offset, committer_date_neg_utc_offset, type, directory, message, author, committer, metadata, synthetic, extra_headers)
select t.id, t.date, t.date_offset, t.date_neg_utc_offset, t.committer_date, t.committer_date_offset, t.committer_date_neg_utc_offset, t.type, t.directory, t.message, a.id, c.id, t.metadata, t.synthetic, t.extra_headers
from tmp_revision t
left join person a on a.fullname = t.author_fullname
left join person c on c.fullname = t.committer_fullname;
return;
end
$$;
-- Create entries in extid from tmp_extid
-- operates in bulk: 0. swh_mktemp(extid), 1. COPY to tmp_extid,
-- 2. call this function
create or replace function swh_extid_add()
returns void
language plpgsql
as $$
begin
- insert into extid (extid_type, extid, target_type, target)
- select distinct t.extid_type, t.extid, t.target_type, t.target
+ insert into extid (extid_type, extid, extid_version, target_type, target)
+ select distinct t.extid_type, t.extid, t.extid_version, t.target_type, t.target
from tmp_extid t
on conflict do nothing;
return;
end
$$;
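-- With the new extid_version column, the bulk protocol described above becomes
-- (illustrative; the COPY payload is up to the caller):
--   select swh_mktemp('extid');
--   copy tmp_extid (extid_type, extid, extid_version, target_type, target) from stdin;
--   select swh_extid_add();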
-- Create entries in person from tmp_release
create or replace function swh_person_add_from_release()
returns void
language plpgsql
as $$
begin
with t as (
select distinct author_fullname as fullname, author_name as name, author_email as email from tmp_release
where author_fullname is not null
) insert into person (fullname, name, email)
select distinct on (fullname) fullname, name, email from t
where not exists (
select 1
from person p
where t.fullname = p.fullname
);
return;
end
$$;
-- Create entries in release from tmp_release
create or replace function swh_release_add()
returns void
language plpgsql
as $$
begin
perform swh_person_add_from_release();
insert into release (id, target, target_type, date, date_offset, date_neg_utc_offset, name, comment, author, synthetic)
select distinct t.id, t.target, t.target_type, t.date, t.date_offset, t.date_neg_utc_offset, t.name, t.comment, a.id, t.synthetic
from tmp_release t
left join person a on a.fullname = t.author_fullname
where not exists (select 1 from release where t.id = release.id);
return;
end
$$;
-- add a new origin_visit for origin origin_id at date.
--
-- Returns the new visit id.
create or replace function swh_origin_visit_add(origin_url text, date timestamptz, type text)
returns bigint
language sql
as $$
with origin_id as (
select id
from origin
where url = origin_url
), last_known_visit as (
select coalesce(max(visit), 0) as visit
from origin_visit
where origin = (select id from origin_id)
)
insert into origin_visit (origin, date, type, visit)
values ((select id from origin_id), date, type,
(select visit from last_known_visit) + 1)
returning visit;
$$;
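-- Example (illustrative; the origin URL is a placeholder):
--   select swh_origin_visit_add('https://example.com/repo.git', now(), 'git');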
create or replace function swh_snapshot_add(snapshot_id sha1_git)
returns void
language plpgsql
as $$
declare
snapshot_object_id snapshot.object_id%type;
begin
select object_id from snapshot where id = snapshot_id into snapshot_object_id;
if snapshot_object_id is null then
insert into snapshot (id) values (snapshot_id) returning object_id into snapshot_object_id;
insert into snapshot_branch (name, target_type, target)
select name, target_type, target from tmp_snapshot_branch tmp
where not exists (
select 1
from snapshot_branch sb
where sb.name = tmp.name
and sb.target = tmp.target
and sb.target_type = tmp.target_type
)
on conflict do nothing;
insert into snapshot_branches (snapshot_id, branch_id)
select snapshot_object_id, sb.object_id as branch_id
from tmp_snapshot_branch tmp
join snapshot_branch sb
using (name, target, target_type)
where tmp.target is not null and tmp.target_type is not null
union
select snapshot_object_id, sb.object_id as branch_id
from tmp_snapshot_branch tmp
join snapshot_branch sb
using (name)
where tmp.target is null and tmp.target_type is null
and sb.target is null and sb.target_type is null;
end if;
truncate table tmp_snapshot_branch;
end;
$$;
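-- Expected call sequence (illustrative; the snapshot id is a placeholder):
--   select swh_mktemp_snapshot_branch();
--   -- COPY branch rows into tmp_snapshot_branch, then:
--   select swh_snapshot_add('\x9e4dd2b40d1b46b70917c0949aa2195c823a648e');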
create type snapshot_result as (
snapshot_id sha1_git,
name bytea,
target bytea,
target_type snapshot_target
);
create or replace function swh_snapshot_get_by_id(id sha1_git,
branches_from bytea default '', branches_count bigint default null,
target_types snapshot_target[] default NULL,
branch_name_include_substring bytea default NULL,
branch_name_exclude_prefix bytea default NULL)
returns setof snapshot_result
language sql
stable
as $$
-- with small limits, the "naive" version of this query can degenerate into
-- using the deduplication index on snapshot_branch (name, target,
-- target_type); The planner happily scans several hundred million rows.
-- Do the query in two steps: first pull the relevant branches for the given
-- snapshot (filtering them by type), then do the limiting. This two-step
-- process guides the planner into using the proper index.
with filtered_snapshot_branches as (
select swh_snapshot_get_by_id.id as snapshot_id, name, target, target_type
from snapshot_branches
inner join snapshot_branch on snapshot_branches.branch_id = snapshot_branch.object_id
where snapshot_id = (select object_id from snapshot where snapshot.id = swh_snapshot_get_by_id.id)
and (target_types is null or target_type = any(target_types))
order by name
)
select snapshot_id, name, target, target_type
from filtered_snapshot_branches
where name >= branches_from
and (branch_name_include_substring is null or name like '%'||branch_name_include_substring||'%')
and (branch_name_exclude_prefix is null or name not like branch_name_exclude_prefix||'%')
order by name limit branches_count;
$$;
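-- Example (illustrative; the snapshot id is a placeholder): fetch the first
-- 100 release branches, then page by passing the last returned name as
-- branches_from:
--   select * from swh_snapshot_get_by_id(
--     '\x9e4dd2b40d1b46b70917c0949aa2195c823a648e',
--     branches_from => '', branches_count => 100,
--     target_types => ARRAY['release']::snapshot_target[]);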
create type snapshot_size as (
target_type snapshot_target,
count bigint
);
create or replace function swh_snapshot_count_branches(id sha1_git,
branch_name_exclude_prefix bytea default NULL)
returns setof snapshot_size
language sql
stable
as $$
SELECT target_type, count(name)
from swh_snapshot_get_by_id(swh_snapshot_count_branches.id,
branch_name_exclude_prefix => swh_snapshot_count_branches.branch_name_exclude_prefix)
group by target_type;
$$;
-- Absolute path: directory reference + complete path relative to it
create type content_dir as (
directory sha1_git,
path unix_path
);
-- Find the containing directory of a given content, specified by sha1
-- (note: *not* sha1_git).
--
-- Return a pair (dir_it, path) where path is a UNIX path that, from the
-- directory root, reach down to a file with the desired content. Return NULL
-- if no match is found.
--
-- In case of multiple paths (i.e., pretty much always), an arbitrary one is
-- chosen.
create or replace function swh_content_find_directory(content_id sha1)
returns content_dir
language sql
stable
as $$
with recursive path as (
-- Recursively build a path from the requested content to a root
-- directory. Each iteration returns a pair (dir_id, filename) where
-- filename is relative to dir_id. Stops when no parent directory can
-- be found.
(select dir.id as dir_id, dir_entry_f.name as name, 0 as depth
from directory_entry_file as dir_entry_f
join content on content.sha1_git = dir_entry_f.target
join directory as dir on dir.file_entries @> array[dir_entry_f.id]
where content.sha1 = content_id
limit 1)
union all
(select dir.id as dir_id,
(dir_entry_d.name || '/' || path.name)::unix_path as name,
path.depth + 1
from path
join directory_entry_dir as dir_entry_d on dir_entry_d.target = path.dir_id
join directory as dir on dir.dir_entries @> array[dir_entry_d.id]
limit 1)
)
select dir_id, name from path order by depth desc limit 1;
$$;
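-- Example (illustrative; the sha1 is a placeholder):
--   select * from swh_content_find_directory('\x34973274ccef6ab4dfaaf86599792fa9c3fe4689');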
-- Find the visit of origin closest to date visit_date
-- Breaks ties by selecting the largest visit id
create or replace function swh_visit_find_by_date(origin_url text, visit_date timestamptz default NOW())
returns setof origin_visit
language plpgsql
stable
as $$
declare
origin_id bigint;
begin
select id into origin_id from origin where url=origin_url;
return query
with closest_two_visits as ((
select ov, (date - visit_date), visit as interval
from origin_visit ov
where ov.origin = origin_id
and ov.date >= visit_date
order by ov.date asc, ov.visit desc
limit 1
) union (
select ov, (visit_date - date), visit as interval
from origin_visit ov
where ov.origin = origin_id
and ov.date < visit_date
order by ov.date desc, ov.visit desc
limit 1
)) select (ov).* from closest_two_visits order by interval, visit limit 1;
end
$$;
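-- Example (illustrative; URL and date are placeholders):
--   select * from swh_visit_find_by_date('https://example.com/repo.git',
--                                        '2021-01-01T00:00:00Z');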
-- Object listing by object_id
create or replace function swh_content_list_by_object_id(
min_excl bigint,
max_incl bigint
)
returns setof content
language sql
stable
as $$
select * from content
where object_id > min_excl and object_id <= max_incl
order by object_id;
$$;
create or replace function swh_revision_list_by_object_id(
min_excl bigint,
max_incl bigint
)
returns setof revision_entry
language sql
stable
as $$
with revs as (
select * from revision
where object_id > min_excl and object_id <= max_incl
)
select r.id, r.date, r.date_offset, r.date_neg_utc_offset,
r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
r.type, r.directory, r.message,
a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic,
array(select rh.parent_id::bytea from revision_history rh where rh.id = r.id order by rh.parent_rank)
as parents, r.object_id, r.extra_headers
from revs r
left join person a on a.id = r.author
left join person c on c.id = r.committer
order by r.object_id;
$$;
create or replace function swh_release_list_by_object_id(
min_excl bigint,
max_incl bigint
)
returns setof release_entry
language sql
stable
as $$
with rels as (
select * from release
where object_id > min_excl and object_id <= max_incl
)
select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment,
r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id
from rels r
left join person p on p.id = r.author
order by r.object_id;
$$;
-- simple counter mapping a textual label to an integer value
create type counter as (
label text,
value bigint
);
-- return statistics about the number of tuples in various SWH tables
--
-- Note: the returned values are based on postgres internal statistics
-- (pg_class table), which are only updated daily (by autovacuum) or so
create or replace function swh_stat_counters()
returns setof counter
language sql
stable
as $$
select object_type as label, value as value
from object_counts
where object_type in (
'content',
'directory',
'directory_entry_dir',
'directory_entry_file',
'directory_entry_rev',
'origin',
'origin_visit',
'person',
'release',
'revision',
'revision_history',
'skipped_content',
'snapshot'
);
$$;
create or replace function swh_update_counter(object_type text)
returns void
language plpgsql
as $$
begin
execute format('
insert into object_counts
(value, last_update, object_type)
values
((select count(*) from %1$I), NOW(), %1$L)
on conflict (object_type) do update set
value = excluded.value,
last_update = excluded.last_update',
object_type);
return;
end;
$$;
create or replace function swh_update_counter_bucketed()
returns void
language plpgsql
as $$
declare
query text;
line_to_update int;
new_value bigint;
begin
select
object_counts_bucketed.line,
format(
'select count(%I) from %I where %s',
coalesce(identifier, '*'),
object_type,
coalesce(
concat_ws(
' and ',
case when bucket_start is not null then
format('%I >= %L', identifier, bucket_start) -- lower bound condition, inclusive
end,
case when bucket_end is not null then
format('%I < %L', identifier, bucket_end) -- upper bound condition, exclusive
end
),
'true'
)
)
from object_counts_bucketed
order by coalesce(last_update, now() - '1 month'::interval) asc
limit 1
into line_to_update, query;
execute query into new_value;
update object_counts_bucketed
set value = new_value,
last_update = now()
where object_counts_bucketed.line = line_to_update;
END
$$;
create or replace function swh_update_counters_from_buckets()
returns trigger
language plpgsql
as $$
begin
with to_update as (
select object_type, sum(value) as value, max(last_update) as last_update
from object_counts_bucketed ob1
where not exists (
select 1 from object_counts_bucketed ob2
where ob1.object_type = ob2.object_type
and value is null
)
group by object_type
) update object_counts
set
value = to_update.value,
last_update = to_update.last_update
from to_update
where
object_counts.object_type = to_update.object_type
and object_counts.value != to_update.value;
return null;
end
$$;
create trigger update_counts_from_bucketed
after insert or update
on object_counts_bucketed
for each row
when (NEW.line % 256 = 0)
execute procedure swh_update_counters_from_buckets();
diff --git a/swh/storage/sql/60-indexes.sql b/swh/storage/sql/60-indexes.sql
index 1a7a6065..3f2f1354 100644
--- a/swh/storage/sql/60-indexes.sql
+++ b/swh/storage/sql/60-indexes.sql
@@ -1,293 +1,293 @@
-- psql variables to get the current database flavor
select swh_get_dbflavor() = 'read_replica' as dbflavor_read_replica \gset
select swh_get_dbflavor() != 'read_replica' as dbflavor_does_deduplication \gset
select swh_get_dbflavor() = 'mirror' as dbflavor_mirror \gset
select swh_get_dbflavor() = 'default' as dbflavor_default \gset
-- content
create unique index concurrently content_pkey on content(sha1);
alter table content add primary key using index content_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on content(sha1_git);
\else
create index concurrently on content(sha1_git);
\endif
create index concurrently on content(sha256);
create index concurrently on content(blake2s256);
\if :dbflavor_default
create unique index concurrently on content(object_id); -- to be reviewed
create index concurrently on content(ctime); -- to be reviewed
\endif
-- origin
create unique index concurrently origin_pkey on origin(id);
alter table origin add primary key using index origin_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on origin using btree(url);
\else
create index concurrently on origin using btree(url);
\endif
create index concurrently on origin using gin (url gin_trgm_ops);
create index concurrently on origin using btree(digest(url, 'sha1'));
-- skipped_content
\if :dbflavor_does_deduplication
alter table skipped_content add constraint skipped_content_sha1_sha1_git_sha256_key unique (sha1, sha1_git, sha256);
\endif
create index concurrently on skipped_content(sha1);
create index concurrently on skipped_content(sha1_git);
create index concurrently on skipped_content(sha256);
create index concurrently on skipped_content(blake2s256);
create unique index concurrently on skipped_content(object_id);
\if :dbflavor_default
alter table skipped_content add constraint skipped_content_origin_fkey foreign key (origin) references origin(id) not valid;
alter table skipped_content validate constraint skipped_content_origin_fkey;
\endif
-- directory
create unique index concurrently directory_pkey on directory(id);
alter table directory add primary key using index directory_pkey;
\if :dbflavor_default
create index concurrently on directory using gin (dir_entries); -- to be reviewed
create index concurrently on directory using gin (file_entries); -- to be reviewed
create index concurrently on directory using gin (rev_entries); -- to be reviewed
create unique index concurrently on directory(object_id); -- to be reviewed
\endif
-- directory_entry_dir
create unique index concurrently directory_entry_dir_pkey on directory_entry_dir(id);
alter table directory_entry_dir add primary key using index directory_entry_dir_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on directory_entry_dir(target, name, perms);
\endif
-- directory_entry_file
create unique index concurrently directory_entry_file_pkey on directory_entry_file(id);
alter table directory_entry_file add primary key using index directory_entry_file_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on directory_entry_file(target, name, perms);
\endif
-- directory_entry_rev
create unique index concurrently directory_entry_rev_pkey on directory_entry_rev(id);
alter table directory_entry_rev add primary key using index directory_entry_rev_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on directory_entry_rev(target, name, perms);
\endif
-- person
create unique index concurrently person_pkey on person(id);
alter table person add primary key using index person_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on person(fullname);
\else
create index concurrently on person(fullname); -- to be reviewed
\endif
\if :dbflavor_default
create index concurrently on person(name); -- to be reviewed
create index concurrently on person(email); -- to be reviewed
\endif
-- revision
create unique index concurrently revision_pkey on revision(id);
alter table revision add primary key using index revision_pkey;
\if :dbflavor_does_deduplication
alter table revision add constraint revision_author_fkey foreign key (author) references person(id) not valid;
alter table revision validate constraint revision_author_fkey;
alter table revision add constraint revision_committer_fkey foreign key (committer) references person(id) not valid;
alter table revision validate constraint revision_committer_fkey;
alter table revision
add constraint revision_date_neg_utc_offset_not_null
check (date is null or date_neg_utc_offset is not null)
not valid;
alter table revision
add constraint revision_committer_date_neg_utc_offset_not_null
check (committer_date is null or committer_date_neg_utc_offset is not null)
not valid;
alter table revision
validate constraint revision_date_neg_utc_offset_not_null;
alter table revision
validate constraint revision_committer_date_neg_utc_offset_not_null;
\endif
\if :dbflavor_default
create index concurrently on revision(directory); -- to be reviewed
create unique index concurrently on revision(object_id); -- to be reviewed
\endif
-- revision_history
create unique index concurrently revision_history_pkey on revision_history(id, parent_rank);
alter table revision_history add primary key using index revision_history_pkey;
\if :dbflavor_default
create index concurrently on revision_history(parent_id); -- to be reviewed
\endif
\if :dbflavor_does_deduplication
alter table revision_history add constraint revision_history_id_fkey foreign key (id) references revision(id) not valid;
alter table revision_history validate constraint revision_history_id_fkey;
\endif
-- snapshot
create unique index concurrently snapshot_pkey on snapshot(object_id);
alter table snapshot add primary key using index snapshot_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on snapshot(id);
\else
create index concurrently on snapshot(id);
\endif
-- snapshot_branch
create unique index concurrently snapshot_branch_pkey on snapshot_branch(object_id);
alter table snapshot_branch add primary key using index snapshot_branch_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently on snapshot_branch (target_type, target, name);
alter table snapshot_branch add constraint snapshot_branch_target_check check ((target_type is null) = (target is null)) not valid;
alter table snapshot_branch validate constraint snapshot_branch_target_check;
alter table snapshot_branch add constraint snapshot_target_check check (target_type not in ('content', 'directory', 'revision', 'release', 'snapshot') or length(target) = 20) not valid;
alter table snapshot_branch validate constraint snapshot_target_check;
create unique index concurrently on snapshot_branch (name) where target_type is null and target is null;
\endif
-- snapshot_branches
create unique index concurrently snapshot_branches_pkey on snapshot_branches(snapshot_id, branch_id);
alter table snapshot_branches add primary key using index snapshot_branches_pkey;
\if :dbflavor_does_deduplication
alter table snapshot_branches add constraint snapshot_branches_snapshot_id_fkey foreign key (snapshot_id) references snapshot(object_id) not valid;
alter table snapshot_branches validate constraint snapshot_branches_snapshot_id_fkey;
alter table snapshot_branches add constraint snapshot_branches_branch_id_fkey foreign key (branch_id) references snapshot_branch(object_id) not valid;
alter table snapshot_branches validate constraint snapshot_branches_branch_id_fkey;
\endif
-- origin_visit
create unique index concurrently origin_visit_pkey on origin_visit(origin, visit);
alter table origin_visit add primary key using index origin_visit_pkey;
\if :dbflavor_default
create index concurrently on origin_visit(date); -- to be reviewed
create index concurrently origin_visit_type_date on origin_visit(type, date); -- to be reviewed
\endif
\if :dbflavor_does_deduplication
alter table origin_visit add constraint origin_visit_origin_fkey foreign key (origin) references origin(id) not valid;
alter table origin_visit validate constraint origin_visit_origin_fkey;
\endif
-- origin_visit_status
create unique index concurrently origin_visit_status_pkey on origin_visit_status(origin, visit, date);
alter table origin_visit_status add primary key using index origin_visit_status_pkey;
\if :dbflavor_default
alter table origin_visit_status
add constraint origin_visit_status_origin_visit_fkey
foreign key (origin, visit)
references origin_visit(origin, visit) not valid;
alter table origin_visit_status validate constraint origin_visit_status_origin_visit_fkey;
\endif
-- release
create unique index concurrently release_pkey on release(id);
alter table release add primary key using index release_pkey;
\if :dbflavor_default
create index concurrently on release(target, target_type); -- to be reviewed
create unique index concurrently on release(object_id); -- to be reviewed
\endif
\if :dbflavor_does_deduplication
alter table release add constraint release_author_fkey foreign key (author) references person(id) not valid;
alter table release validate constraint release_author_fkey;
alter table release
add constraint release_date_neg_utc_offset_not_null
check (date is null or date_neg_utc_offset is not null)
not valid;
alter table release
validate constraint release_date_neg_utc_offset_not_null;
-- if the author is null, then the date must be null
alter table release add constraint release_author_date_check check ((date is null) or (author is not null)) not valid;
alter table release validate constraint release_author_date_check;
\endif
-- metadata_fetcher
create unique index metadata_fetcher_pkey on metadata_fetcher(id);
alter table metadata_fetcher add primary key using index metadata_fetcher_pkey;
\if :dbflavor_does_deduplication
create unique index metadata_fetcher_name_version on metadata_fetcher(name, version);
\else
create index metadata_fetcher_name_version on metadata_fetcher(name, version);
\endif
-- metadata_authority
create unique index concurrently metadata_authority_pkey on metadata_authority(id);
alter table metadata_authority add primary key using index metadata_authority_pkey;
\if :dbflavor_does_deduplication
create unique index concurrently metadata_authority_type_url on metadata_authority(type, url);
\else
create index concurrently metadata_authority_type_url on metadata_authority(type, url);
\endif
-- raw_extrinsic_metadata
create unique index concurrently raw_extrinsic_metadata_pkey on raw_extrinsic_metadata(id);
alter table raw_extrinsic_metadata add primary key using index raw_extrinsic_metadata_pkey;
create index concurrently raw_extrinsic_metadata_content_authority_date on raw_extrinsic_metadata(target, authority_id, discovery_date);
\if :dbflavor_default
alter table raw_extrinsic_metadata add constraint raw_extrinsic_metadata_authority_fkey foreign key (authority_id) references metadata_authority(id) not valid;
alter table raw_extrinsic_metadata validate constraint raw_extrinsic_metadata_authority_fkey;
alter table raw_extrinsic_metadata add constraint raw_extrinsic_metadata_fetcher_fkey foreign key (fetcher_id) references metadata_fetcher(id) not valid;
alter table raw_extrinsic_metadata validate constraint raw_extrinsic_metadata_fetcher_fkey;
\endif
-- object_counts
create unique index concurrently object_counts_pkey on object_counts(object_type);
alter table object_counts add primary key using index object_counts_pkey;
-- object_counts_bucketed
create unique index concurrently object_counts_bucketed_pkey on object_counts_bucketed(line);
alter table object_counts_bucketed add primary key using index object_counts_bucketed_pkey;
-- extid
-- used to query by (extid_type, extid) + to deduplicate the whole row
-create unique index concurrently on extid(extid_type, extid, target_type, target);
+create unique index concurrently on extid(extid_type, extid, extid_version, target_type, target);
create index concurrently on extid(target_type, target);
diff --git a/swh/storage/tests/storage_data.py b/swh/storage/tests/storage_data.py
index 43e9d98f..6ce8f08c 100644
--- a/swh/storage/tests/storage_data.py
+++ b/swh/storage/tests/storage_data.py
@@ -1,706 +1,715 @@
# Copyright (C) 2015-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
from typing import Tuple
import attr
from swh.model import from_disk
from swh.model.hashutil import hash_to_bytes
from swh.model.identifiers import CoreSWHID, ExtendedObjectType, ExtendedSWHID
from swh.model.identifiers import ObjectType as SwhidObjectType
from swh.model.model import (
Content,
Directory,
DirectoryEntry,
ExtID,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
ObjectType,
Origin,
OriginVisit,
Person,
RawExtrinsicMetadata,
Release,
Revision,
RevisionType,
SkippedContent,
Snapshot,
SnapshotBranch,
TargetType,
Timestamp,
TimestampWithTimezone,
)
class StorageData:
"""Data model objects to use within tests.
"""
content = Content(
data=b"42\n",
length=3,
sha1=hash_to_bytes("34973274ccef6ab4dfaaf86599792fa9c3fe4689"),
sha1_git=hash_to_bytes("d81cc0710eb6cf9efd5b920a8453e1e07157b6cd"),
sha256=hash_to_bytes(
"084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0"
),
blake2s256=hash_to_bytes(
"d5fe1939576527e42cfd76a9455a2432fe7f56669564577dd93c4280e76d661d"
),
status="visible",
)
content2 = Content(
data=b"4242\n",
length=5,
sha1=hash_to_bytes("61c2b3a30496d329e21af70dd2d7e097046d07b7"),
sha1_git=hash_to_bytes("36fade77193cb6d2bd826161a0979d64c28ab4fa"),
sha256=hash_to_bytes(
"859f0b154fdb2d630f45e1ecae4a862915435e663248bb8461d914696fc047cd"
),
blake2s256=hash_to_bytes(
"849c20fad132b7c2d62c15de310adfe87be94a379941bed295e8141c6219810d"
),
status="visible",
)
content3 = Content(
data=b"424242\n",
length=7,
sha1=hash_to_bytes("3e21cc4942a4234c9e5edd8a9cacd1670fe59f13"),
sha1_git=hash_to_bytes("c932c7649c6dfa4b82327d121215116909eb3bea"),
sha256=hash_to_bytes(
"92fb72daf8c6818288a35137b72155f507e5de8d892712ab96277aaed8cf8a36"
),
blake2s256=hash_to_bytes(
"76d0346f44e5a27f6bafdd9c2befd304aff83780f93121d801ab6a1d4769db11"
),
status="visible",
ctime=datetime.datetime(2019, 12, 1, tzinfo=datetime.timezone.utc),
)
contents: Tuple[Content, ...] = (content, content2, content3)
skipped_content = SkippedContent(
length=1024 * 1024 * 200,
sha1_git=hash_to_bytes("33e45d56f88993aae6a0198013efa80716fd8920"),
sha1=hash_to_bytes("43e45d56f88993aae6a0198013efa80716fd8920"),
sha256=hash_to_bytes(
"7bbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a"
),
blake2s256=hash_to_bytes(
"ade18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b"
),
reason="Content too long",
status="absent",
origin="file:///dev/zero",
)
skipped_content2 = SkippedContent(
length=1024 * 1024 * 300,
sha1_git=hash_to_bytes("44e45d56f88993aae6a0198013efa80716fd8921"),
sha1=hash_to_bytes("54e45d56f88993aae6a0198013efa80716fd8920"),
sha256=hash_to_bytes(
"8cbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a"
),
blake2s256=hash_to_bytes(
"9ce18b1adecb33f891ca36664da676e12c772cc193778aac9a137b8dc5834b9b"
),
reason="Content too long",
status="absent",
)
skipped_contents: Tuple[SkippedContent, ...] = (skipped_content, skipped_content2)
directory5 = Directory(
id=hash_to_bytes("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), entries=(),
)
directory = Directory(
id=hash_to_bytes("5256e856a0a0898966d6ba14feb4388b8b82d302"),
entries=tuple(
[
DirectoryEntry(
name=b"foo",
type="file",
target=content.sha1_git,
perms=from_disk.DentryPerms.content,
),
DirectoryEntry(
name=b"bar\xc3",
type="dir",
target=directory5.id,
perms=from_disk.DentryPerms.directory,
),
],
),
)
directory2 = Directory(
id=hash_to_bytes("8505808532953da7d2581741f01b29c04b1cb9ab"),
entries=tuple(
[
DirectoryEntry(
name=b"oof",
type="file",
target=content2.sha1_git,
perms=from_disk.DentryPerms.content,
)
],
),
)
directory3 = Directory(
id=hash_to_bytes("13089e6e544f78df7c9a40a3059050d10dee686a"),
entries=tuple(
[
DirectoryEntry(
name=b"foo",
type="file",
target=content.sha1_git,
perms=from_disk.DentryPerms.content,
),
DirectoryEntry(
name=b"subdir",
type="dir",
target=directory.id,
perms=from_disk.DentryPerms.directory,
),
DirectoryEntry(
name=b"hello",
type="file",
target=content2.sha1_git,
perms=from_disk.DentryPerms.content,
),
],
),
)
directory4 = Directory(
id=hash_to_bytes("cd5dfd9c09d9e99ed123bc7937a0d5fddc3cd531"),
entries=tuple(
[
DirectoryEntry(
name=b"subdir1",
type="dir",
target=directory3.id,
perms=from_disk.DentryPerms.directory,
)
],
),
)
directories: Tuple[Directory, ...] = (
directory2,
directory,
directory3,
directory4,
directory5,
)
revision = Revision(
id=hash_to_bytes("01a7114f36fddd5ef2511b2cadda237a68adbb12"),
message=b"hello",
author=Person(
name=b"Nicolas Dandrimont",
email=b"nicolas@example.com",
fullname=b"Nicolas Dandrimont ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567890, microseconds=0),
offset=120,
negative_utc=False,
),
committer=Person(
name=b"St\xc3fano Zacchiroli",
email=b"stefano@example.com",
fullname=b"St\xc3fano Zacchiroli ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1123456789, microseconds=0),
offset=120,
negative_utc=False,
),
parents=(),
type=RevisionType.GIT,
directory=directory.id,
metadata={
"checksums": {"sha1": "tarball-sha1", "sha256": "tarball-sha256",},
"signed-off-by": "some-dude",
},
extra_headers=(
(b"gpgsig", b"test123"),
(b"mergetag", b"foo\\bar"),
(b"mergetag", b"\x22\xaf\x89\x80\x01\x00"),
),
synthetic=True,
)
revision2 = Revision(
id=hash_to_bytes("a646dd94c912829659b22a1e7e143d2fa5ebde1b"),
message=b"hello again",
author=Person(
name=b"Roberto Dicosmo",
email=b"roberto@example.com",
fullname=b"Roberto Dicosmo ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
committer=Person(
name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1123456789, microseconds=220000,),
offset=0,
negative_utc=False,
),
parents=tuple([revision.id]),
type=RevisionType.GIT,
directory=directory2.id,
metadata=None,
extra_headers=(),
synthetic=False,
)
revision3 = Revision(
id=hash_to_bytes("beb2844dff30658e27573cb46eb55980e974b391"),
message=b"a simple revision with no parents this time",
author=Person(
name=b"Roberto Dicosmo",
email=b"roberto@example.com",
fullname=b"Roberto Dicosmo ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
committer=Person(
name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1127351742, microseconds=220000,),
offset=0,
negative_utc=False,
),
parents=tuple([revision.id, revision2.id]),
type=RevisionType.GIT,
directory=directory2.id,
metadata=None,
extra_headers=(),
synthetic=True,
)
revision4 = Revision(
id=hash_to_bytes("ae860aec43700c7f5a295e2ef47e2ae41b535dfe"),
message=b"parent of self.revision2",
author=Person(
name=b"me", email=b"me@soft.heri", fullname=b"me ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
committer=Person(
name=b"committer-dude",
email=b"committer@dude.com",
fullname=b"committer-dude ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1244567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
parents=tuple([revision3.id]),
type=RevisionType.GIT,
directory=directory.id,
metadata=None,
extra_headers=(),
synthetic=False,
)
git_revisions: Tuple[Revision, ...] = (revision, revision2, revision3, revision4)
hg_revision = Revision(
id=hash_to_bytes("951c9503541e7beaf002d7aebf2abd1629084c68"),
message=b"hello",
author=Person(
name=b"Nicolas Dandrimont",
email=b"nicolas@example.com",
fullname=b"Nicolas Dandrimont ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567890, microseconds=0),
offset=120,
negative_utc=False,
),
committer=Person(
name=b"St\xc3fano Zacchiroli",
email=b"stefano@example.com",
fullname=b"St\xc3fano Zacchiroli ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1123456789, microseconds=0),
offset=120,
negative_utc=False,
),
parents=(),
type=RevisionType.MERCURIAL,
directory=directory.id,
metadata={
"checksums": {"sha1": "tarball-sha1", "sha256": "tarball-sha256",},
"signed-off-by": "some-dude",
"node": "a316dfb434af2b451c1f393496b7eaeda343f543",
},
extra_headers=(),
synthetic=True,
)
hg_revision2 = Revision(
id=hash_to_bytes("df4afb063236300eb13b96a0d7fff03f7b7cbbaf"),
message=b"hello again",
author=Person(
name=b"Roberto Dicosmo",
email=b"roberto@example.com",
fullname=b"Roberto Dicosmo ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
committer=Person(
name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1123456789, microseconds=220000,),
offset=0,
negative_utc=False,
),
parents=tuple([hg_revision.id]),
type=RevisionType.MERCURIAL,
directory=directory2.id,
metadata=None,
extra_headers=(
(b"node", hash_to_bytes("fa1b7c84a9b40605b67653700f268349a6d6aca1")),
),
synthetic=False,
)
hg_revision3 = Revision(
id=hash_to_bytes("84d8e7081b47ebb88cad9fa1f25de5f330872a37"),
message=b"a simple revision with no parents this time",
author=Person(
name=b"Roberto Dicosmo",
email=b"roberto@example.com",
fullname=b"Roberto Dicosmo ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
committer=Person(
name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1127351742, microseconds=220000,),
offset=0,
negative_utc=False,
),
parents=tuple([hg_revision.id, hg_revision2.id]),
type=RevisionType.MERCURIAL,
directory=directory2.id,
metadata=None,
extra_headers=(
(b"node", hash_to_bytes("7f294a01c49065a90b3fe8b4ad49f08ce9656ef6")),
),
synthetic=True,
)
hg_revision4 = Revision(
id=hash_to_bytes("4683324ba26dfe941a72cc7552e86eaaf7c27fe3"),
message=b"parent of self.revision2",
author=Person(
name=b"me", email=b"me@soft.heri", fullname=b"me ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
committer=Person(
name=b"committer-dude",
email=b"committer@dude.com",
fullname=b"committer-dude ",
),
committer_date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1244567843, microseconds=220000,),
offset=-720,
negative_utc=False,
),
parents=tuple([hg_revision3.id]),
type=RevisionType.MERCURIAL,
directory=directory.id,
metadata=None,
extra_headers=(
(b"node", hash_to_bytes("f4160af0485c85823d9e829bae2c00b00a2e6297")),
),
synthetic=False,
)
hg_revisions: Tuple[Revision, ...] = (
hg_revision,
hg_revision2,
hg_revision3,
hg_revision4,
)
revisions: Tuple[Revision, ...] = git_revisions + hg_revisions
origins: Tuple[Origin, ...] = (
Origin(url="https://github.com/user1/repo1"),
Origin(url="https://github.com/user2/repo1"),
Origin(url="https://github.com/user3/repo1"),
Origin(url="https://gitlab.com/user1/repo1"),
Origin(url="https://gitlab.com/user2/repo1"),
Origin(url="https://forge.softwareheritage.org/source/repo1"),
Origin(url="https://example.рф/🏛️.txt"),
)
origin, origin2 = origins[:2]
metadata_authority = MetadataAuthority(
type=MetadataAuthorityType.DEPOSIT_CLIENT, url="http://hal.inria.example.com/",
)
metadata_authority2 = MetadataAuthority(
type=MetadataAuthorityType.REGISTRY, url="http://wikidata.example.com/",
)
authorities: Tuple[MetadataAuthority, ...] = (
metadata_authority,
metadata_authority2,
)
metadata_fetcher = MetadataFetcher(name="swh-deposit", version="0.0.1",)
metadata_fetcher2 = MetadataFetcher(name="swh-example", version="0.0.1",)
fetchers: Tuple[MetadataFetcher, ...] = (metadata_fetcher, metadata_fetcher2)
date_visit1 = datetime.datetime(2015, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc)
date_visit2 = datetime.datetime(2017, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc)
date_visit3 = datetime.datetime(2018, 1, 1, 23, 0, 0, tzinfo=datetime.timezone.utc)
type_visit1 = "git"
type_visit2 = "hg"
type_visit3 = "deb"
origin_visit = OriginVisit(
origin=origin.url, visit=1, date=date_visit1, type=type_visit1,
)
origin_visit2 = OriginVisit(
origin=origin.url, visit=2, date=date_visit2, type=type_visit1,
)
origin_visit3 = OriginVisit(
origin=origin2.url, visit=1, date=date_visit1, type=type_visit2,
)
origin_visits: Tuple[OriginVisit, ...] = (
origin_visit,
origin_visit2,
origin_visit3,
)
release = Release(
id=hash_to_bytes("f7f222093a18ec60d781070abec4a630c850b837"),
name=b"v0.0.1",
author=Person(
name=b"olasd", email=b"nic@olasd.fr", fullname=b"olasd ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1234567890, microseconds=0),
offset=42,
negative_utc=False,
),
target=revision.id,
target_type=ObjectType.REVISION,
message=b"synthetic release",
synthetic=True,
)
release2 = Release(
id=hash_to_bytes("db81a26783a3f4a9db07b4759ffc37621f159bb2"),
name=b"v0.0.2",
author=Person(
name=b"tony", email=b"ar@dumont.fr", fullname=b"tony ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1634366813, microseconds=0),
offset=-120,
negative_utc=False,
),
target=revision2.id,
target_type=ObjectType.REVISION,
message=b"v0.0.2\nMisc performance improvements + bug fixes",
synthetic=False,
)
release3 = Release(
id=hash_to_bytes("1c5d42e603ce2eea44917fadca76c78bad76aeb9"),
name=b"v0.0.2",
author=Person(
name=b"tony",
email=b"tony@ardumont.fr",
fullname=b"tony ",
),
date=TimestampWithTimezone(
timestamp=Timestamp(seconds=1634366813, microseconds=0),
offset=-120,
negative_utc=False,
),
target=revision3.id,
target_type=ObjectType.REVISION,
message=b"yet another synthetic release",
synthetic=True,
)
releases: Tuple[Release, ...] = (release, release2, release3)
snapshot = Snapshot(
id=hash_to_bytes("9b922e6d8d5b803c1582aabe5525b7b91150788e"),
branches={
b"master": SnapshotBranch(
target=revision.id, target_type=TargetType.REVISION,
),
},
)
empty_snapshot = Snapshot(
id=hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"), branches={},
)
complete_snapshot = Snapshot(
id=hash_to_bytes("db99fda25b43dc5cd90625ee4b0744751799c917"),
branches={
b"directory": SnapshotBranch(
target=directory.id, target_type=TargetType.DIRECTORY,
),
b"directory2": SnapshotBranch(
target=directory2.id, target_type=TargetType.DIRECTORY,
),
b"content": SnapshotBranch(
target=content.sha1_git, target_type=TargetType.CONTENT,
),
b"alias": SnapshotBranch(target=b"revision", target_type=TargetType.ALIAS,),
b"revision": SnapshotBranch(
target=revision.id, target_type=TargetType.REVISION,
),
b"release": SnapshotBranch(
target=release.id, target_type=TargetType.RELEASE,
),
b"snapshot": SnapshotBranch(
target=empty_snapshot.id, target_type=TargetType.SNAPSHOT,
),
b"dangling": None,
},
)
snapshots: Tuple[Snapshot, ...] = (snapshot, empty_snapshot, complete_snapshot)
content_metadata1 = RawExtrinsicMetadata(
target=ExtendedSWHID(
object_type=ExtendedObjectType.CONTENT, object_id=content.sha1_git
),
origin=origin.url,
discovery_date=datetime.datetime(
2015, 1, 1, 21, 0, 0, tzinfo=datetime.timezone.utc
),
authority=metadata_authority,
fetcher=metadata_fetcher,
format="json",
metadata=b'{"foo": "bar"}',
)
content_metadata2 = RawExtrinsicMetadata(
target=ExtendedSWHID(
object_type=ExtendedObjectType.CONTENT, object_id=content.sha1_git
),
origin=origin2.url,
discovery_date=datetime.datetime(
2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc
),
authority=metadata_authority,
fetcher=metadata_fetcher,
format="yaml",
metadata=b"foo: bar",
)
content_metadata3 = RawExtrinsicMetadata(
target=ExtendedSWHID(
object_type=ExtendedObjectType.CONTENT, object_id=content.sha1_git
),
discovery_date=datetime.datetime(
2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc
),
authority=attr.evolve(metadata_authority2, metadata=None),
fetcher=attr.evolve(metadata_fetcher2, metadata=None),
format="yaml",
metadata=b"foo: bar",
origin=origin.url,
visit=42,
snapshot=snapshot.swhid(),
release=release.swhid(),
revision=revision.swhid(),
directory=directory.swhid(),
path=b"/foo/bar",
)
content_metadata: Tuple[RawExtrinsicMetadata, ...] = (
content_metadata1,
content_metadata2,
content_metadata3,
)
origin_metadata1 = RawExtrinsicMetadata(
target=Origin(origin.url).swhid(),
discovery_date=datetime.datetime(
2015, 1, 1, 21, 0, 0, tzinfo=datetime.timezone.utc
),
authority=attr.evolve(metadata_authority, metadata=None),
fetcher=attr.evolve(metadata_fetcher, metadata=None),
format="json",
metadata=b'{"foo": "bar"}',
)
origin_metadata2 = RawExtrinsicMetadata(
target=Origin(origin.url).swhid(),
discovery_date=datetime.datetime(
2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc
),
authority=attr.evolve(metadata_authority, metadata=None),
fetcher=attr.evolve(metadata_fetcher, metadata=None),
format="yaml",
metadata=b"foo: bar",
)
origin_metadata3 = RawExtrinsicMetadata(
target=Origin(origin.url).swhid(),
discovery_date=datetime.datetime(
2017, 1, 1, 22, 0, 0, tzinfo=datetime.timezone.utc
),
authority=attr.evolve(metadata_authority2, metadata=None),
fetcher=attr.evolve(metadata_fetcher2, metadata=None),
format="yaml",
metadata=b"foo: bar",
)
origin_metadata: Tuple[RawExtrinsicMetadata, ...] = (
origin_metadata1,
origin_metadata2,
origin_metadata3,
)
extid1 = ExtID(
target=CoreSWHID(object_type=SwhidObjectType.REVISION, object_id=revision.id),
extid_type="git",
extid=revision.id,
)
extid2 = ExtID(
target=CoreSWHID(
object_type=SwhidObjectType.REVISION, object_id=hg_revision.id
),
extid_type="mercurial",
extid=hash_to_bytes("a316dfb434af2b451c1f393496b7eaeda343f543"),
)
extid3 = ExtID(
target=CoreSWHID(object_type=SwhidObjectType.DIRECTORY, object_id=directory.id),
extid_type="directory",
extid=b"something",
)
+ extid4 = ExtID(
+ target=CoreSWHID(
+ object_type=SwhidObjectType.DIRECTORY, object_id=directory2.id
+ ),
+ extid_type="directory",
+ extid=b"something",
+ extid_version=2,
+ )
extids: Tuple[ExtID, ...] = (
extid1,
extid2,
extid3,
+ extid4,
)
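For context (editorial sketch, not part of the patch): with the new field, two ExtID objects that differ only in extid_version can coexist, since the unique index now covers the version; the model-side default of 0 mirrors the schema default.

from swh.model.identifiers import CoreSWHID
from swh.model.identifiers import ObjectType as SwhidObjectType
from swh.model.model import ExtID

target = CoreSWHID(object_type=SwhidObjectType.DIRECTORY, object_id=b"\x01" * 20)
# Same (extid_type, extid, target), different versions: both rows are kept.
extid_v0 = ExtID(extid_type="directory", extid=b"something", target=target)  # extid_version defaults to 0
extid_v2 = ExtID(extid_type="directory", extid=b"something", target=target, extid_version=2)
assert extid_v0.extid_version == 0 and extid_v2.extid_version == 2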
diff --git a/swh/storage/tests/storage_tests.py b/swh/storage/tests/storage_tests.py
index e98b0492..0d9686fb 100644
--- a/swh/storage/tests/storage_tests.py
+++ b/swh/storage/tests/storage_tests.py
@@ -1,4492 +1,4531 @@
# Copyright (C) 2015-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import defaultdict
import datetime
from datetime import timedelta
import inspect
import itertools
import math
import random
from typing import Any, ClassVar, Dict, Iterator, Optional
from unittest.mock import MagicMock
import attr
from hypothesis import HealthCheck, given, settings, strategies
import pytest
from swh.core.api.classes import stream_results
from swh.model import from_disk
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes
from swh.model.hypothesis_strategies import objects
from swh.model.identifiers import CoreSWHID, ObjectType
from swh.model.model import (
Content,
Directory,
ExtID,
Origin,
OriginVisit,
OriginVisitStatus,
Person,
RawExtrinsicMetadata,
Revision,
SkippedContent,
Snapshot,
SnapshotBranch,
TargetType,
)
from swh.storage import get_storage
from swh.storage.common import origin_url_to_sha1 as sha1
from swh.storage.exc import HashCollision, StorageArgumentException
from swh.storage.interface import ListOrder, PagedResult, StorageInterface
from swh.storage.tests.conftest import function_scoped_fixture_check
from swh.storage.utils import (
content_hex_hashes,
now,
remove_keys,
round_to_milliseconds,
)
def transform_entries(
storage: StorageInterface, dir_: Directory, *, prefix: bytes = b""
) -> Iterator[Dict[str, Any]]:
"""Iterate through a directory's entries, and yields the items 'directory_ls' is
expected to return; including content metadata for file entries."""
for ent in dir_.entries:
if ent.type == "dir":
yield {
"dir_id": dir_.id,
"type": ent.type,
"target": ent.target,
"name": prefix + ent.name,
"perms": ent.perms,
"status": None,
"sha1": None,
"sha1_git": None,
"sha256": None,
"length": None,
}
elif ent.type == "file":
contents = storage.content_find({"sha1_git": ent.target})
assert contents
ent_dict = contents[0].to_dict()
for key in ["ctime", "blake2s256"]:
ent_dict.pop(key, None)
ent_dict.update(
{
"dir_id": dir_.id,
"type": ent.type,
"target": ent.target,
"name": prefix + ent.name,
"perms": ent.perms,
}
)
yield ent_dict
def assert_contents_ok(
expected_contents, actual_contents, keys_to_check={"sha1", "data"}
):
"""Assert that a given list of contents matches on a given set of keys.
"""
for k in keys_to_check:
expected_list = set([c.get(k) for c in expected_contents])
actual_list = set([c.get(k) for c in actual_contents])
assert actual_list == expected_list, k
class LazyContent(Content):
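# Test double for a content whose raw bytes are only materialized on demand:
# with_data() returns a plain Content carrying the actual data (hard-coded to
# b"42\n", i.e. the same bytes as sample_data.content).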
def with_data(self):
return Content.from_dict({**self.to_dict(), "data": b"42\n"})
class TestStorage:
"""Main class for Storage testing.
This class is used as-is to test local storage (see TestLocalStorage
below) and remote storage (see TestRemoteStorage in
test_remote_storage.py).
We need to have the two classes inherit from this base class
separately to avoid nosetests running the tests from the base
class twice.
"""
maxDiff = None # type: ClassVar[Optional[int]]
def test_types(self, swh_storage_backend_config):
"""Checks all methods of StorageInterface are implemented by this
backend, and that they have the same signature."""
# Create an instance of the protocol (which cannot be instantiated
# directly, so this creates a subclass, then instantiates it)
interface = type("_", (StorageInterface,), {})()
storage = get_storage(**swh_storage_backend_config)
assert "content_add" in dir(interface)
missing_methods = []
for meth_name in dir(interface):
if meth_name.startswith("_"):
continue
interface_meth = getattr(interface, meth_name)
try:
concrete_meth = getattr(storage, meth_name)
except AttributeError:
if not getattr(interface_meth, "deprecated_endpoint", False):
# The backend is missing a (non-deprecated) endpoint
missing_methods.append(meth_name)
continue
expected_signature = inspect.signature(interface_meth)
actual_signature = inspect.signature(concrete_meth)
assert expected_signature == actual_signature, meth_name
assert missing_methods == []
# If all the assertions above succeed, then this one should too.
# But there's no harm in double-checking.
# And we could replace the assertions above by this one, but unlike
# the assertions above, it doesn't explain what is missing.
assert isinstance(storage, StorageInterface)
def test_check_config(self, swh_storage):
assert swh_storage.check_config(check_write=True)
assert swh_storage.check_config(check_write=False)
def test_content_add(self, swh_storage, sample_data):
cont = sample_data.content
insertion_start_time = now()
actual_result = swh_storage.content_add([cont])
insertion_end_time = now()
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont.length,
}
assert swh_storage.content_get_data(cont.sha1) == cont.data
expected_cont = attr.evolve(cont, data=None)
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
for obj in contents:
assert insertion_start_time <= obj.ctime
assert obj.ctime <= insertion_end_time
assert obj == expected_cont
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["content"] == 1
def test_content_add_from_lazy_content(self, swh_storage, sample_data):
cont = sample_data.content
lazy_content = LazyContent.from_dict(cont.to_dict())
insertion_start_time = now()
actual_result = swh_storage.content_add([lazy_content])
insertion_end_time = now()
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont.length,
}
# retrieving the content from the storage with the correct 'data' field
# ensures that the lazy with_data() has indeed been called
assert swh_storage.content_get_data(cont.sha1) == cont.data
expected_cont = attr.evolve(lazy_content, data=None, ctime=None)
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
for obj in contents:
assert insertion_start_time <= obj.ctime
assert obj.ctime <= insertion_end_time
assert attr.evolve(obj, ctime=None).to_dict() == expected_cont.to_dict()
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["content"] == 1
def test_content_get_data_missing(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
swh_storage.content_add([cont])
# Query a single missing content
actual_content_data = swh_storage.content_get_data(cont2.sha1)
assert actual_content_data is None
# Check content_get does not abort after finding a missing content
actual_content_data = swh_storage.content_get_data(cont.sha1)
assert actual_content_data == cont.data
actual_content_data = swh_storage.content_get_data(cont2.sha1)
assert actual_content_data is None
def test_content_add_different_input(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
actual_result = swh_storage.content_add([cont, cont2])
assert actual_result == {
"content:add": 2,
"content:add:bytes": cont.length + cont2.length,
}
def test_content_add_twice(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
actual_result = swh_storage.content_add([cont])
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont.length,
}
assert len(swh_storage.journal_writer.journal.objects) == 1
actual_result = swh_storage.content_add([cont, cont2])
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont2.length,
}
assert 2 <= len(swh_storage.journal_writer.journal.objects) <= 3
assert len(swh_storage.content_find(cont.to_dict())) == 1
assert len(swh_storage.content_find(cont2.to_dict())) == 1
def test_content_add_collision(self, swh_storage, sample_data):
cont1 = sample_data.content
# create (corrupted) content with same sha1{,_git} but != sha256
sha256_array = bytearray(cont1.sha256)
sha256_array[0] += 1
cont1b = attr.evolve(cont1, sha256=bytes(sha256_array))
with pytest.raises(HashCollision) as cm:
swh_storage.content_add([cont1, cont1b])
exc = cm.value
actual_algo = exc.algo
assert actual_algo in ["sha1", "sha1_git"]
actual_id = exc.hash_id
assert actual_id == getattr(cont1, actual_algo).hex()
collisions = exc.args[2]
assert len(collisions) == 2
assert collisions == [
content_hex_hashes(cont1.hashes()),
content_hex_hashes(cont1b.hashes()),
]
assert exc.colliding_content_hashes() == [
cont1.hashes(),
cont1b.hashes(),
]
def test_content_add_duplicate(self, swh_storage, sample_data):
cont = sample_data.content
swh_storage.content_add([cont, cont])
assert swh_storage.content_get_data(cont.sha1) == cont.data
def test_content_update(self, swh_storage, sample_data):
cont1 = sample_data.content
if hasattr(swh_storage, "journal_writer"):
swh_storage.journal_writer.journal = None # TODO, not supported
swh_storage.content_add([cont1])
# alter the sha1_git for example
cont1b = attr.evolve(
cont1, sha1_git=hash_to_bytes("3a60a5275d0333bf13468e8b3dcab90f4046e654")
)
swh_storage.content_update([cont1b.to_dict()], keys=["sha1_git"])
actual_contents = swh_storage.content_get([cont1.sha1])
expected_content = attr.evolve(cont1b, data=None)
assert actual_contents == [expected_content]
def test_content_add_metadata(self, swh_storage, sample_data):
cont = attr.evolve(sample_data.content, data=None, ctime=now())
actual_result = swh_storage.content_add_metadata([cont])
assert actual_result == {
"content:add": 1,
}
expected_cont = cont
assert swh_storage.content_get([cont.sha1]) == [expected_cont]
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
for obj in contents:
obj = attr.evolve(obj, ctime=None)
assert obj == cont
def test_content_add_metadata_different_input(self, swh_storage, sample_data):
contents = sample_data.contents[:2]
cont = attr.evolve(contents[0], data=None, ctime=now())
cont2 = attr.evolve(contents[1], data=None, ctime=now())
actual_result = swh_storage.content_add_metadata([cont, cont2])
assert actual_result == {
"content:add": 2,
}
def test_content_add_metadata_collision(self, swh_storage, sample_data):
cont1 = attr.evolve(sample_data.content, data=None, ctime=now())
# create (corrupted) content with same sha1{,_git} but != sha256
sha1_git_array = bytearray(cont1.sha256)
sha1_git_array[0] += 1
cont1b = attr.evolve(cont1, sha256=bytes(sha1_git_array))
with pytest.raises(HashCollision) as cm:
swh_storage.content_add_metadata([cont1, cont1b])
exc = cm.value
actual_algo = exc.algo
assert actual_algo in ["sha1", "sha1_git", "blake2s256"]
actual_id = exc.hash_id
assert actual_id == getattr(cont1, actual_algo).hex()
collisions = exc.args[2]
assert len(collisions) == 2
assert collisions == [
content_hex_hashes(cont1.hashes()),
content_hex_hashes(cont1b.hashes()),
]
assert exc.colliding_content_hashes() == [
cont1.hashes(),
cont1b.hashes(),
]
def test_content_add_objstorage_first(self, swh_storage, sample_data):
"""Tests the objstorage is written to before the DB and journal"""
cont = sample_data.content
swh_storage.objstorage.content_add = MagicMock(side_effect=Exception("Oops"))
# Try to add, but the objstorage crashes
try:
swh_storage.content_add([cont])
except Exception:
pass
# The DB must be written to after the objstorage, so the DB should be
# unchanged if the objstorage crashed
assert swh_storage.content_get_data(cont.sha1) is None
# The journal too
assert list(swh_storage.journal_writer.journal.objects) == []
def test_skipped_content_add(self, swh_storage, sample_data):
contents = sample_data.skipped_contents[:2]
cont = contents[0]
cont2 = attr.evolve(contents[1], blake2s256=None)
contents_dict = [c.to_dict() for c in [cont, cont2]]
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == [cont.hashes(), cont2.hashes()]
actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
assert 2 <= actual_result.pop("skipped_content:add") <= 3
assert actual_result == {}
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == []
def test_skipped_content_add_missing_hashes(self, swh_storage, sample_data):
cont, cont2 = [
attr.evolve(c, sha1_git=None) for c in sample_data.skipped_contents[:2]
]
contents_dict = [c.to_dict() for c in [cont, cont2]]
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert len(missing) == 2
actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
assert 2 <= actual_result.pop("skipped_content:add") <= 3
assert actual_result == {}
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == []
def test_skipped_content_missing_partial_hash(self, swh_storage, sample_data):
cont = sample_data.skipped_content
cont2 = attr.evolve(cont, sha1_git=None)
contents_dict = [c.to_dict() for c in [cont, cont2]]
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert len(missing) == 2
actual_result = swh_storage.skipped_content_add([cont])
assert actual_result.pop("skipped_content:add") == 1
assert actual_result == {}
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == [cont2.hashes()]
@pytest.mark.property_based
@settings(
deadline=None, # this test is very slow
suppress_health_check=function_scoped_fixture_check,
)
@given(
strategies.sets(
elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]),
min_size=0,
)
)
def test_content_missing(self, swh_storage, sample_data, algos):
algos |= {"sha1"}
content, missing_content = [sample_data.content2, sample_data.skipped_content]
swh_storage.content_add([content])
test_contents = [content.to_dict()]
missing_per_hash = defaultdict(list)
for i in range(256):
test_content = missing_content.to_dict()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_contents.append(test_content)
assert set(swh_storage.content_missing(test_contents)) == set(
missing_per_hash["sha1"]
)
for hash in algos:
assert set(
swh_storage.content_missing(test_contents, key_hash=hash)
) == set(missing_per_hash[hash])
@pytest.mark.property_based
@settings(suppress_health_check=function_scoped_fixture_check,)
@given(
strategies.sets(
elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]),
min_size=0,
)
)
def test_content_missing_unknown_algo(self, swh_storage, sample_data, algos):
algos |= {"sha1"}
content, missing_content = [sample_data.content2, sample_data.skipped_content]
swh_storage.content_add([content])
test_contents = [content.to_dict()]
missing_per_hash = defaultdict(list)
for i in range(16):
test_content = missing_content.to_dict()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_content["nonexisting_algo"] = b"\x00"
test_contents.append(test_content)
assert set(swh_storage.content_missing(test_contents)) == set(
missing_per_hash["sha1"]
)
for hash in algos:
assert set(
swh_storage.content_missing(test_contents, key_hash=hash)
) == set(missing_per_hash[hash])
def test_content_missing_per_sha1(self, swh_storage, sample_data):
# given
cont = sample_data.content
cont2 = sample_data.content2
missing_cont = sample_data.skipped_content
missing_cont2 = sample_data.skipped_content2
swh_storage.content_add([cont, cont2])
# when
gen = swh_storage.content_missing_per_sha1(
[cont.sha1, missing_cont.sha1, cont2.sha1, missing_cont2.sha1]
)
# then
assert list(gen) == [missing_cont.sha1, missing_cont2.sha1]
def test_content_missing_per_sha1_git(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
missing_cont = sample_data.skipped_content
missing_cont2 = sample_data.skipped_content2
swh_storage.content_add([cont, cont2])
contents = [
cont.sha1_git,
cont2.sha1_git,
missing_cont.sha1_git,
missing_cont2.sha1_git,
]
missing_contents = swh_storage.content_missing_per_sha1_git(contents)
assert list(missing_contents) == [missing_cont.sha1_git, missing_cont2.sha1_git]
missing_contents = swh_storage.content_missing_per_sha1_git([])
assert list(missing_contents) == []
def test_content_get_partition(self, swh_storage, swh_contents):
"""content_get_partition paginates results if limit exceeded"""
expected_contents = [
attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
]
actual_contents = []
for i in range(16):
actual_result = swh_storage.content_get_partition(i, 16)
assert actual_result.next_page_token is None
actual_contents.extend(actual_result.results)
assert len(actual_contents) == len(expected_contents)
for content in actual_contents:
assert content in expected_contents
assert content.ctime is None
def test_content_get_partition_full(self, swh_storage, swh_contents):
"""content_get_partition for a single partition returns all available contents
"""
expected_contents = [
attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
]
actual_result = swh_storage.content_get_partition(0, 1)
assert actual_result.next_page_token is None
actual_contents = actual_result.results
assert len(actual_contents) == len(expected_contents)
for content in actual_contents:
assert content in expected_contents
def test_content_get_partition_empty(self, swh_storage, swh_contents):
"""content_get_partition when at least one of the partitions is empty"""
expected_contents = {
cont.sha1 for cont in swh_contents if cont.status != "absent"
}
# nb_partitions = smallest power of 2 such that at least one of
# the partitions is empty
nb_partitions = 1 << math.floor(math.log2(len(swh_contents)) + 1)
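# e.g. with 6 contents: floor(log2(6) + 1) == 3, hence nb_partitions == 8 > 6,
# so by the pigeonhole principle at least one partition holds no content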
seen_sha1s = []
for i in range(nb_partitions):
actual_result = swh_storage.content_get_partition(
i, nb_partitions, limit=len(swh_contents) + 1
)
for content in actual_result.results:
seen_sha1s.append(content.sha1)
# Limit is higher than the max number of results
assert actual_result.next_page_token is None
assert set(seen_sha1s) == expected_contents
def test_content_get_partition_limit_none(self, swh_storage):
"""content_get_partition call with wrong limit input should fail"""
with pytest.raises(StorageArgumentException, match="limit should not be None"):
swh_storage.content_get_partition(1, 16, limit=None)
def test_content_get_partition_pagination_generate(self, swh_storage, swh_contents):
"""content_get_partition returns contents within range provided"""
expected_contents = [
attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
]
# retrieve contents
actual_contents = []
for i in range(4):
page_token = None
while True:
actual_result = swh_storage.content_get_partition(
i, 4, limit=3, page_token=page_token
)
actual_contents.extend(actual_result.results)
page_token = actual_result.next_page_token
if page_token is None:
break
assert len(actual_contents) == len(expected_contents)
for content in actual_contents:
assert content in expected_contents
@pytest.mark.parametrize("algo", sorted(DEFAULT_ALGORITHMS))
def test_content_get(self, swh_storage, sample_data, algo):
cont1, cont2 = sample_data.contents[:2]
swh_storage.content_add([cont1, cont2])
actual_contents = swh_storage.content_get(
[getattr(cont1, algo), getattr(cont2, algo)], algo
)
# only the metadata is retrieved, so neither data nor ctime is included
expected_contents = [attr.evolve(c, data=None) for c in [cont1, cont2]]
assert actual_contents == expected_contents
for content in actual_contents:
assert content.ctime is None
@pytest.mark.parametrize("algo", sorted(DEFAULT_ALGORITHMS))
def test_content_get_missing(self, swh_storage, sample_data, algo):
cont1, cont2 = sample_data.contents[:2]
assert cont1.sha1 != cont2.sha1
missing_cont = sample_data.skipped_content
swh_storage.content_add([cont1, cont2])
actual_contents = swh_storage.content_get(
[getattr(cont1, algo), getattr(cont2, algo), getattr(missing_cont, algo)],
algo,
)
expected_contents = [
attr.evolve(c, data=None) if c else None for c in [cont1, cont2, None]
]
assert actual_contents == expected_contents
def test_content_get_random(self, swh_storage, sample_data):
cont, cont2, cont3 = sample_data.contents[:3]
swh_storage.content_add([cont, cont2, cont3])
assert swh_storage.content_get_random() in {
cont.sha1_git,
cont2.sha1_git,
cont3.sha1_git,
}
def test_directory_add(self, swh_storage, sample_data):
content = sample_data.content
directory = sample_data.directories[1]
assert directory.entries[0].target == content.sha1_git
swh_storage.content_add([content])
init_missing = list(swh_storage.directory_missing([directory.id]))
assert [directory.id] == init_missing
actual_result = swh_storage.directory_add([directory])
assert actual_result == {"directory:add": 1}
assert ("directory", directory) in list(
swh_storage.journal_writer.journal.objects
)
actual_data = list(swh_storage.directory_ls(directory.id))
expected_data = list(transform_entries(swh_storage, directory))
for data in actual_data:
assert data in expected_data
after_missing = list(swh_storage.directory_missing([directory.id]))
assert after_missing == []
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["directory"] == 1
def test_directory_add_twice(self, swh_storage, sample_data):
directory = sample_data.directories[1]
actual_result = swh_storage.directory_add([directory])
assert actual_result == {"directory:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", directory)
]
actual_result = swh_storage.directory_add([directory])
assert actual_result == {"directory:add": 0}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", directory)
]
def test_directory_ls_recursive(self, swh_storage, sample_data):
# create a consistent dataset for the directories we want to list
content, content2 = sample_data.contents[:2]
swh_storage.content_add([content, content2])
dir1, dir2, dir3 = sample_data.directories[:3]
dir_ids = [d.id for d in [dir1, dir2, dir3]]
init_missing = list(swh_storage.directory_missing(dir_ids))
assert init_missing == dir_ids
actual_result = swh_storage.directory_add([dir1, dir2, dir3])
assert actual_result == {"directory:add": 3}
# List directory containing one file
actual_data = list(swh_storage.directory_ls(dir1.id, recursive=True))
expected_data = list(transform_entries(swh_storage, dir1))
for data in actual_data:
assert data in expected_data
# List directory containing a file and an unknown subdirectory
actual_data = list(swh_storage.directory_ls(dir2.id, recursive=True))
expected_data = list(transform_entries(swh_storage, dir2))
for data in actual_data:
assert data in expected_data
# List directory containing both a known and unknown subdirectory, entries
# should be both those of the directory and of the known subdir (up to contents)
actual_data = list(swh_storage.directory_ls(dir3.id, recursive=True))
expected_data = list(
itertools.chain(
transform_entries(swh_storage, dir3),
transform_entries(swh_storage, dir2, prefix=b"subdir/"),
)
)
for data in actual_data:
assert data in expected_data
def test_directory_ls_non_recursive(self, swh_storage, sample_data):
# create a consistent dataset for the directories we want to list
content, content2 = sample_data.contents[:2]
swh_storage.content_add([content, content2])
dir1, dir2, dir3, _, dir5 = sample_data.directories[:5]
dir_ids = [d.id for d in [dir1, dir2, dir3, dir5]]
init_missing = list(swh_storage.directory_missing(dir_ids))
assert init_missing == dir_ids
actual_result = swh_storage.directory_add([dir1, dir2, dir3, dir5])
assert actual_result == {"directory:add": 4}
# List directory containing a single file
actual_data = list(swh_storage.directory_ls(dir1.id))
expected_data = list(transform_entries(swh_storage, dir1))
for data in actual_data:
assert data in expected_data
# List directory containing a file and a known, empty subdirectory
actual_data = list(swh_storage.directory_ls(dir2.id))
expected_data = list(transform_entries(swh_storage, dir2))
for data in actual_data:
assert data in expected_data
# List directory containing a known subdirectory, entries should
# only be those of the parent directory, not of the subdir
actual_data = list(swh_storage.directory_ls(dir3.id))
expected_data = list(transform_entries(swh_storage, dir3))
for data in actual_data:
assert data in expected_data
def test_directory_ls_missing_content(self, swh_storage, sample_data):
swh_storage.directory_add([sample_data.directory2])
assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [
{
"dir_id": sample_data.directory2.id,
"length": None,
"name": b"oof",
"perms": 33188,
"sha1": None,
"sha1_git": None,
"sha256": None,
"status": None,
"target": sample_data.directory2.entries[0].target,
"type": "file",
},
]
def test_directory_ls_skipped_content(self, swh_storage, sample_data):
swh_storage.directory_add([sample_data.directory2])
cont = SkippedContent(
sha1_git=sample_data.directory2.entries[0].target,
sha1=b"c" * 20,
sha256=None,
blake2s256=None,
length=42,
status="absent",
reason="You need a premium subscription to access this content",
)
swh_storage.skipped_content_add([cont])
assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [
{
"dir_id": sample_data.directory2.id,
"length": 42,
"name": b"oof",
"perms": 33188,
"sha1": b"c" * 20,
"sha1_git": sample_data.directory2.entries[0].target,
"sha256": None,
"status": "absent",
"target": sample_data.directory2.entries[0].target,
"type": "file",
},
]
def test_directory_entry_get_by_path(self, swh_storage, sample_data):
cont, content2 = sample_data.contents[:2]
dir1, dir2, dir3, dir4, dir5 = sample_data.directories[:5]
# given
dir_ids = [d.id for d in [dir1, dir2, dir3, dir4, dir5]]
init_missing = list(swh_storage.directory_missing(dir_ids))
assert init_missing == dir_ids
actual_result = swh_storage.directory_add([dir3, dir4])
assert actual_result == {"directory:add": 2}
expected_entries = [
{
"dir_id": dir3.id,
"name": b"foo",
"type": "file",
"target": cont.sha1_git,
"sha1": None,
"sha1_git": None,
"sha256": None,
"status": None,
"perms": from_disk.DentryPerms.content,
"length": None,
},
{
"dir_id": dir3.id,
"name": b"subdir",
"type": "dir",
"target": dir2.id,
"sha1": None,
"sha1_git": None,
"sha256": None,
"status": None,
"perms": from_disk.DentryPerms.directory,
"length": None,
},
{
"dir_id": dir3.id,
"name": b"hello",
"type": "file",
"target": content2.sha1_git,
"sha1": None,
"sha1_git": None,
"sha256": None,
"status": None,
"perms": from_disk.DentryPerms.content,
"length": None,
},
]
# when (all must be found here)
for entry, expected_entry in zip(dir3.entries, expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
dir3.id, [entry.name]
)
assert actual_entry == expected_entry
# same, but deeper
for entry, expected_entry in zip(dir3.entries, expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
dir4.id, [b"subdir1", entry.name]
)
expected_entry = expected_entry.copy()
expected_entry["name"] = b"subdir1/" + expected_entry["name"]
assert actual_entry == expected_entry
# when (nothing should be found here since `dir2` is not persisted)
for entry in dir2.entries:
actual_entry = swh_storage.directory_entry_get_by_path(
dir2.id, [entry.name]
)
assert actual_entry is None
def test_directory_get_entries_pagination(self, swh_storage, sample_data):
# Note: this test assumes entries are returned in lexicographic order,
# which is not actually guaranteed by the interface.
dir_ = sample_data.directory3
entries = sorted(dir_.entries, key=lambda entry: entry.name)
swh_storage.directory_add(sample_data.directories)
# No pagination needed
actual_data = swh_storage.directory_get_entries(dir_.id)
assert actual_data == PagedResult(results=entries, next_page_token=None)
# A little pagination
actual_data = swh_storage.directory_get_entries(dir_.id, limit=2)
assert actual_data.results == entries[0:2]
assert actual_data.next_page_token is not None
actual_data = swh_storage.directory_get_entries(
dir_.id, page_token=actual_data.next_page_token
)
assert actual_data == PagedResult(results=entries[2:], next_page_token=None)
@pytest.mark.parametrize("limit", [1, 2, 3, 4, 5])
def test_directory_get_entries(self, swh_storage, sample_data, limit):
dir_ = sample_data.directory3
swh_storage.directory_add(sample_data.directories)
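# stream_results transparently follows next_page_token, so entries from
# every page are yielded as a single stream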
actual_data = list(
stream_results(swh_storage.directory_get_entries, dir_.id, limit=limit,)
)
assert sorted(actual_data) == sorted(dir_.entries)
def test_directory_get_random(self, swh_storage, sample_data):
dir1, dir2, dir3 = sample_data.directories[:3]
swh_storage.directory_add([dir1, dir2, dir3])
assert swh_storage.directory_get_random() in {
dir1.id,
dir2.id,
dir3.id,
}
def test_revision_add(self, swh_storage, sample_data):
revision = sample_data.revision
init_missing = swh_storage.revision_missing([revision.id])
assert list(init_missing) == [revision.id]
actual_result = swh_storage.revision_add([revision])
assert actual_result == {"revision:add": 1}
end_missing = swh_storage.revision_missing([revision.id])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.journal.objects) == [
("revision", revision)
]
# already there so nothing added
actual_result = swh_storage.revision_add([revision])
assert actual_result == {"revision:add": 0}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["revision"] == 1
def test_revision_add_twice(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
actual_result = swh_storage.revision_add([revision])
assert actual_result == {"revision:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("revision", revision)
]
actual_result = swh_storage.revision_add([revision, revision2])
assert actual_result == {"revision:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("revision", revision),
("revision", revision2),
]
def test_revision_add_name_clash(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
revision1 = attr.evolve(
revision,
author=Person(
fullname=b"John Doe ",
name=b"John Doe",
email=b"john.doe@example.com",
),
)
revision2 = attr.evolve(
revision2,
author=Person(
fullname=b"John Doe ",
name=b"John Doe ",
email=b"john.doe@example.com ",
),
)
actual_result = swh_storage.revision_add([revision1, revision2])
assert actual_result == {"revision:add": 2}
def test_revision_get_order(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
add_result = swh_storage.revision_add([revision, revision2])
assert add_result == {"revision:add": 2}
# order 1
actual_revisions = swh_storage.revision_get([revision.id, revision2.id])
assert actual_revisions == [revision, revision2]
# order 2
actual_revisions2 = swh_storage.revision_get([revision2.id, revision.id])
assert actual_revisions2 == [revision2, revision]
def test_revision_log(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# rev4 -is-child-of-> rev3 -> rev1, (rev2 -> rev1)
swh_storage.revision_add([revision1, revision2, revision3, revision4])
# when
results = list(swh_storage.revision_log([revision4.id]))
# for comparison purposes
actual_results = [Revision.from_dict(r) for r in results]
assert len(actual_results) == 4 # rev4 -child-> rev3 -> rev1, (rev2 -> rev1)
assert actual_results == [revision4, revision3, revision1, revision2]
def test_revision_log_with_limit(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# revision4 -is-child-of-> revision3
swh_storage.revision_add([revision3, revision4])
results = list(swh_storage.revision_log([revision4.id], 1))
actual_results = [Revision.from_dict(r) for r in results]
assert len(actual_results) == 1
assert actual_results[0] == revision4
def test_revision_log_unknown_revision(self, swh_storage, sample_data):
revision = sample_data.revision
rev_log = list(swh_storage.revision_log([revision.id]))
assert rev_log == []
def test_revision_shortlog(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# rev4 -is-child-of-> rev3 -> (rev1, rev2); rev2 -> rev1
swh_storage.revision_add([revision1, revision2, revision3, revision4])
results = list(swh_storage.revision_shortlog([revision4.id]))
actual_results = [[id, tuple(parents)] for (id, parents) in results]
assert len(actual_results) == 4
assert actual_results == [
[revision4.id, revision4.parents],
[revision3.id, revision3.parents],
[revision1.id, revision1.parents],
[revision2.id, revision2.parents],
]
def test_revision_shortlog_with_limit(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# revision4 -is-child-of-> revision3
swh_storage.revision_add([revision1, revision2, revision3, revision4])
results = list(swh_storage.revision_shortlog([revision4.id], 1))
actual_results = [[id, tuple(parents)] for (id, parents) in results]
assert len(actual_results) == 1
assert list(actual_results[0]) == [revision4.id, revision4.parents]
def test_revision_get(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
swh_storage.revision_add([revision])
actual_revisions = swh_storage.revision_get([revision.id, revision2.id])
assert len(actual_revisions) == 2
assert actual_revisions == [revision, None]
def test_revision_get_no_parents(self, swh_storage, sample_data):
revision = sample_data.revision
swh_storage.revision_add([revision])
actual_revision = swh_storage.revision_get([revision.id])[0]
assert revision.parents == ()
assert actual_revision.parents == () # no parents on this one
def test_revision_get_random(self, swh_storage, sample_data):
revision1, revision2, revision3 = sample_data.revisions[:3]
swh_storage.revision_add([revision1, revision2, revision3])
assert swh_storage.revision_get_random() in {
revision1.id,
revision2.id,
revision3.id,
}
def test_extid_add_git(self, swh_storage, sample_data):
gitids = [
revision.id
for revision in sample_data.revisions
if revision.type.value == "git"
]
extids = [
ExtID(
extid=gitid,
extid_type="git",
target=CoreSWHID(object_id=gitid, object_type=ObjectType.REVISION,),
)
for gitid in gitids
]
assert swh_storage.extid_get_from_extid("git", gitids) == []
assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == []
summary = swh_storage.extid_add(extids)
assert summary == {"extid:add": len(gitids)}
assert swh_storage.extid_get_from_extid("git", gitids) == extids
assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == extids
assert swh_storage.extid_get_from_extid("hg", gitids) == []
assert swh_storage.extid_get_from_target(ObjectType.RELEASE, gitids) == []
# check ExtIDs have been added to the journal
extids_in_journal = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "extid"
]
assert extids == extids_in_journal
def test_extid_add_hg(self, swh_storage, sample_data):
def get_node(revision):
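# The Mercurial node id is taken from the revision's extra_headers (b"node"
# entry) or, failing that, from the hex string in the legacy metadata dict.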
node = None
if revision.extra_headers:
node = dict(revision.extra_headers).get(b"node")
if node is None and revision.metadata:
node = hash_to_bytes(revision.metadata.get("node"))
return node
swhids = [
revision.id
for revision in sample_data.revisions
if revision.type.value == "hg"
]
extids = [
get_node(revision)
for revision in sample_data.revisions
if revision.type.value == "hg"
]
assert swh_storage.extid_get_from_extid("hg", extids) == []
assert swh_storage.extid_get_from_target(ObjectType.REVISION, swhids) == []
extid_objs = [
ExtID(
extid=hgid,
extid_type="hg",
+ extid_version=1,
target=CoreSWHID(object_id=swhid, object_type=ObjectType.REVISION,),
)
for hgid, swhid in zip(extids, swhids)
]
summary = swh_storage.extid_add(extid_objs)
assert summary == {"extid:add": len(swhids)}
assert swh_storage.extid_get_from_extid("hg", extids) == extid_objs
assert (
swh_storage.extid_get_from_target(ObjectType.REVISION, swhids) == extid_objs
)
assert swh_storage.extid_get_from_extid("git", extids) == []
assert swh_storage.extid_get_from_target(ObjectType.RELEASE, swhids) == []
# check ExtIDs have been added to the journal
extids_in_journal = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "extid"
]
assert extid_objs == extids_in_journal
def test_extid_add_twice(self, swh_storage, sample_data):
gitids = [
revision.id
for revision in sample_data.revisions
if revision.type.value == "git"
]
extids = [
ExtID(
extid=gitid,
extid_type="git",
target=CoreSWHID(object_id=gitid, object_type=ObjectType.REVISION,),
)
for gitid in gitids
]
summary = swh_storage.extid_add(extids)
assert summary == {"extid:add": len(gitids)}
# add them again, should be noop
summary = swh_storage.extid_add(extids)
# assert summary == {"extid:add": 0}
assert swh_storage.extid_get_from_extid("git", gitids) == extids
assert swh_storage.extid_get_from_target(ObjectType.REVISION, gitids) == extids
def test_extid_add_extid_multicity(self, swh_storage, sample_data):
ids = [
revision.id
for revision in sample_data.revisions
if revision.type.value == "git"
]
extids = [
ExtID(
extid=extid,
extid_type="git",
+ extid_version=2,
target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,),
)
for extid in ids
]
swh_storage.extid_add(extids)
# try to add "modified-extid" versions, should be added
extids2 = [
ExtID(
extid=extid,
extid_type="hg",
+ extid_version=2,
target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,),
)
for extid in ids
]
swh_storage.extid_add(extids2)
assert swh_storage.extid_get_from_extid("git", ids) == extids
assert swh_storage.extid_get_from_extid("hg", ids) == extids2
assert set(swh_storage.extid_get_from_target(ObjectType.REVISION, ids)) == {
*extids,
*extids2,
}
def test_extid_add_target_multicity(self, swh_storage, sample_data):
ids = [
revision.id
for revision in sample_data.revisions
if revision.type.value == "git"
]
extids = [
ExtID(
extid=extid,
extid_type="git",
target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,),
)
for extid in ids
]
swh_storage.extid_add(extids)
# try to add "modified" versions, should be added
extids2 = [
ExtID(
extid=extid,
extid_type="git",
target=CoreSWHID(object_id=extid, object_type=ObjectType.RELEASE,),
)
for extid in ids
]
swh_storage.extid_add(extids2)
assert set(swh_storage.extid_get_from_extid("git", ids)) == {*extids, *extids2}
assert swh_storage.extid_get_from_target(ObjectType.REVISION, ids) == extids
assert swh_storage.extid_get_from_target(ObjectType.RELEASE, ids) == extids2
+ def test_extid_version_behavior(self, swh_storage, sample_data):
+ ids = [
+ revision.id
+ for revision in sample_data.revisions
+ if revision.type.value == "git"
+ ]
+
+ # Insert extids with two different versions (the default 0 and an explicit 1)
+ extids = [
+ ExtID(
+ extid=extid,
+ extid_type="git",
+ target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,),
+ )
+ for extid in ids
+ ] + [
+ ExtID(
+ extid=extid,
+ extid_type="git",
+ extid_version=1,
+ target=CoreSWHID(object_id=extid, object_type=ObjectType.REVISION,),
+ )
+ for extid in ids
+ ]
+ swh_storage.extid_add(extids)
+
+ # Check that both versions get returned
+ for git_id in ids:
+ objs = swh_storage.extid_get_from_extid("git", [git_id])
+ assert len(objs) == 2
+ assert set(obj.extid_version for obj in objs) == {0, 1}
+ for swhid in ids:
+ objs = swh_storage.extid_get_from_target(ObjectType.REVISION, [swhid])
+ assert len(objs) == 2
+ assert set(obj.extid_version for obj in objs) == {0, 1}
+
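# Editorial sketch (not part of the patch): both extid getters return every
# stored version, so a caller that wants a single "current" mapping has to
# filter on extid_version itself. `latest_extids` is a hypothetical
# client-side helper, not a storage API:
#
#     def latest_extids(storage, extid_type, ids):
#         """Keep only the highest extid_version per (extid_type, extid) pair."""
#         best = {}
#         for obj in storage.extid_get_from_extid(extid_type, ids):
#             key = (obj.extid_type, obj.extid)
#             if key not in best or obj.extid_version > best[key].extid_version:
#                 best[key] = obj
#         return list(best.values())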
def test_release_add(self, swh_storage, sample_data):
release, release2 = sample_data.releases[:2]
init_missing = swh_storage.release_missing([release.id, release2.id])
assert list(init_missing) == [release.id, release2.id]
actual_result = swh_storage.release_add([release, release2])
assert actual_result == {"release:add": 2}
end_missing = swh_storage.release_missing([release.id, release2.id])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.journal.objects) == [
("release", release),
("release", release2),
]
# already present so nothing added
actual_result = swh_storage.release_add([release, release2])
assert actual_result == {"release:add": 0}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["release"] == 2
def test_release_add_no_author_date(self, swh_storage, sample_data):
full_release = sample_data.release
release = attr.evolve(full_release, author=None, date=None)
actual_result = swh_storage.release_add([release])
assert actual_result == {"release:add": 1}
end_missing = swh_storage.release_missing([release.id])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.journal.objects) == [
("release", release)
]
def test_release_add_twice(self, swh_storage, sample_data):
release, release2 = sample_data.releases[:2]
actual_result = swh_storage.release_add([release])
assert actual_result == {"release:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("release", release)
]
actual_result = swh_storage.release_add([release, release2, release, release2])
assert actual_result == {"release:add": 1}
assert set(swh_storage.journal_writer.journal.objects) == set(
[("release", release), ("release", release2),]
)
def test_release_add_name_clash(self, swh_storage, sample_data):
release, release2 = [
attr.evolve(
c,
author=Person(
fullname=b"John Doe ",
name=b"John Doe",
email=b"john.doe@example.com",
),
)
for c in sample_data.releases[:2]
]
actual_result = swh_storage.release_add([release, release2])
assert actual_result == {"release:add": 2}
def test_release_get(self, swh_storage, sample_data):
release, release2, release3 = sample_data.releases[:3]
# given
swh_storage.release_add([release, release2])
# when
actual_releases = swh_storage.release_get([release.id, release2.id])
# then
assert actual_releases == [release, release2]
unknown_releases = swh_storage.release_get([release3.id])
assert unknown_releases[0] is None
def test_release_get_order(self, swh_storage, sample_data):
release, release2 = sample_data.releases[:2]
add_result = swh_storage.release_add([release, release2])
assert add_result == {"release:add": 2}
# order 1
actual_releases = swh_storage.release_get([release.id, release2.id])
assert actual_releases == [release, release2]
# order 2
actual_releases2 = swh_storage.release_get([release2.id, release.id])
assert actual_releases2 == [release2, release]
def test_release_get_random(self, swh_storage, sample_data):
release, release2, release3 = sample_data.releases[:3]
swh_storage.release_add([release, release2, release3])
assert swh_storage.release_get_random() in {
release.id,
release2.id,
release3.id,
}
def test_origin_add(self, swh_storage, sample_data):
origins = list(sample_data.origins)
origin_urls = [o.url for o in origins]
assert swh_storage.origin_get(origin_urls) == [None] * len(origins)
stats = swh_storage.origin_add(origins)
assert stats == {"origin:add": len(origin_urls)}
actual_origins = swh_storage.origin_get(origin_urls)
assert actual_origins == origins
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin) for origin in origins]
)
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["origin"] == len(origins)
def test_origin_add_twice(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
add1 = swh_storage.origin_add([origin, origin2])
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin), ("origin", origin2),]
)
assert add1 == {"origin:add": 2}
add2 = swh_storage.origin_add([origin, origin2])
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin), ("origin", origin2),]
)
assert add2 == {"origin:add": 0}
def test_origin_add_twice_at_once(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
add1 = swh_storage.origin_add([origin, origin2, origin, origin2])
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin), ("origin", origin2),]
)
assert add1 == {"origin:add": 2}
add2 = swh_storage.origin_add([origin, origin2, origin, origin2])
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin), ("origin", origin2),]
)
assert add2 == {"origin:add": 0}
def test_origin_get(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
assert swh_storage.origin_get([origin.url]) == [None]
swh_storage.origin_add([origin])
actual_origins = swh_storage.origin_get([origin.url])
assert actual_origins == [origin]
actual_origins = swh_storage.origin_get([origin.url, "not://exists"])
assert actual_origins == [origin, None]
def _generate_random_visits(self, nb_visits=100, start=0, end=7):
"""Generate random visits within the last 2 months (to avoid
computations)
"""
visits = []
today = now()
for _ in range(nb_visits):
hours = random.randint(0, 24)
minutes = random.randint(0, 60)
seconds = random.randint(0, 60)
days = random.randint(0, 28)
weeks = random.randint(start, end)
date_visit = today - timedelta(
weeks=weeks, hours=hours, minutes=minutes, seconds=seconds, days=days
)
visits.append(date_visit)
return visits
def test_origin_visit_get__unknown_origin(self, swh_storage):
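# visits of an unknown origin yield an empty PagedResult rather than an error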
actual_page = swh_storage.origin_visit_get("foo")
assert actual_page.next_page_token is None
assert actual_page.results == []
assert actual_page == PagedResult()
def test_origin_visit_get__validation_failure(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
with pytest.raises(
StorageArgumentException, match="page_token must be a string"
):
swh_storage.origin_visit_get(origin.url, page_token=10) # not bytes
with pytest.raises(
StorageArgumentException, match="order must be a ListOrder value"
):
swh_storage.origin_visit_get(origin.url, order="foobar") # wrong order
def test_origin_visit_get_all(self, swh_storage, sample_data):
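# add three visits to one origin, then walk them with every combination of
# order (asc/desc), page_token and limit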
origin = sample_data.origin
swh_storage.origin_add([origin])
ov1, ov2, ov3 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
]
)
# order asc, no token, no limit
actual_page = swh_storage.origin_visit_get(origin.url)
assert actual_page.next_page_token is None
assert actual_page.results == [ov1, ov2, ov3]
# order asc, no token, limit
actual_page = swh_storage.origin_visit_get(origin.url, limit=2)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov1, ov2]
# order asc, token, no limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov3]
# order asc, no token, limit
actual_page = swh_storage.origin_visit_get(origin.url, limit=1)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov1]
# order asc, token, no limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov2, ov3]
# order asc, token, limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, limit=2
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov2, ov3]
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, limit=1
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov2]
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, limit=1
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov3]
# order desc, no token, no limit
actual_page = swh_storage.origin_visit_get(origin.url, order=ListOrder.DESC)
assert actual_page.next_page_token is None
assert actual_page.results == [ov3, ov2, ov1]
# order desc, no token, limit
actual_page = swh_storage.origin_visit_get(
origin.url, limit=2, order=ListOrder.DESC
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov3, ov2]
# order desc, token, no limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov1]
# order desc, no token, limit
actual_page = swh_storage.origin_visit_get(
origin.url, limit=1, order=ListOrder.DESC
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov3]
# order desc, token, no limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov2, ov1]
# order desc, token, limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, order=ListOrder.DESC, limit=1
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov2]
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov1]
def test_origin_visit_status_get__unknown_cases(self, swh_storage, sample_data):
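# unknown origins and unknown visit ids all yield empty result pages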
origin = sample_data.origin
actual_page = swh_storage.origin_visit_status_get("foobar", 1)
assert actual_page.next_page_token is None
assert actual_page.results == []
actual_page = swh_storage.origin_visit_status_get(origin.url, 1)
assert actual_page.next_page_token is None
assert actual_page.results == []
origin = sample_data.origin
swh_storage.origin_add([origin])
ov1 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
]
)[0]
actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit + 10)
assert actual_page.next_page_token is None
assert actual_page.results == []
def test_origin_visit_status_add_unknown_type(self, swh_storage, sample_data):
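# adding a status must fail while the origin (then the visit) is unknown,
# and only succeed once the corresponding visit has been added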
ov = OriginVisit(
origin=sample_data.origin.url,
date=now(),
type=sample_data.type_visit1,
visit=42,
)
ovs = OriginVisitStatus(
origin=ov.origin,
visit=ov.visit,
date=now(),
status="created",
snapshot=None,
)
with pytest.raises(StorageArgumentException):
swh_storage.origin_visit_status_add([ovs])
swh_storage.origin_add([sample_data.origin])
with pytest.raises(StorageArgumentException):
swh_storage.origin_visit_status_add([ovs])
swh_storage.origin_visit_add([ov])
swh_storage.origin_visit_status_add([ovs])
def test_origin_visit_status_get_all(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
date_visit3 = round_to_milliseconds(now())
date_visit1 = date_visit3 - datetime.timedelta(hours=2)
date_visit2 = date_visit3 - datetime.timedelta(hours=1)
assert date_visit1 < date_visit2 < date_visit3
ov1 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url, date=date_visit1, type=sample_data.type_visit1,
),
]
)[0]
ovs1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit1,
type=ov1.type,
status="created",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit2,
type=ov1.type,
status="partial",
snapshot=None,
)
ovs3 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit3,
type=ov1.type,
status="full",
snapshot=sample_data.snapshot.id,
metadata={},
)
swh_storage.origin_visit_status_add([ovs2, ovs3])
# order asc, no token, no limit
actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs1, ovs2, ovs3]
# order asc, no token, limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, limit=2
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ovs1, ovs2]
# order asc, token, no limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, page_token=next_page_token
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs3]
# order asc, no token, limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, limit=1
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ovs1]
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, page_token=next_page_token
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs2, ovs3]
# order asc, token, limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, page_token=next_page_token, limit=2
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs2, ovs3]
# order asc, no token, limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, limit=2
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ovs1, ovs2]
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, page_token=next_page_token, limit=1
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs3]
# order desc, no token, no limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs3, ovs2, ovs1]
# order desc, no token, limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, limit=2, order=ListOrder.DESC
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ovs3, ovs2]
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs1]
# order desc, no token, limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, order=ListOrder.DESC, limit=1
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ovs3]
# order desc, token, no limit
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs2, ovs1]
# order desc, token, limit
actual_page = swh_storage.origin_visit_status_get(
origin.url,
ov1.visit,
page_token=next_page_token,
order=ListOrder.DESC,
limit=1,
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ovs2]
actual_page = swh_storage.origin_visit_status_get(
origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ovs1]
def test_origin_visit_status_get_random(self, swh_storage, sample_data):
origins = sample_data.origins[:2]
swh_storage.origin_add(origins)
# Add some random visits within the selection range
visits = self._generate_random_visits()
visit_type = "git"
# Add visits to those origins
for origin in origins:
for date_visit in visits:
visit = swh_storage.origin_visit_add(
[OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)]
)[0]
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=visit.visit,
date=now(),
status="full",
snapshot=None,
)
]
)
swh_storage.refresh_stat_counters()
stats = swh_storage.stat_counters()
assert stats["origin"] == len(origins)
assert stats["origin_visit"] == len(origins) * len(visits)
random_ovs = swh_storage.origin_visit_status_get_random(visit_type)
assert random_ovs
assert random_ovs.origin is not None
assert random_ovs.origin in [o.url for o in origins]
assert random_ovs.type is not None
def test_origin_visit_status_get_random_nothing_found(
self, swh_storage, sample_data
):
origins = sample_data.origins
swh_storage.origin_add(origins)
visit_type = "hg"
# Add visits older than the random selection window, so that the random
# lookup below finds nothing
visits = self._generate_random_visits(nb_visits=3, start=13, end=24)
for origin in origins:
for date_visit in visits:
visit = swh_storage.origin_visit_add(
[OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)]
)[0]
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=visit.visit,
date=now(),
status="full",
snapshot=None,
)
]
)
random_origin_visit = swh_storage.origin_visit_status_get_random(visit_type)
assert random_origin_visit is None
def test_origin_get_by_sha1(self, swh_storage, sample_data):
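# origins can be looked up by the sha1 hash of their URL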
origin = sample_data.origin
assert swh_storage.origin_get([origin.url])[0] is None
swh_storage.origin_add([origin])
origins = list(swh_storage.origin_get_by_sha1([sha1(origin.url)]))
assert len(origins) == 1
assert origins[0]["url"] == origin.url
def test_origin_get_by_sha1_not_found(self, swh_storage, sample_data):
unknown_origin = sample_data.origin
assert swh_storage.origin_get([unknown_origin.url])[0] is None
origins = list(swh_storage.origin_get_by_sha1([sha1(unknown_origin.url)]))
assert len(origins) == 1
assert origins[0] is None
def test_origin_search_single_result(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
actual_page = swh_storage.origin_search(origin.url)
assert actual_page.next_page_token is None
assert actual_page.results == []
actual_page = swh_storage.origin_search(origin.url, regexp=True)
assert actual_page.next_page_token is None
assert actual_page.results == []
swh_storage.origin_add([origin])
actual_page = swh_storage.origin_search(origin.url)
assert actual_page.next_page_token is None
assert actual_page.results == [origin]
actual_page = swh_storage.origin_search(f".{origin.url[1:-1]}.", regexp=True)
assert actual_page.next_page_token is None
assert actual_page.results == [origin]
swh_storage.origin_add([origin2])
actual_page = swh_storage.origin_search(origin2.url)
assert actual_page.next_page_token is None
assert actual_page.results == [origin2]
actual_page = swh_storage.origin_search(f".{origin2.url[1:-1]}.", regexp=True)
assert actual_page.next_page_token is None
assert actual_page.results == [origin2]
def test_origin_search_no_regexp(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
swh_storage.origin_add([origin, origin2])
# no pagination
actual_page = swh_storage.origin_search("/")
assert actual_page.next_page_token is None
assert actual_page.results == [origin, origin2]
# offset=0
actual_page = swh_storage.origin_search("/", page_token=None, limit=1)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [origin]
# offset=1
actual_page = swh_storage.origin_search(
"/", page_token=next_page_token, limit=1
)
assert actual_page.next_page_token is None
assert actual_page.results == [origin2]
def test_origin_search_regexp_substring(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
swh_storage.origin_add([origin, origin2])
# no pagination
actual_page = swh_storage.origin_search("/", regexp=True)
assert actual_page.next_page_token is None
assert actual_page.results == [origin, origin2]
# offset=0
actual_page = swh_storage.origin_search(
"/", page_token=None, limit=1, regexp=True
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [origin]
# offset=1
actual_page = swh_storage.origin_search(
"/", page_token=next_page_token, limit=1, regexp=True
)
assert actual_page.next_page_token is None
assert actual_page.results == [origin2]
def test_origin_search_regexp_fullstring(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
swh_storage.origin_add([origin, origin2])
# no pagination
actual_page = swh_storage.origin_search(".*/.*", regexp=True)
assert actual_page.next_page_token is None
assert actual_page.results == [origin, origin2]
# offset=0
actual_page = swh_storage.origin_search(
".*/.*", page_token=None, limit=1, regexp=True
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [origin]
# offset=1
actual_page = swh_storage.origin_search(
".*/.*", page_token=next_page_token, limit=1, regexp=True
)
assert actual_page.next_page_token is None
assert actual_page.results == [origin2]
def test_origin_search_no_visit_types(self, swh_storage, sample_data):
origin = sample_data.origins[0]
swh_storage.origin_add([origin])
actual_page = swh_storage.origin_search(origin.url, visit_types=["git"])
assert actual_page.next_page_token is None
assert actual_page.results == []
def test_origin_search_with_visit_types(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
swh_storage.origin_add([origin, origin2])
swh_storage.origin_visit_add(
[
OriginVisit(origin=origin.url, date=now(), type="git"),
OriginVisit(origin=origin2.url, date=now(), type="svn"),
]
)
actual_page = swh_storage.origin_search(origin.url, visit_types=["git"])
assert actual_page.next_page_token is None
assert actual_page.results == [origin]
actual_page = swh_storage.origin_search(origin2.url, visit_types=["svn"])
assert actual_page.next_page_token is None
assert actual_page.results == [origin2]
def test_origin_search_multiple_visit_types(self, swh_storage, sample_data):
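# an origin matches as soon as one of its visit types is in the requested list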
origin = sample_data.origins[0]
swh_storage.origin_add([origin])
def _add_visit_type(visit_type):
swh_storage.origin_visit_add(
[OriginVisit(origin=origin.url, date=now(), type=visit_type)]
)
def _check_visit_types(visit_types):
actual_page = swh_storage.origin_search(origin.url, visit_types=visit_types)
assert actual_page.next_page_token is None
assert actual_page.results == [origin]
_add_visit_type("git")
_check_visit_types(["git"])
_check_visit_types(["git", "hg"])
_add_visit_type("hg")
_check_visit_types(["hg"])
_check_visit_types(["git", "hg"])
def test_origin_visit_add(self, swh_storage, sample_data):
origin1 = sample_data.origins[1]
swh_storage.origin_add([origin1])
date_visit = now()
date_visit2 = date_visit + datetime.timedelta(minutes=1)
date_visit = round_to_milliseconds(date_visit)
date_visit2 = round_to_milliseconds(date_visit2)
visit1 = OriginVisit(
origin=origin1.url, date=date_visit, type=sample_data.type_visit1,
)
visit2 = OriginVisit(
origin=origin1.url, date=date_visit2, type=sample_data.type_visit2,
)
# add once
ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2])
# then again (will be ignored as they already exist)
origin_visit1, origin_visit2 = swh_storage.origin_visit_add([ov1, ov2])
assert ov1 == origin_visit1
assert ov2 == origin_visit2
ovs1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit,
type=ov1.type,
status="created",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=ov2.origin,
visit=ov2.visit,
date=date_visit2,
type=ov2.type,
status="created",
snapshot=None,
)
actual_visits = swh_storage.origin_visit_get(origin1.url).results
expected_visits = [ov1, ov2]
assert len(expected_visits) == len(actual_visits)
for visit in expected_visits:
assert visit in actual_visits
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = list(
[("origin", origin1)]
+ [("origin_visit", visit) for visit in expected_visits] * 2
+ [("origin_visit_status", ovs) for ovs in [ovs1, ovs2]]
)
for obj in expected_objects:
assert obj in actual_objects
def test_origin_visit_add_validation(self, swh_storage, sample_data):
"""Unknown origin when adding visits should raise"""
visit = attr.evolve(sample_data.origin_visit, origin="something-unknown")
with pytest.raises(StorageArgumentException, match="Unknown origin"):
swh_storage.origin_visit_add([visit])
objects = list(swh_storage.journal_writer.journal.objects)
assert not objects
def test_origin_visit_status_add_validation(self, swh_storage):
"""Wrong origin_visit_status input should raise storage argument error"""
date_visit = now()
visit_status1 = OriginVisitStatus(
origin="unknown-origin-url",
visit=10,
date=date_visit,
status="full",
snapshot=None,
)
with pytest.raises(StorageArgumentException, match="Unknown origin"):
swh_storage.origin_visit_status_add([visit_status1])
objects = list(swh_storage.journal_writer.journal.objects)
assert not objects
def test_origin_visit_status_add(self, swh_storage, sample_data):
"""Correct origin visit statuses should add a new visit status
"""
snapshot = sample_data.snapshot
origin1 = sample_data.origins[1]
origin2 = Origin(url="new-origin")
swh_storage.origin_add([origin1, origin2])
ov1, ov2 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
OriginVisit(
origin=origin2.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
]
)
ovs1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=sample_data.date_visit1,
type=ov1.type,
status="created",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=ov2.origin,
visit=ov2.visit,
date=sample_data.date_visit2,
type=ov2.type,
status="created",
snapshot=None,
)
date_visit_now = round_to_milliseconds(now())
visit_status1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit_now,
type=ov1.type,
status="full",
snapshot=snapshot.id,
)
date_visit_now = round_to_milliseconds(now())
visit_status2 = OriginVisitStatus(
origin=ov2.origin,
visit=ov2.visit,
date=date_visit_now,
type=ov2.type,
status="ongoing",
snapshot=None,
metadata={"intrinsic": "something"},
)
stats = swh_storage.origin_visit_status_add([visit_status1, visit_status2])
assert stats == {"origin_visit_status:add": 2}
visit = swh_storage.origin_visit_get_latest(origin1.url, require_snapshot=True)
visit_status = swh_storage.origin_visit_status_get_latest(
origin1.url, visit.visit, require_snapshot=True
)
assert visit_status == visit_status1
visit = swh_storage.origin_visit_get_latest(origin2.url, require_snapshot=False)
visit_status = swh_storage.origin_visit_status_get_latest(
origin2.url, visit.visit, require_snapshot=False
)
assert origin2.url != origin1.url
assert visit_status == visit_status2
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_origins = [origin1, origin2]
expected_visits = [ov1, ov2]
expected_visit_statuses = [ovs1, ovs2, visit_status1, visit_status2]
expected_objects = (
[("origin", o) for o in expected_origins]
+ [("origin_visit", v) for v in expected_visits]
+ [("origin_visit_status", ovs) for ovs in expected_visit_statuses]
)
for obj in expected_objects:
assert obj in actual_objects
def test_origin_visit_status_add_twice(self, swh_storage, sample_data):
"""Correct origin visit statuses should add a new visit status
"""
snapshot = sample_data.snapshot
origin1 = sample_data.origins[1]
swh_storage.origin_add([origin1])
ov1 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
]
)[0]
ovs1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=sample_data.date_visit1,
type=ov1.type,
status="created",
snapshot=None,
)
date_visit_now = round_to_milliseconds(now())
visit_status1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit_now,
type=ov1.type,
status="full",
snapshot=snapshot.id,
)
stats = swh_storage.origin_visit_status_add([visit_status1])
assert stats == {"origin_visit_status:add": 1}
# second call will ignore existing entries (will send to storage though)
stats = swh_storage.origin_visit_status_add([visit_status1])
# ...so the storage still returns it as an addition
assert stats == {"origin_visit_status:add": 1}
visit_status = swh_storage.origin_visit_status_get_latest(ov1.origin, ov1.visit)
assert visit_status == visit_status1
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_origins = [origin1]
expected_visits = [ov1]
expected_visit_statuses = [ovs1, visit_status1, visit_status1]
# visit_status1 appears twice because it was written to the journal twice
expected_objects = (
[("origin", o) for o in expected_origins]
+ [("origin_visit", v) for v in expected_visits]
+ [("origin_visit_status", ovs) for ovs in expected_visit_statuses]
)
for obj in expected_objects:
assert obj in actual_objects
def test_origin_visit_find_by_date(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit1,
)
visit2 = OriginVisit(
origin=origin.url,
date=sample_data.date_visit3,
type=sample_data.type_visit2,
)
visit3 = OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit3,
)
ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
ovs1 = OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=sample_data.date_visit2,
status="ongoing",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=origin.url,
visit=ov2.visit,
date=sample_data.date_visit3,
status="ongoing",
snapshot=None,
)
ovs3 = OriginVisitStatus(
origin=origin.url,
visit=ov3.visit,
date=sample_data.date_visit2,
status="ongoing",
snapshot=None,
)
swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3])
# Simple case
actual_visit = swh_storage.origin_visit_find_by_date(
origin.url, sample_data.date_visit3
)
assert actual_visit == ov2
# There are two visits at the same date, the latest must be returned
actual_visit = swh_storage.origin_visit_find_by_date(
origin.url, sample_data.date_visit2
)
assert actual_visit == ov3
def test_origin_visit_find_by_date__unknown_origin(self, swh_storage, sample_data):
actual_visit = swh_storage.origin_visit_find_by_date(
"foo", sample_data.date_visit2
)
assert actual_visit is None
def test_origin_visit_get_by(self, swh_storage, sample_data):
snapshot = sample_data.snapshot
origins = sample_data.origins[:2]
swh_storage.origin_add(origins)
origin_url, origin_url2 = [o.url for o in origins]
visit = OriginVisit(
origin=origin_url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
# Add some other {origin, visit} entries
visit2 = OriginVisit(
origin=origin_url,
date=sample_data.date_visit3,
type=sample_data.type_visit3,
)
visit3 = OriginVisit(
origin=origin_url2,
date=sample_data.date_visit3,
type=sample_data.type_visit3,
)
swh_storage.origin_visit_add([visit2, visit3])
# when
visit1_metadata = {
"contents": 42,
"directories": 22,
}
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=origin_visit1.visit,
date=now(),
status="full",
snapshot=snapshot.id,
metadata=visit1_metadata,
)
]
)
actual_visit = swh_storage.origin_visit_get_by(origin_url, origin_visit1.visit)
assert actual_visit == origin_visit1
def test_origin_visit_get_by__no_result(self, swh_storage, sample_data):
actual_visit = swh_storage.origin_visit_get_by("unknown", 10) # unknown origin
assert actual_visit is None
origin = sample_data.origin
swh_storage.origin_add([origin])
actual_visit = swh_storage.origin_visit_get_by(origin.url, 999) # unknown visit
assert actual_visit is None
def test_origin_visit_get_latest_edge_cases(self, swh_storage, sample_data):
# unknown origin so no result
assert swh_storage.origin_visit_get_latest("unknown-origin") is None
# unknown type so no result
origin = sample_data.origin
swh_storage.origin_add([origin])
assert swh_storage.origin_visit_get_latest(origin.url, type="unknown") is None
# unknown allowed statuses should raise
with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"):
swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["unknown"]
)
def test_origin_visit_get_latest_filter_type(self, swh_storage, sample_data):
"""Filtering origin visit get latest with filter type should be ok
"""
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="git",
)
visit2 = OriginVisit(
origin=origin.url, date=sample_data.date_visit2, type="hg",
)
date_now = round_to_milliseconds(now())
visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",)
assert sample_data.date_visit1 < sample_data.date_visit2
assert sample_data.date_visit2 < date_now
ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
# Check type filter is ok
actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="git")
assert actual_visit == ov1
actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="hg")
assert actual_visit == ov3
actual_visit_unknown_type = swh_storage.origin_visit_get_latest(
origin.url, type="npm", # no visit matching that type
)
assert actual_visit_unknown_type is None
def test_origin_visit_get_latest(self, swh_storage, sample_data):
empty_snapshot, complete_snapshot = sample_data.snapshots[1:3]
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="git",
)
visit2 = OriginVisit(
origin=origin.url, date=sample_data.date_visit2, type="hg",
)
date_now = round_to_milliseconds(now())
visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",)
assert visit1.date < visit2.date
assert visit2.date < visit3.date
ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
# no filters, latest visit is the last one (whose date is most recent)
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# 3 visits, none has snapshot so nothing is returned
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
assert actual_visit is None
# visit are created with "created" status, so nothing will get returned
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["partial"]
)
assert actual_visit is None
# visit are created with "created" status, so most recent again
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["created"]
)
assert actual_visit == ov3
# Add snapshot to visit1; require_snapshot=True makes it return first visit
swh_storage.snapshot_add([complete_snapshot])
visit_status_with_snapshot = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=round_to_milliseconds(now()),
type=ov1.type,
status="ongoing",
snapshot=complete_snapshot.id,
)
swh_storage.origin_visit_status_add([visit_status_with_snapshot])
# only the first visit has a snapshot now
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
assert actual_visit == ov1
# only the first visit has a status ongoing now
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["ongoing"]
)
assert actual_visit == ov1
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov1.visit, require_snapshot=True
)
assert actual_visit_status == visit_status_with_snapshot
# ... and require_snapshot=False (the default) still returns the latest visit (3rd)
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=False
)
assert actual_visit == ov3
# no specific filter, this returns as before the latest visit
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# Status filter: all three visits are status=ongoing, so no visit
# returned
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"]
)
assert actual_visit is None
visit_status1_full = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=round_to_milliseconds(now()),
type=ov1.type,
status="full",
snapshot=complete_snapshot.id,
)
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_status_add([visit_status1_full])
# only the first visit has the full status
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"]
)
assert actual_visit == ov1
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov1.visit, allowed_statuses=["full"]
)
assert actual_visit_status == visit_status1_full
# no specific filter, this returns as before the latest visit
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# Add snapshot to visit2 and check that the new snapshot is returned
swh_storage.snapshot_add([empty_snapshot])
visit_status2_full = OriginVisitStatus(
origin=ov2.origin,
visit=ov2.visit,
date=round_to_milliseconds(now()),
type=ov2.type,
status="ongoing",
snapshot=empty_snapshot.id,
)
swh_storage.origin_visit_status_add([visit_status2_full])
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
# 2nd visit is most recent with a snapshot
assert actual_visit == ov2
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov2.visit, require_snapshot=True
)
assert actual_visit_status == visit_status2_full
# no specific filter, this returns as before the latest visit, 3rd one
actual_origin = swh_storage.origin_visit_get_latest(origin.url)
assert actual_origin == ov3
# full status is still the first visit
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"]
)
assert actual_visit == ov1
# Add snapshot to visit3 (same date as visit2)
visit_status3_with_snapshot = OriginVisitStatus(
origin=ov3.origin,
visit=ov3.visit,
date=round_to_milliseconds(now()),
type=ov3.type,
status="ongoing",
snapshot=complete_snapshot.id,
)
swh_storage.origin_visit_status_add([visit_status3_with_snapshot])
# full status is still the first visit
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"], require_snapshot=True,
)
assert actual_visit == ov1
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url,
visit=actual_visit.visit,
allowed_statuses=["full"],
require_snapshot=True,
)
assert actual_visit_status == visit_status1_full
# most recent is still the 3rd visit
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# 3rd visit has a snapshot now, so it's elected
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
assert actual_visit == ov3
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov3.visit, require_snapshot=True
)
assert actual_visit_status == visit_status3_with_snapshot
def test_origin_visit_get_latest__same_date(self, swh_storage, sample_data):
empty_snapshot, complete_snapshot = sample_data.snapshots[1:3]
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="git",
)
visit2 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="hg",
)
ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2])
# ties should be broken by using the visit id
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov2
def test_origin_visit_get_latest__not_last(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1, visit2 = sample_data.origin_visits[:2]
assert visit1.origin == origin.url
swh_storage.origin_visit_add([visit1])
ov1 = swh_storage.origin_visit_get_latest(origin.url)
# Add snapshot to visit1, latest snapshot = visit 1 snapshot
complete_snapshot = sample_data.snapshots[2]
swh_storage.snapshot_add([complete_snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=visit2.date,
status="partial",
snapshot=None,
)
]
)
assert visit1.date < visit2.date
# no snapshot associated to the visit, so None
visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["partial"], require_snapshot=True,
)
assert visit is None
date_now = now()
assert visit2.date < date_now
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=date_now,
status="full",
snapshot=complete_snapshot.id,
)
]
)
swh_storage.origin_visit_add(
[OriginVisit(origin=origin.url, date=now(), type=visit1.type,)]
)
visit = swh_storage.origin_visit_get_latest(origin.url, require_snapshot=True)
assert visit is not None
def test_origin_visit_status_get_latest__validation(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="git",
)
# unknown allowed statuses should raise
with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"):
swh_storage.origin_visit_status_get_latest(
origin.url, visit1.visit, allowed_statuses=["unknown"]
)
def test_origin_visit_status_get_latest(self, swh_storage, sample_data):
snapshot = sample_data.snapshots[2]
origin1 = sample_data.origin
swh_storage.origin_add([origin1])
# to have some reference visits
ov1, ov2 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
]
)
swh_storage.snapshot_add([snapshot])
date_now = round_to_milliseconds(now())
assert sample_data.date_visit1 < sample_data.date_visit2
assert sample_data.date_visit2 < date_now
ovs1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=sample_data.date_visit1,
type=ov1.type,
status="partial",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=sample_data.date_visit2,
type=ov1.type,
status="ongoing",
snapshot=None,
)
ovs3 = OriginVisitStatus(
origin=ov2.origin,
visit=ov2.visit,
date=sample_data.date_visit2
+ datetime.timedelta(minutes=1), # to not be ignored
type=ov2.type,
status="ongoing",
snapshot=None,
)
ovs4 = OriginVisitStatus(
origin=ov2.origin,
visit=ov2.visit,
date=date_now,
type=ov2.type,
status="full",
snapshot=snapshot.id,
metadata={"something": "wicked"},
)
swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3, ovs4])
# unknown origin so no result
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
"unknown-origin", ov1.visit
)
assert actual_origin_visit is None
# unknown visit so no result
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
ov1.origin, ov1.visit + 10
)
assert actual_origin_visit is None
# Two visits, both with no snapshot, take the most recent
actual_origin_visit2 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov1.visit
)
assert isinstance(actual_origin_visit2, OriginVisitStatus)
assert actual_origin_visit2 == ovs2
assert ovs2.origin == origin1.url
assert ovs2.visit == ov1.visit
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
origin1.url, ov1.visit, require_snapshot=True
)
# there is no visit status with a snapshot yet for that visit
assert actual_origin_visit is None
actual_origin_visit2 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov1.visit, allowed_statuses=["partial", "ongoing"]
)
# the most recent status among the allowed ones ("ongoing") is elected
assert actual_origin_visit2 == ovs2
assert actual_origin_visit2.status == "ongoing"
actual_origin_visit4 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov2.visit, require_snapshot=True
)
assert actual_origin_visit4 == ovs4
assert actual_origin_visit4.snapshot == snapshot.id
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
origin1.url, ov2.visit, require_snapshot=True, allowed_statuses=["ongoing"]
)
# nothing matches: the only status with a snapshot has status "full", not "ongoing"
assert actual_origin_visit is None
actual_origin_visit3 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov2.visit, allowed_statuses=["ongoing"]
)
assert actual_origin_visit3 == ovs3
def test_person_fullname_unicity(self, swh_storage, sample_data):
revision, rev2 = sample_data.revisions[0:2]
# create a revision with the same committer fullname but without name and email
revision2 = attr.evolve(
rev2,
committer=Person(
fullname=revision.committer.fullname, name=None, email=None
),
)
swh_storage.revision_add([revision, revision2])
# when getting added revisions
revisions = swh_storage.revision_get([revision.id, revision2.id])
# then check committers are the same
assert revisions[0].committer == revisions[1].committer
def test_snapshot_add_get_empty(self, swh_storage, sample_data):
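# add an empty snapshot, reference it from a visit status, then check both
# snapshot_get and the journal contents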
empty_snapshot = sample_data.snapshots[1]
empty_snapshot_dict = empty_snapshot.to_dict()
origin = sample_data.origin
swh_storage.origin_add([origin])
ov1 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
]
)[0]
actual_result = swh_storage.snapshot_add([empty_snapshot])
assert actual_result == {"snapshot:add": 1}
date_now = now()
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_now,
type=ov1.type,
status="full",
snapshot=empty_snapshot.id,
)
]
)
by_id = swh_storage.snapshot_get(empty_snapshot.id)
assert by_id == {**empty_snapshot_dict, "next_branch": None}
ovs1 = OriginVisitStatus.from_dict(
{
"origin": ov1.origin,
"date": sample_data.date_visit1,
"type": ov1.type,
"visit": ov1.visit,
"status": "created",
"snapshot": None,
"metadata": None,
}
)
ovs2 = OriginVisitStatus.from_dict(
{
"origin": ov1.origin,
"date": date_now,
"type": ov1.type,
"visit": ov1.visit,
"status": "full",
"metadata": None,
"snapshot": empty_snapshot.id,
}
)
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("origin", origin),
("origin_visit", ov1),
("origin_visit_status", ovs1,),
("snapshot", empty_snapshot),
("origin_visit_status", ovs2,),
]
for obj in expected_objects:
assert obj in actual_objects
def test_snapshot_add_get_complete(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
complete_snapshot_dict = complete_snapshot.to_dict()
origin = sample_data.origin
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
actual_result = swh_storage.snapshot_add([complete_snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=complete_snapshot.id,
)
]
)
assert actual_result == {"snapshot:add": 1}
by_id = swh_storage.snapshot_get(complete_snapshot.id)
assert by_id == {**complete_snapshot_dict, "next_branch": None}
def test_snapshot_add_many(self, swh_storage, sample_data):
snapshot, _, complete_snapshot = sample_data.snapshots[:3]
actual_result = swh_storage.snapshot_add([snapshot, complete_snapshot])
assert actual_result == {"snapshot:add": 2}
assert swh_storage.snapshot_get(complete_snapshot.id) == {
**complete_snapshot.to_dict(),
"next_branch": None,
}
assert swh_storage.snapshot_get(snapshot.id) == {
**snapshot.to_dict(),
"next_branch": None,
}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["snapshot"] == 2
def test_snapshot_add_many_incremental(self, swh_storage, sample_data):
snapshot, _, complete_snapshot = sample_data.snapshots[:3]
actual_result = swh_storage.snapshot_add([complete_snapshot])
assert actual_result == {"snapshot:add": 1}
actual_result2 = swh_storage.snapshot_add([snapshot, complete_snapshot])
assert actual_result2 == {"snapshot:add": 1}
assert swh_storage.snapshot_get(complete_snapshot.id) == {
**complete_snapshot.to_dict(),
"next_branch": None,
}
assert swh_storage.snapshot_get(snapshot.id) == {
**snapshot.to_dict(),
"next_branch": None,
}
def test_snapshot_add_twice(self, swh_storage, sample_data):
snapshot, empty_snapshot = sample_data.snapshots[:2]
actual_result = swh_storage.snapshot_add([empty_snapshot])
assert actual_result == {"snapshot:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("snapshot", empty_snapshot)
]
actual_result = swh_storage.snapshot_add([snapshot])
assert actual_result == {"snapshot:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("snapshot", empty_snapshot),
("snapshot", snapshot),
]
def test_snapshot_add_count_branches(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
actual_result = swh_storage.snapshot_add([complete_snapshot])
assert actual_result == {"snapshot:add": 1}
snp_size = swh_storage.snapshot_count_branches(complete_snapshot.id)
expected_snp_size = {
"alias": 1,
"content": 1,
"directory": 2,
"release": 1,
"revision": 1,
"snapshot": 1,
None: 1,
}
assert snp_size == expected_snp_size
def test_snapshot_add_count_branches_with_filtering(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
actual_result = swh_storage.snapshot_add([complete_snapshot])
assert actual_result == {"snapshot:add": 1}
snp_size = swh_storage.snapshot_count_branches(
complete_snapshot.id, branch_name_exclude_prefix=b"release"
)
expected_snp_size = {
"alias": 1,
"content": 1,
"directory": 2,
"revision": 1,
"snapshot": 1,
None: 1,
}
assert snp_size == expected_snp_size
def test_snapshot_add_count_branches_with_filtering_edge_cases(
self, swh_storage, sample_data
):
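# branch names are raw bytes that prefix each other, to check that
# branch_name_exclude_prefix filters on exact byte prefixes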
snapshot = Snapshot(
branches={
b"\xaa\xff": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"\xaa\xff\x00": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"\xff\xff": SnapshotBranch(
target=sample_data.release.id, target_type=TargetType.RELEASE,
),
b"\xff\xff\x00": SnapshotBranch(
target=sample_data.release.id, target_type=TargetType.RELEASE,
),
b"dangling": None,
},
)
swh_storage.snapshot_add([snapshot])
assert swh_storage.snapshot_count_branches(
snapshot.id, branch_name_exclude_prefix=b"\xaa\xff"
) == {None: 1, "release": 2}
assert swh_storage.snapshot_count_branches(
snapshot.id, branch_name_exclude_prefix=b"\xff\xff"
) == {None: 1, "revision": 2}
def test_snapshot_add_get_paginated(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
swh_storage.snapshot_add([complete_snapshot])
snp_id = complete_snapshot.id
branches = complete_snapshot.branches
branch_names = list(sorted(branches))
# Test branches_from
snapshot = swh_storage.snapshot_get_branches(snp_id, branches_from=b"release")
rel_idx = branch_names.index(b"release")
expected_snapshot = {
"id": snp_id,
"branches": {name: branches[name] for name in branch_names[rel_idx:]},
"next_branch": None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(snp_id, branches_count=1)
expected_snapshot = {
"id": snp_id,
"branches": {branch_names[0]: branches[branch_names[0]],},
"next_branch": b"content",
}
assert snapshot == expected_snapshot
# Test branches_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_from=b"directory", branches_count=3
)
dir_idx = branch_names.index(b"directory")
expected_snapshot = {
"id": snp_id,
"branches": {
name: branches[name] for name in branch_names[dir_idx : dir_idx + 3]
},
"next_branch": branch_names[dir_idx + 3],
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered(self, swh_storage, sample_data):
origin = sample_data.origin
complete_snapshot = sample_data.snapshots[2]
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([complete_snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=complete_snapshot.id,
)
]
)
snp_id = complete_snapshot.id
branches = complete_snapshot.branches
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["release", "revision"]
)
expected_snapshot = {
"id": snp_id,
"branches": {
name: tgt
for name, tgt in branches.items()
if tgt and tgt.target_type in [TargetType.RELEASE, TargetType.REVISION]
},
"next_branch": None,
}
assert snapshot == expected_snapshot
snapshot = swh_storage.snapshot_get_branches(snp_id, target_types=["alias"])
expected_snapshot = {
"id": snp_id,
"branches": {
name: tgt
for name, tgt in branches.items()
if tgt and tgt.target_type == TargetType.ALIAS
},
"next_branch": None,
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered_and_paginated(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
swh_storage.snapshot_add([complete_snapshot])
snp_id = complete_snapshot.id
branches = complete_snapshot.branches
branch_names = list(sorted(branches))
# Test branches_from
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["directory", "release"], branches_from=b"directory2"
)
expected_snapshot = {
"id": snp_id,
"branches": {name: branches[name] for name in (b"directory2", b"release")},
"next_branch": None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["directory", "release"], branches_count=1
)
expected_snapshot = {
"id": snp_id,
"branches": {b"directory": branches[b"directory"]},
"next_branch": b"directory2",
}
assert snapshot == expected_snapshot
# Test branches_count=2
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["directory", "release"], branches_count=2
)
expected_snapshot = {
"id": snp_id,
"branches": {
name: branches[name] for name in (b"directory", b"directory2")
},
"next_branch": b"release",
}
assert snapshot == expected_snapshot
# Test branches_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id,
target_types=["directory", "release"],
branches_from=b"directory2",
branches_count=1,
)
dir_idx = branch_names.index(b"directory2")
expected_snapshot = {
"id": snp_id,
"branches": {branch_names[dir_idx]: branches[branch_names[dir_idx]],},
"next_branch": b"release",
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_branch_by_type(self, swh_storage, sample_data):
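# with target_types=["alias"], pagination starting at alias1 with a count of 1
# must return that single alias branch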
complete_snapshot = sample_data.snapshots[2]
snapshot = complete_snapshot.to_dict()
alias1 = b"alias1"
alias2 = b"alias2"
target1 = random.choice(list(snapshot["branches"].keys()))
target2 = random.choice(list(snapshot["branches"].keys()))
snapshot["branches"][alias2] = {
"target": target2,
"target_type": "alias",
}
snapshot["branches"][alias1] = {
"target": target1,
"target_type": "alias",
}
new_snapshot = Snapshot.from_dict(snapshot)
swh_storage.snapshot_add([new_snapshot])
branches = swh_storage.snapshot_get_branches(
new_snapshot.id,
target_types=["alias"],
branches_from=alias1,
branches_count=1,
)["branches"]
assert len(branches) == 1
assert alias1 in branches
def test_snapshot_add_get_by_branches_name_pattern(self, swh_storage, sample_data):
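# exercise branch_name_include_substring / branch_name_exclude_prefix filters
# against a mix of textual and raw-bytes branch names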
snapshot = Snapshot(
branches={
b"refs/heads/master": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"refs/heads/incoming": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"refs/pull/1": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"refs/pull/2": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"dangling": None,
b"\xaa\xff": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"\xaa\xff\x00": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"\xff\xff": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
b"\xff\xff\x00": SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
),
},
)
swh_storage.snapshot_add([snapshot])
for include_pattern, exclude_prefix, nb_results in (
(b"pull", None, 2),
(b"incoming", None, 1),
(b"dangling", None, 1),
(None, b"refs/heads/", 7),
(b"refs", b"refs/heads/master", 3),
(b"refs", b"refs/heads/master", 3),
(None, b"\xaa\xff", 7),
(None, b"\xff\xff", 7),
):
branches = swh_storage.snapshot_get_branches(
snapshot.id,
branch_name_include_substring=include_pattern,
branch_name_exclude_prefix=exclude_prefix,
)["branches"]
expected_branches = [
branch_name
for branch_name in snapshot.branches
if (include_pattern is None or include_pattern in branch_name)
and (
exclude_prefix is None or not branch_name.startswith(exclude_prefix)
)
]
assert sorted(branches) == sorted(expected_branches)
assert len(branches) == nb_results
def test_snapshot_add_get_by_branches_name_pattern_filtered_paginated(
self, swh_storage, sample_data
):
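# build enough matching branches per target type so that substring filtering
# combined with branches_count paginates the results over two pages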
pattern = b"foo"
nb_branches_by_target_type = 10
branches = {}
for i in range(nb_branches_by_target_type):
branches[f"branch/directory/bar{i}".encode()] = SnapshotBranch(
target=sample_data.directory.id, target_type=TargetType.DIRECTORY,
)
branches[f"branch/revision/bar{i}".encode()] = SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
)
branches[f"branch/directory/{pattern}{i}".encode()] = SnapshotBranch(
target=sample_data.directory.id, target_type=TargetType.DIRECTORY,
)
branches[f"branch/revision/{pattern}{i}".encode()] = SnapshotBranch(
target=sample_data.revision.id, target_type=TargetType.REVISION,
)
snapshot = Snapshot(branches=branches)
swh_storage.snapshot_add([snapshot])
branches_count = nb_branches_by_target_type // 2
for target_type in (
TargetType.DIRECTORY,
TargetType.REVISION,
):
target_type_str = target_type.value
partial_branches = swh_storage.snapshot_get_branches(
snapshot.id,
branch_name_include_substring=pattern,
target_types=[target_type_str],
branches_count=branches_count,
)
branches = partial_branches["branches"]
expected_branches = [
branch_name
for branch_name, branch_data in snapshot.branches.items()
if pattern in branch_name and branch_data.target_type == target_type
][:branches_count]
assert sorted(branches) == sorted(expected_branches)
assert (
partial_branches["next_branch"]
== f"branch/{target_type_str}/{pattern}{branches_count}".encode()
)
partial_branches = swh_storage.snapshot_get_branches(
snapshot.id,
branch_name_include_substring=pattern,
target_types=[target_type_str],
branches_from=partial_branches["next_branch"],
)
branches = partial_branches["branches"]
expected_branches = [
branch_name
for branch_name, branch_data in snapshot.branches.items()
if pattern in branch_name and branch_data.target_type == target_type
][branches_count:]
assert sorted(branches) == sorted(expected_branches)
assert partial_branches["next_branch"] is None
def test_snapshot_add_get(self, swh_storage, sample_data):
snapshot = sample_data.snapshot
origin = sample_data.origin
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
ov1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
expected_snapshot = {**snapshot.to_dict(), "next_branch": None}
by_id = swh_storage.snapshot_get(snapshot.id)
assert by_id == expected_snapshot
actual_visit = swh_storage.origin_visit_get_by(origin.url, ov1.visit)
assert actual_visit == ov1
visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov1.visit, require_snapshot=True
)
assert visit_status.snapshot == snapshot.id
def test_snapshot_get_random(self, swh_storage, sample_data):
snapshot, empty_snapshot, complete_snapshot = sample_data.snapshots[:3]
swh_storage.snapshot_add([snapshot, empty_snapshot, complete_snapshot])
assert swh_storage.snapshot_get_random() in {
snapshot.id,
empty_snapshot.id,
complete_snapshot.id,
}
def test_snapshot_missing(self, swh_storage, sample_data):
snapshot, missing_snapshot = sample_data.snapshots[:2]
snapshots = [snapshot.id, missing_snapshot.id]
swh_storage.snapshot_add([snapshot])
missing_snapshots = swh_storage.snapshot_missing(snapshots)
assert list(missing_snapshots) == [missing_snapshot.id]
def test_stat_counters(self, swh_storage, sample_data):
origin = sample_data.origin
snapshot = sample_data.snapshot
revision = sample_data.revision
release = sample_data.release
directory = sample_data.directory
content = sample_data.content
expected_keys = ["content", "directory", "origin", "revision"]
# Initially, all counters are 0
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
assert counters[key] == 0
# Add a content. Only the content counter should increase.
swh_storage.content_add([content])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
if key != "content":
assert counters[key] == 0
assert counters["content"] == 1
# Add other objects. Check their counter increased as well.
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
swh_storage.directory_add([directory])
swh_storage.revision_add([revision])
swh_storage.release_add([release])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert counters["content"] == 1
assert counters["directory"] == 1
assert counters["snapshot"] == 1
assert counters["origin"] == 1
assert counters["origin_visit"] == 1
assert counters["revision"] == 1
assert counters["release"] == 1
assert counters["snapshot"] == 1
if "person" in counters:
assert counters["person"] == 3
def test_content_find_ctime(self, swh_storage, sample_data):
origin_content = sample_data.content
ctime = round_to_milliseconds(now())
content = attr.evolve(origin_content, data=None, ctime=ctime)
swh_storage.content_add_metadata([content])
actually_present = swh_storage.content_find({"sha1": content.sha1})
assert actually_present[0] == content
assert actually_present[0].ctime is not None
assert actually_present[0].ctime.tzinfo is not None
def test_content_find_with_present_content(self, swh_storage, sample_data):
content = sample_data.content
expected_content = attr.evolve(content, data=None)
# 1. with something to find
swh_storage.content_add([content])
actually_present = swh_storage.content_find({"sha1": content.sha1})
assert 1 == len(actually_present)
assert actually_present[0] == expected_content
# 2. with something to find
actually_present = swh_storage.content_find({"sha1_git": content.sha1_git})
assert 1 == len(actually_present)
assert actually_present[0] == expected_content
# 3. with something to find
actually_present = swh_storage.content_find({"sha256": content.sha256})
assert 1 == len(actually_present)
assert actually_present[0] == expected_content
# 4. with something to find
actually_present = swh_storage.content_find(content.hashes())
assert 1 == len(actually_present)
assert actually_present[0] == expected_content
def test_content_find_with_non_present_content(self, swh_storage, sample_data):
missing_content = sample_data.skipped_content
# 1. with something that does not exist
actually_present = swh_storage.content_find({"sha1": missing_content.sha1})
assert actually_present == []
# 2. with something that does not exist
actually_present = swh_storage.content_find(
{"sha1_git": missing_content.sha1_git}
)
assert actually_present == []
# 3. with something that does not exist
actually_present = swh_storage.content_find({"sha256": missing_content.sha256})
assert actually_present == []
def test_content_find_with_duplicate_input(self, swh_storage, sample_data):
content = sample_data.content
# Create fake data with colliding sha256 and blake2s256
sha1_array = bytearray(content.sha1)
sha1_array[0] += 1
sha1git_array = bytearray(content.sha1_git)
sha1git_array[0] += 1
duplicated_content = attr.evolve(
content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array)
)
# Inject the data
swh_storage.content_add([content, duplicated_content])
actual_result = swh_storage.content_find(
{
"blake2s256": duplicated_content.blake2s256,
"sha256": duplicated_content.sha256,
}
)
expected_content = attr.evolve(content, data=None)
expected_duplicated_content = attr.evolve(duplicated_content, data=None)
for result in actual_result:
assert result in [expected_content, expected_duplicated_content]
def test_content_find_with_duplicate_sha256(self, swh_storage, sample_data):
content = sample_data.content
hashes = {}
# Create fake data with colliding sha256
for hashalgo in ("sha1", "sha1_git", "blake2s256"):
value = bytearray(getattr(content, hashalgo))
value[0] += 1
hashes[hashalgo] = bytes(value)
duplicated_content = attr.evolve(
content,
sha1=hashes["sha1"],
sha1_git=hashes["sha1_git"],
blake2s256=hashes["blake2s256"],
)
swh_storage.content_add([content, duplicated_content])
actual_result = swh_storage.content_find({"sha256": duplicated_content.sha256})
assert len(actual_result) == 2
expected_content = attr.evolve(content, data=None)
expected_duplicated_content = attr.evolve(duplicated_content, data=None)
for result in actual_result:
assert result in [expected_content, expected_duplicated_content]
# Find with both sha256 and blake2s256
actual_result = swh_storage.content_find(
{
"sha256": duplicated_content.sha256,
"blake2s256": duplicated_content.blake2s256,
}
)
assert len(actual_result) == 1
assert actual_result == [expected_duplicated_content]
def test_content_find_with_duplicate_blake2s256(self, swh_storage, sample_data):
content = sample_data.content
# Create fake data with colliding blake2s256 (sha1, sha1_git and sha256 differ)
sha1_array = bytearray(content.sha1)
sha1_array[0] += 1
sha1git_array = bytearray(content.sha1_git)
sha1git_array[0] += 1
sha256_array = bytearray(content.sha256)
sha256_array[0] += 1
duplicated_content = attr.evolve(
content,
sha1=bytes(sha1_array),
sha1_git=bytes(sha1git_array),
sha256=bytes(sha256_array),
)
swh_storage.content_add([content, duplicated_content])
actual_result = swh_storage.content_find(
{"blake2s256": duplicated_content.blake2s256}
)
expected_content = attr.evolve(content, data=None)
expected_duplicated_content = attr.evolve(duplicated_content, data=None)
for result in actual_result:
assert result in [expected_content, expected_duplicated_content]
# Find with both sha256 and blake2s256
actual_result = swh_storage.content_find(
{
"sha256": duplicated_content.sha256,
"blake2s256": duplicated_content.blake2s256,
}
)
assert actual_result == [expected_duplicated_content]
def test_content_find_bad_input(self, swh_storage):
# 1. with no hash to lookup
with pytest.raises(StorageArgumentException):
swh_storage.content_find({}) # need at least one hash
# 2. with bad hash
with pytest.raises(StorageArgumentException):
swh_storage.content_find({"unknown-sha1": "something"}) # not the right key
def test_object_find_by_sha1_git(self, swh_storage, sample_data):
content = sample_data.content
directory = sample_data.directory
revision = sample_data.revision
release = sample_data.release
sha1_gits = [b"00000000000000000000"]
expected = {
b"00000000000000000000": [],
}
swh_storage.content_add([content])
sha1_gits.append(content.sha1_git)
expected[content.sha1_git] = [
{"sha1_git": content.sha1_git, "type": "content",}
]
swh_storage.directory_add([directory])
sha1_gits.append(directory.id)
expected[directory.id] = [{"sha1_git": directory.id, "type": "directory",}]
swh_storage.revision_add([revision])
sha1_gits.append(revision.id)
expected[revision.id] = [{"sha1_git": revision.id, "type": "revision",}]
swh_storage.release_add([release])
sha1_gits.append(release.id)
expected[release.id] = [{"sha1_git": release.id, "type": "release",}]
ret = swh_storage.object_find_by_sha1_git(sha1_gits)
assert expected == ret
def test_metadata_fetcher_add_get(self, swh_storage, sample_data):
fetcher = sample_data.metadata_fetcher
actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
assert actual_fetcher is None # does not exist
swh_storage.metadata_fetcher_add([fetcher])
res = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
assert res == fetcher
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_fetcher", fetcher),
]
for obj in expected_objects:
assert obj in actual_objects
def test_metadata_fetcher_add_zero(self, swh_storage, sample_data):
fetcher = sample_data.metadata_fetcher
actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
assert actual_fetcher is None # does not exist
swh_storage.metadata_fetcher_add([])
def test_metadata_authority_add_get(self, swh_storage, sample_data):
authority = sample_data.metadata_authority
actual_authority = swh_storage.metadata_authority_get(
authority.type, authority.url
)
assert actual_authority is None # does not exist
swh_storage.metadata_authority_add([authority])
res = swh_storage.metadata_authority_get(authority.type, authority.url)
assert res == authority
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_authority", authority),
]
for obj in expected_objects:
assert obj in actual_objects
def test_metadata_authority_add_zero(self, swh_storage, sample_data):
authority = sample_data.metadata_authority
actual_authority = swh_storage.metadata_authority_get(
authority.type, authority.url
)
assert actual_authority is None # does not exist
swh_storage.metadata_authority_add([])
def test_content_metadata_add(self, swh_storage, sample_data):
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata = sample_data.content_metadata[:2]
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add(content_metadata)
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(), authority
)
assert result.next_page_token is None
assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == list(
content_metadata
)
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_authority", authority),
("metadata_fetcher", fetcher),
] + [("raw_extrinsic_metadata", item) for item in content_metadata]
for obj in expected_objects:
assert obj in actual_objects
def test_content_metadata_add_duplicate(self, swh_storage, sample_data):
"""Duplicates should be silently ignored."""
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
swh_storage.raw_extrinsic_metadata_add([content_metadata2, content_metadata])
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(), authority
)
assert result.next_page_token is None
expected_results = (content_metadata, content_metadata2)
assert (
tuple(sorted(result.results, key=lambda x: x.discovery_date,))
== expected_results
)
def test_content_metadata_get(self, swh_storage, sample_data):
content, content2 = sample_data.contents[:2]
fetcher, fetcher2 = sample_data.fetchers[:2]
authority, authority2 = sample_data.authorities[:2]
(
content1_metadata1,
content1_metadata2,
content1_metadata3,
) = sample_data.content_metadata[:3]
content2_metadata = RawExtrinsicMetadata.from_dict(
{
**remove_keys(content1_metadata2.to_dict(), ("id",)), # recompute id
"target": str(content2.swhid()),
}
)
swh_storage.metadata_authority_add([authority, authority2])
swh_storage.metadata_fetcher_add([fetcher, fetcher2])
swh_storage.raw_extrinsic_metadata_add(
[
content1_metadata1,
content1_metadata2,
content1_metadata3,
content2_metadata,
]
)
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(), authority
)
assert result.next_page_token is None
assert [content1_metadata1, content1_metadata2] == list(
sorted(result.results, key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(), authority2
)
assert result.next_page_token is None
assert [content1_metadata3] == list(
sorted(result.results, key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
content2.swhid().to_extended(), authority
)
assert result.next_page_token is None
assert [content2_metadata] == list(result.results,)
def test_content_metadata_get_after(self, swh_storage, sample_data):
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(),
authority,
after=content_metadata.discovery_date - timedelta(seconds=1),
)
assert result.next_page_token is None
assert [content_metadata, content_metadata2] == list(
sorted(result.results, key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(),
authority,
after=content_metadata.discovery_date,
)
assert result.next_page_token is None
assert result.results == [content_metadata2]
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(),
authority,
after=content_metadata2.discovery_date,
)
assert result.next_page_token is None
assert result.results == []
def test_content_metadata_get_paginate(self, swh_storage, sample_data):
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
swh_storage.raw_extrinsic_metadata_get(content.swhid().to_extended(), authority)
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(), authority, limit=1
)
assert result.next_page_token is not None
assert result.results == [content_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(),
authority,
limit=1,
page_token=result.next_page_token,
)
assert result.next_page_token is None
assert result.results == [content_metadata2]
def test_content_metadata_get_paginate_same_date(self, swh_storage, sample_data):
content = sample_data.content
fetcher1, fetcher2 = sample_data.fetchers[:2]
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
swh_storage.metadata_fetcher_add([fetcher1, fetcher2])
swh_storage.metadata_authority_add([authority])
new_content_metadata2 = RawExtrinsicMetadata.from_dict(
{
**remove_keys(content_metadata2.to_dict(), ("id",)), # recompute id
"discovery_date": content_metadata2.discovery_date,
"fetcher": attr.evolve(fetcher2, metadata=None).to_dict(),
}
)
swh_storage.raw_extrinsic_metadata_add(
[content_metadata, new_content_metadata2]
)
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(), authority, limit=1
)
assert result.next_page_token is not None
assert result.results == [content_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
content.swhid().to_extended(),
authority,
limit=1,
page_token=result.next_page_token,
)
assert result.next_page_token is None
assert result.results[0].to_dict() == new_content_metadata2.to_dict()
assert result.results == [new_content_metadata2]
def test_content_metadata_get_by_ids(self, swh_storage, sample_data):
content, content2 = sample_data.contents[:2]
fetcher, fetcher2 = sample_data.fetchers[:2]
authority, authority2 = sample_data.authorities[:2]
(
content1_metadata1,
content1_metadata2,
content1_metadata3,
) = sample_data.content_metadata[:3]
content2_metadata = RawExtrinsicMetadata.from_dict(
{
**remove_keys(content1_metadata2.to_dict(), ("id",)), # recompute id
"target": str(content2.swhid()),
}
)
swh_storage.metadata_authority_add([authority, authority2])
swh_storage.metadata_fetcher_add([fetcher, fetcher2])
swh_storage.raw_extrinsic_metadata_add(
[
content1_metadata1,
content1_metadata2,
content1_metadata3,
content2_metadata,
]
)
assert set(
swh_storage.raw_extrinsic_metadata_get_by_ids(
[content1_metadata1.id, b"\x00" * 20, content2_metadata.id]
)
) == {content1_metadata1, content2_metadata}
def test_content_metadata_get_authorities(self, swh_storage, sample_data):
content1, content2, content3 = sample_data.contents[:3]
fetcher, fetcher2 = sample_data.fetchers[:2]
authority, authority2 = sample_data.authorities[:2]
(
content1_metadata1,
content1_metadata2,
content1_metadata3,
) = sample_data.content_metadata[:3]
content2_metadata = RawExtrinsicMetadata.from_dict(
{
**remove_keys(content1_metadata2.to_dict(), ("id",)), # recompute id
"target": str(content2.swhid()),
}
)
content1_metadata2 = RawExtrinsicMetadata.from_dict(
{
**remove_keys(content1_metadata2.to_dict(), ("id",)), # recompute id
"authority": authority2.to_dict(),
}
)
swh_storage.metadata_authority_add([authority, authority2])
swh_storage.metadata_fetcher_add([fetcher, fetcher2])
swh_storage.raw_extrinsic_metadata_add(
[
content1_metadata1,
content1_metadata2,
content1_metadata3,
content2_metadata,
]
)
assert swh_storage.raw_extrinsic_metadata_get_authorities(content1.swhid()) in (
[authority, authority2],
[authority2, authority],
)
assert swh_storage.raw_extrinsic_metadata_get_authorities(content2.swhid()) == [
authority
]
assert (
swh_storage.raw_extrinsic_metadata_get_authorities(content3.swhid()) == []
)
def test_origin_metadata_add(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(), authority
)
assert result.next_page_token is None
assert list(sorted(result.results, key=lambda x: x.discovery_date)) == [
origin_metadata,
origin_metadata2,
]
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_authority", authority),
("metadata_fetcher", fetcher),
("raw_extrinsic_metadata", origin_metadata),
("raw_extrinsic_metadata", origin_metadata2),
]
for obj in expected_objects:
assert obj in actual_objects
def test_origin_metadata_add_duplicate(self, swh_storage, sample_data):
"""Duplicates should be silently updated."""
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
swh_storage.raw_extrinsic_metadata_add([origin_metadata2, origin_metadata])
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(), authority
)
assert result.next_page_token is None
# Which of the two behaviors (ignoring the duplicate or overwriting it)
# happens is backend-specific.
expected_results = (origin_metadata, origin_metadata2)
assert (
tuple(sorted(result.results, key=lambda x: x.discovery_date,))
== expected_results
)
def test_origin_metadata_get(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
fetcher, fetcher2 = sample_data.fetchers[:2]
authority, authority2 = sample_data.authorities[:2]
(
origin1_metadata1,
origin1_metadata2,
origin1_metadata3,
) = sample_data.origin_metadata[:3]
assert swh_storage.origin_add([origin, origin2]) == {"origin:add": 2}
origin2_metadata = RawExtrinsicMetadata.from_dict(
{
**remove_keys(origin1_metadata2.to_dict(), ("id",)), # recompute id
"target": str(Origin(origin2.url).swhid()),
}
)
swh_storage.metadata_authority_add([authority, authority2])
swh_storage.metadata_fetcher_add([fetcher, fetcher2])
swh_storage.raw_extrinsic_metadata_add(
[origin1_metadata1, origin1_metadata2, origin1_metadata3, origin2_metadata]
)
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(), authority
)
assert result.next_page_token is None
assert [origin1_metadata1, origin1_metadata2] == list(
sorted(result.results, key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(), authority2
)
assert result.next_page_token is None
assert [origin1_metadata3] == list(
sorted(result.results, key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin2.url).swhid(), authority
)
assert result.next_page_token is None
assert [origin2_metadata] == list(result.results,)
def test_origin_metadata_get_after(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(),
authority,
after=origin_metadata.discovery_date - timedelta(seconds=1),
)
assert result.next_page_token is None
assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == [
origin_metadata,
origin_metadata2,
]
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(), authority, after=origin_metadata.discovery_date,
)
assert result.next_page_token is None
assert result.results == [origin_metadata2]
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(),
authority,
after=origin_metadata2.discovery_date,
)
assert result.next_page_token is None
assert result.results == []
def test_origin_metadata_get_paginate(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
swh_storage.raw_extrinsic_metadata_get(Origin(origin.url).swhid(), authority)
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(), authority, limit=1
)
assert result.next_page_token is not None
assert result.results == [origin_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(),
authority,
limit=1,
page_token=result.next_page_token,
)
assert result.next_page_token is None
assert result.results == [origin_metadata2]
def test_origin_metadata_get_paginate_same_date(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher1, fetcher2 = sample_data.fetchers[:2]
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher1, fetcher2])
swh_storage.metadata_authority_add([authority])
new_origin_metadata2 = RawExtrinsicMetadata.from_dict(
{
**remove_keys(origin_metadata2.to_dict(), ("id",)), # recompute id
"discovery_date": origin_metadata2.discovery_date,
"fetcher": attr.evolve(fetcher2, metadata=None).to_dict(),
}
)
swh_storage.raw_extrinsic_metadata_add([origin_metadata, new_origin_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(), authority, limit=1
)
assert result.next_page_token is not None
assert result.results == [origin_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
Origin(origin.url).swhid(),
authority,
limit=1,
page_token=result.next_page_token,
)
assert result.next_page_token is None
assert result.results == [new_origin_metadata2]
def test_origin_metadata_add_missing_authority(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
with pytest.raises(StorageArgumentException, match="authority"):
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
def test_origin_metadata_add_missing_fetcher(self, swh_storage, sample_data):
origin = sample_data.origin
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_authority_add([authority])
with pytest.raises(StorageArgumentException, match="fetcher"):
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
class TestStorageGeneratedData:
def test_generate_content_get_data(self, swh_storage, swh_contents):
contents_with_data = [c for c in swh_contents if c.status != "absent"]
# retrieve contents
for content in contents_with_data:
actual_content_data = swh_storage.content_get_data(content.sha1)
assert actual_content_data is not None
assert actual_content_data == content.data
def test_generate_content_get(self, swh_storage, swh_contents):
expected_contents = [
attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
]
actual_contents = swh_storage.content_get([c.sha1 for c in expected_contents])
assert len(actual_contents) == len(expected_contents)
assert actual_contents == expected_contents
@pytest.mark.parametrize("limit", [1, 7, 10, 100, 1000])
def test_origin_list(self, swh_storage, swh_origins, limit):
returned_origins = []
page_token = None
i = 0
while True:
actual_page = swh_storage.origin_list(page_token=page_token, limit=limit)
assert len(actual_page.results) <= limit
returned_origins.extend(actual_page.results)
i += 1
page_token = actual_page.next_page_token
if page_token is None:
assert i * limit >= len(swh_origins)
break
else:
assert len(actual_page.results) == limit
assert sorted(returned_origins) == sorted(swh_origins)
def test_origin_count(self, swh_storage, sample_data):
swh_storage.origin_add(sample_data.origins)
assert swh_storage.origin_count("github") == 3
assert swh_storage.origin_count("gitlab") == 2
assert swh_storage.origin_count(".*user.*", regexp=True) == 5
assert swh_storage.origin_count(".*user.*", regexp=False) == 0
assert swh_storage.origin_count(".*user1.*", regexp=True) == 2
assert swh_storage.origin_count(".*user1.*", regexp=False) == 0
def test_origin_count_with_visit_no_visits(self, swh_storage, sample_data):
swh_storage.origin_add(sample_data.origins)
# none of them have visits, so with_visit=True => 0
assert swh_storage.origin_count("github", with_visit=True) == 0
assert swh_storage.origin_count("gitlab", with_visit=True) == 0
assert swh_storage.origin_count(".*user.*", regexp=True, with_visit=True) == 0
assert swh_storage.origin_count(".*user.*", regexp=False, with_visit=True) == 0
assert swh_storage.origin_count(".*user1.*", regexp=True, with_visit=True) == 0
assert swh_storage.origin_count(".*user1.*", regexp=False, with_visit=True) == 0
def test_origin_count_with_visit_with_visits_no_snapshot(
self, swh_storage, sample_data
):
swh_storage.origin_add(sample_data.origins)
origin_url = "https://github.com/user1/repo1"
visit = OriginVisit(origin=origin_url, date=now(), type="git",)
swh_storage.origin_visit_add([visit])
assert swh_storage.origin_count("github", with_visit=False) == 3
# it has a visit, but no snapshot, so with_visit=True => 0
assert swh_storage.origin_count("github", with_visit=True) == 0
assert swh_storage.origin_count("gitlab", with_visit=False) == 2
# these gitlab origins have no visit
assert swh_storage.origin_count("gitlab", with_visit=True) == 0
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=False)
== 1
)
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 0
)
assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 0
def test_origin_count_with_visit_with_visits_and_snapshot(
self, swh_storage, sample_data
):
snapshot = sample_data.snapshot
swh_storage.origin_add(sample_data.origins)
swh_storage.snapshot_add([snapshot])
origin_url = "https://github.com/user1/repo1"
visit = OriginVisit(origin=origin_url, date=now(), type="git",)
visit = swh_storage.origin_visit_add([visit])[0]
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=visit.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
assert swh_storage.origin_count("github", with_visit=False) == 3
# github/user1 has a visit and a snapshot, so with_visit=True => 1
assert swh_storage.origin_count("github", with_visit=True) == 1
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=False)
== 1
)
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 1
)
assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 1
@settings(
suppress_health_check=[HealthCheck.too_slow] + function_scoped_fixture_check,
)
@given(strategies.lists(objects(split_content=True), max_size=2))
def test_add_arbitrary(self, swh_storage, objects):
for (obj_type, obj) in objects:
if obj.object_type == "origin_visit":
swh_storage.origin_add([Origin(url=obj.origin)])
visit = OriginVisit(origin=obj.origin, date=obj.date, type=obj.type,)
swh_storage.origin_visit_add([visit])
elif obj.object_type == "raw_extrinsic_metadata":
swh_storage.metadata_authority_add([obj.authority])
swh_storage.metadata_fetcher_add([obj.fetcher])
swh_storage.raw_extrinsic_metadata_add([obj])
else:
method = getattr(swh_storage, obj_type + "_add")
try:
method([obj])
except HashCollision:
pass
diff --git a/swh/storage/tests/test_cassandra.py b/swh/storage/tests/test_cassandra.py
index 8bb9b82a..970938d3 100644
--- a/swh/storage/tests/test_cassandra.py
+++ b/swh/storage/tests/test_cassandra.py
@@ -1,703 +1,704 @@
# Copyright (C) 2018-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import itertools
import os
import resource
import signal
import socket
import subprocess
import time
from typing import Any, Dict
import attr
from cassandra.cluster import NoHostAvailable
import pytest
from swh.core.api.classes import stream_results
from swh.model.model import Directory, DirectoryEntry, Snapshot, SnapshotBranch
from swh.storage import get_storage
from swh.storage.cassandra import create_keyspace
from swh.storage.cassandra.model import ContentRow, ExtIDRow
from swh.storage.cassandra.schema import HASH_ALGORITHMS, TABLES
from swh.storage.tests.storage_data import StorageData
from swh.storage.tests.storage_tests import (
TestStorageGeneratedData as _TestStorageGeneratedData,
)
from swh.storage.tests.storage_tests import TestStorage as _TestStorage
from swh.storage.utils import now, remove_keys
CONFIG_TEMPLATE = """
data_file_directories:
- {data_dir}/data
commitlog_directory: {data_dir}/commitlog
hints_directory: {data_dir}/hints
saved_caches_directory: {data_dir}/saved_caches
commitlog_sync: periodic
commitlog_sync_period_in_ms: 1000000
partitioner: org.apache.cassandra.dht.Murmur3Partitioner
endpoint_snitch: SimpleSnitch
seed_provider:
- class_name: org.apache.cassandra.locator.SimpleSeedProvider
parameters:
- seeds: "127.0.0.1"
storage_port: {storage_port}
native_transport_port: {native_transport_port}
start_native_transport: true
listen_address: 127.0.0.1
enable_user_defined_functions: true
# speed-up by disabling periodic saving to disk
key_cache_save_period: 0
row_cache_save_period: 0
trickle_fsync: false
commitlog_sync_period_in_ms: 100000
"""
SCYLLA_EXTRA_CONFIG_TEMPLATE = """
experimental_features:
- udf
view_hints_directory: {data_dir}/view_hints
prometheus_port: 0 # disable prometheus server
start_rpc: false # disable thrift server
api_port: {api_port}
"""
def free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
port = sock.getsockname()[1]
sock.close()
return port
def wait_for_peer(addr, port):
wait_until = time.time() + 60
while time.time() < wait_until:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((addr, port))
except ConnectionRefusedError:
time.sleep(0.1)
else:
sock.close()
return True
return False
@pytest.fixture(scope="session")
def cassandra_cluster(tmpdir_factory):
cassandra_conf = tmpdir_factory.mktemp("cassandra_conf")
cassandra_data = tmpdir_factory.mktemp("cassandra_data")
cassandra_log = tmpdir_factory.mktemp("cassandra_log")
native_transport_port = free_port()
storage_port = free_port()
jmx_port = free_port()
api_port = free_port()
use_scylla = bool(os.environ.get("SWH_USE_SCYLLADB", ""))
cassandra_bin = os.environ.get(
"SWH_CASSANDRA_BIN", "/usr/bin/scylla" if use_scylla else "/usr/sbin/cassandra"
)
if use_scylla:
os.makedirs(cassandra_conf.join("conf"))
config_path = cassandra_conf.join("conf/scylla.yaml")
config_template = CONFIG_TEMPLATE + SCYLLA_EXTRA_CONFIG_TEMPLATE
else:
config_path = cassandra_conf.join("cassandra.yaml")
config_template = CONFIG_TEMPLATE
with open(str(config_path), "w") as fd:
fd.write(
config_template.format(
data_dir=str(cassandra_data),
storage_port=storage_port,
native_transport_port=native_transport_port,
api_port=api_port,
)
)
if os.environ.get("SWH_CASSANDRA_LOG"):
stdout = stderr = None
else:
stdout = stderr = subprocess.DEVNULL
env = {
"MAX_HEAP_SIZE": "300M",
"HEAP_NEWSIZE": "50M",
"JVM_OPTS": "-Xlog:gc=error:file=%s/gc.log" % cassandra_log,
}
if "JAVA_HOME" in os.environ:
env["JAVA_HOME"] = os.environ["JAVA_HOME"]
if use_scylla:
env = {
**env,
"SCYLLA_HOME": cassandra_conf,
}
# prevent "NOFILE rlimit too low (recommended setting 200000,
# minimum setting 10000; refusing to start."
resource.setrlimit(resource.RLIMIT_NOFILE, (200000, 200000))
proc = subprocess.Popen(
[cassandra_bin, "--developer-mode=1",],
start_new_session=True,
env=env,
stdout=stdout,
stderr=stderr,
)
else:
proc = subprocess.Popen(
[
cassandra_bin,
"-Dcassandra.config=file://%s/cassandra.yaml" % cassandra_conf,
"-Dcassandra.logdir=%s" % cassandra_log,
"-Dcassandra.jmx.local.port=%d" % jmx_port,
"-Dcassandra-foreground=yes",
],
start_new_session=True,
env=env,
stdout=stdout,
stderr=stderr,
)
listening = wait_for_peer("127.0.0.1", native_transport_port)
if listening:
yield (["127.0.0.1"], native_transport_port)
if not listening or os.environ.get("SWH_CASSANDRA_LOG"):
debug_log_path = str(cassandra_log.join("debug.log"))
if os.path.exists(debug_log_path):
with open(debug_log_path) as fd:
print(fd.read())
if not listening:
if proc.poll() is None:
raise Exception("cassandra process unexpectedly not listening.")
else:
raise Exception("cassandra process unexpectedly stopped.")
pgrp = os.getpgid(proc.pid)
os.killpg(pgrp, signal.SIGKILL)
class RequestHandler:
def on_request(self, rf):
if hasattr(rf.message, "query"):
print()
print(rf.message.query)
@pytest.fixture(scope="session")
def keyspace(cassandra_cluster):
(hosts, port) = cassandra_cluster
keyspace = os.urandom(10).hex()
create_keyspace(hosts, keyspace, port)
return keyspace
# tests are executed using the imported classes (TestStorage and
# TestStorageGeneratedData), with the swh_storage fixture overloaded via the
# swh_storage_backend_config fixture below
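# Editor's sketch (not part of the original change): a minimal illustration of
# the fixture-overriding pattern relied on above, assuming the generic test
# classes obtain their storage through a swh_storage fixture that itself
# consumes swh_storage_backend_config:
#
#     @pytest.fixture
#     def swh_storage(swh_storage_backend_config):
#         return get_storage(**swh_storage_backend_config)
#
# Redefining swh_storage_backend_config in this module is then enough to make
# every inherited test run against the Cassandra backend.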
@pytest.fixture
def swh_storage_backend_config(cassandra_cluster, keyspace):
(hosts, port) = cassandra_cluster
storage_config = dict(
cls="cassandra",
hosts=hosts,
port=port,
keyspace=keyspace,
journal_writer={"cls": "memory"},
objstorage={"cls": "memory"},
)
yield storage_config
storage = get_storage(**storage_config)
for table in TABLES:
storage._cql_runner._session.execute('TRUNCATE TABLE "%s"' % table)
storage._cql_runner._cluster.shutdown()
@pytest.mark.cassandra
class TestCassandraStorage(_TestStorage):
def test_config_wrong_consistency_should_raise(self):
storage_config = dict(
cls="cassandra",
hosts=["first"],
port=9999,
keyspace="any",
consistency_level="fake",
journal_writer={"cls": "memory"},
objstorage={"cls": "memory"},
)
with pytest.raises(ValueError, match="Unknown consistency"):
get_storage(**storage_config)
def test_config_consistency_used(self, swh_storage_backend_config):
config_with_consistency = dict(
swh_storage_backend_config, **{"consistency_level": "THREE"}
)
storage = get_storage(**config_with_consistency)
with pytest.raises(NoHostAvailable):
storage.content_get_random()
def test_content_add_murmur3_collision(self, swh_storage, mocker, sample_data):
"""The Murmur3 token is used as link from index tables to the main
table; and non-matching contents with colliding murmur3-hash
are filtered-out when reading the main table.
This test checks the content methods do filter out these collisions.
"""
called = 0
cont, cont2 = sample_data.contents[:2]
# always return a token
def mock_cgtfsh(algo, hash_):
nonlocal called
called += 1
assert algo in ("sha1", "sha1_git")
return [123456]
mocker.patch.object(
swh_storage._cql_runner, "content_get_tokens_from_single_hash", mock_cgtfsh,
)
# For all tokens, always return cont
def mock_cgft(token):
nonlocal called
called += 1
return [
ContentRow(
length=10,
ctime=datetime.datetime.now(),
status="present",
**{algo: getattr(cont, algo) for algo in HASH_ALGORITHMS},
)
]
mocker.patch.object(
swh_storage._cql_runner, "content_get_from_token", mock_cgft
)
actual_result = swh_storage.content_add([cont2])
assert called == 4
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont2.length,
}
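# Editor's sketch (not part of the original change): the read path exercised by
# the murmur3-collision tests, assuming the cql_runner API mocked here; the
# main table is reached through a token computed from a single hash, and rows
# whose actual hash columns do not match are discarded afterwards:
#
#     tokens = cql_runner.content_get_tokens_from_single_hash("sha1", sha1)
#     candidates = [row for tok in tokens
#                   for row in cql_runner.content_get_from_token(tok)]
#     matches = [row for row in candidates if row.sha1 == sha1]
#
# The mocks force every lookup to "collide", so the assertions verify that this
# last filtering step actually happens.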
def test_content_get_metadata_murmur3_collision(
self, swh_storage, mocker, sample_data
):
"""The Murmur3 token is used as link from index tables to the main
table; and non-matching contents with colliding murmur3-hash
are filtered-out when reading the main table.
This test checks the content methods do filter out these collisions.
"""
called = 0
cont, cont2 = [attr.evolve(c, ctime=now()) for c in sample_data.contents[:2]]
# always return a token
def mock_cgtfsh(algo, hash_):
nonlocal called
called += 1
assert algo in ("sha1", "sha1_git")
return [123456]
mocker.patch.object(
swh_storage._cql_runner, "content_get_tokens_from_single_hash", mock_cgtfsh,
)
# For all tokens, always return cont and cont2
cols = list(set(cont.to_dict()) - {"data"})
def mock_cgft(token):
nonlocal called
called += 1
return [
ContentRow(**{col: getattr(cont, col) for col in cols},)
for cont in [cont, cont2]
]
mocker.patch.object(
swh_storage._cql_runner, "content_get_from_token", mock_cgft
)
actual_result = swh_storage.content_get([cont.sha1])
assert called == 2
# drop the "data" column, which content_get does not return
expected_cont = attr.evolve(cont, data=None)
# but cont2 should be filtered out
assert actual_result == [expected_cont]
def test_content_find_murmur3_collision(self, swh_storage, mocker, sample_data):
"""The Murmur3 token is used as link from index tables to the main
table; and non-matching contents with colliding murmur3-hash
are filtered-out when reading the main table.
This test checks the content methods do filter out these collisions.
"""
called = 0
cont, cont2 = [attr.evolve(c, ctime=now()) for c in sample_data.contents[:2]]
# always return a token
def mock_cgtfsh(algo, hash_):
nonlocal called
called += 1
assert algo in ("sha1", "sha1_git")
return [123456]
mocker.patch.object(
swh_storage._cql_runner, "content_get_tokens_from_single_hash", mock_cgtfsh,
)
# For all tokens, always return cont and cont2
cols = list(set(cont.to_dict()) - {"data"})
def mock_cgft(token):
nonlocal called
called += 1
return [
ContentRow(**{col: getattr(cont, col) for col in cols})
for cont in [cont, cont2]
]
mocker.patch.object(
swh_storage._cql_runner, "content_get_from_token", mock_cgft
)
expected_content = attr.evolve(cont, data=None)
actual_result = swh_storage.content_find({"sha1": cont.sha1})
assert called == 2
# but cont2 should be filtered out
assert actual_result == [expected_content]
def test_content_get_partition_murmur3_collision(
self, swh_storage, mocker, sample_data
):
"""The Murmur3 token is used as link from index tables to the main table; and
non-matching contents with colliding murmur3-hash are filtered-out when reading
the main table.
This test checks the content_get_partition endpoints return all contents, even
the collisions.
"""
called = 0
rows: Dict[int, Dict] = {}
for tok, content in enumerate(sample_data.contents):
cont = attr.evolve(content, data=None, ctime=now())
row_d = {**cont.to_dict(), "tok": tok}
rows[tok] = row_d
# For all tokens, always return cont
def mock_content_get_token_range(range_start, range_end, limit):
nonlocal called
called += 1
for tok in list(rows.keys()) * 3: # yield multiple times the same tok
row_d = dict(rows[tok].items())
row_d.pop("tok")
yield (tok, ContentRow(**row_d))
mocker.patch.object(
swh_storage._cql_runner,
"content_get_token_range",
mock_content_get_token_range,
)
actual_results = list(
stream_results(
swh_storage.content_get_partition, partition_id=0, nb_partitions=1
)
)
assert called > 0
# everything is listed, even collisions
assert len(actual_results) == 3 * len(sample_data.contents)
# as we duplicated the returned results, dropping duplicates should yield
# the original length
assert len(set(actual_results)) == len(sample_data.contents)
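# Editor's note (not part of the original change): unlike the point lookups
# above, content_get_partition presumably iterates the main table directly by
# token range, so no hash re-check is available and colliding rows are all
# returned; the test therefore only de-duplicates the results to recover the
# original count.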
@pytest.mark.skip("content_update is not yet implemented for Cassandra")
def test_content_update(self):
pass
def test_extid_murmur3_collision(self, swh_storage, mocker, sample_data):
"""The Murmur3 token is used as link from index table to the main
table; and non-matching extids with colliding murmur3-hash
are filtered-out when reading the main table.
This test checks the extid methods do filter out these collisions.
"""
swh_storage.extid_add(sample_data.extids)
# For any token, always return all extids, i.e. make as if all tokens
# for all extid entries collide
def mock_egft(token):
return [
ExtIDRow(
extid_type=extid.extid_type,
extid=extid.extid,
+ extid_version=extid.extid_version,
target_type=extid.target.object_type.value,
target=extid.target.object_id,
)
for extid in sample_data.extids
]
mocker.patch.object(
swh_storage._cql_runner, "extid_get_from_token", mock_egft,
)
for extid in sample_data.extids:
extids = swh_storage.extid_get_from_target(
target_type=extid.target.object_type, ids=[extid.target.object_id]
)
assert extids == [extid]
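# Editor's note (not part of the original change): the mocked ExtIDRow above
# now carries the new extid_version field as well, mirroring the column added
# to the extid storage; presumably the ExtID model objects rebuilt by
# extid_get_from_target compare equal to the samples only when extid_version
# round-trips with the other fields.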
def test_directory_add_atomic(self, swh_storage, sample_data, mocker):
"""Checks that a crash occurring after some directory entries were written
does not cause the directory to be (partially) visible.
ie. checks directories are added somewhat atomically."""
# Disable the journal writer, it would detect the CrashyEntry exception too
# early for this test to be relevant
swh_storage.journal_writer.journal = None
class MyException(Exception):
pass
class CrashyEntry(DirectoryEntry):
def __init__(self):
pass
def to_dict(self):
raise MyException()
directory = sample_data.directory3
entries = directory.entries
directory = attr.evolve(directory, entries=entries + (CrashyEntry(),))
with pytest.raises(MyException):
swh_storage.directory_add([directory])
# This should have written some of the entries to the database:
entry_rows = swh_storage._cql_runner.directory_entry_get([directory.id])
assert {row.name for row in entry_rows} == {entry.name for entry in entries}
# BUT, because not all the entries were written, the directory should
# be considered not written.
assert swh_storage.directory_missing([directory.id]) == [directory.id]
assert list(swh_storage.directory_ls(directory.id)) == []
assert swh_storage.directory_get_entries(directory.id) is None
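# Editor's note (not part of the original change): the atomicity argument above
# presumably rests on write ordering; entry rows are written before the
# directory row itself, so a crash part-way through leaves orphan entry rows
# but no directory row, and directory_missing() / directory_ls() behave as if
# the directory had never been added.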
def test_snapshot_add_atomic(self, swh_storage, sample_data, mocker):
"""Checks that a crash occurring after some snapshot branches were written
does not cause the snapshot to be (partially) visible.
ie. checks snapshots are added somewhat atomically."""
# Disable the journal writer, it would detect the CrashyBranch exception too
# early for this test to be relevant
swh_storage.journal_writer.journal = None
class MyException(Exception):
pass
class CrashyBranch(SnapshotBranch):
def __getattribute__(self, name):
if name == "target" and should_raise:
raise MyException()
else:
return super().__getattribute__(name)
snapshot = sample_data.complete_snapshot
branches = snapshot.branches
should_raise = False # just so that we can construct the object
crashy_branch = CrashyBranch.from_dict(branches[b"directory"].to_dict())
should_raise = True
snapshot = attr.evolve(
snapshot, branches={**branches, b"crashy": crashy_branch,},
)
with pytest.raises(MyException):
swh_storage.snapshot_add([snapshot])
# This should have written some of the branches to the database:
branch_rows = swh_storage._cql_runner.snapshot_branch_get(snapshot.id, b"", 10)
assert {row.name for row in branch_rows} == set(branches)
# BUT, because not all the branches were written, the snapshot should
# be considered not written.
assert swh_storage.snapshot_missing([snapshot.id]) == [snapshot.id]
assert swh_storage.snapshot_get(snapshot.id) is None
assert swh_storage.snapshot_count_branches(snapshot.id) is None
assert swh_storage.snapshot_get_branches(snapshot.id) is None
@pytest.mark.skip(
'The "person" table of the pgsql is a legacy thing, and not '
"supported by the cassandra backend."
)
def test_person_fullname_unicity(self):
pass
@pytest.mark.skip(
'The "person" table of the pgsql is a legacy thing, and not '
"supported by the cassandra backend."
)
def test_person_get(self):
pass
@pytest.mark.skip("Not supported by Cassandra")
def test_origin_count(self):
pass
@pytest.mark.cassandra
class TestCassandraStorageGeneratedData(_TestStorageGeneratedData):
@pytest.mark.skip("Not supported by Cassandra")
def test_origin_count(self):
pass
@pytest.mark.skip("Not supported by Cassandra")
def test_origin_count_with_visit_no_visits(self):
pass
@pytest.mark.skip("Not supported by Cassandra")
def test_origin_count_with_visit_with_visits_and_snapshot(self):
pass
@pytest.mark.skip("Not supported by Cassandra")
def test_origin_count_with_visit_with_visits_no_snapshot(self):
pass
@pytest.mark.parametrize(
"allow_overwrite,object_type",
itertools.product(
[False, True],
# Note the absence of "content", it's tested above.
["directory", "revision", "release", "snapshot", "origin", "extid"],
),
)
def test_allow_overwrite(
allow_overwrite: bool, object_type: str, swh_storage_backend_config
):
if object_type in ("origin", "extid"):
pytest.skip(
f"test_disallow_overwrite not implemented for {object_type} objects, "
f"because all their columns are in the primary key."
)
swh_storage = get_storage(
allow_overwrite=allow_overwrite, **swh_storage_backend_config
)
# directory_ls joins with content and directory table, and needs those to return
# non-None entries:
if object_type == "directory":
swh_storage.directory_add([StorageData.directory5])
swh_storage.content_add([StorageData.content, StorageData.content2])
obj1: Any
obj2: Any
# Get two test objects
if object_type == "directory":
(obj1, obj2, *_) = StorageData.directories
elif object_type == "snapshot":
# StorageData.snapshots[1] is the empty snapshot, a corner case that would
# make this test succeed for the wrong reasons
obj1 = StorageData.snapshot
obj2 = StorageData.complete_snapshot
else:
(obj1, obj2, *_) = getattr(StorageData, (object_type + "s"))
# Let's make both objects have the same hash, but different content
obj1 = attr.evolve(obj1, id=obj2.id)
# Get the methods used to add and get these objects
add = getattr(swh_storage, object_type + "_add")
if object_type == "directory":
def get(ids):
return [
Directory(
id=ids[0],
entries=tuple(
map(
lambda entry: DirectoryEntry(
name=entry["name"],
type=entry["type"],
target=entry["sha1_git"],
perms=entry["perms"],
),
swh_storage.directory_ls(ids[0]),
)
),
)
]
elif object_type == "snapshot":
def get(ids):
return [
Snapshot.from_dict(
remove_keys(swh_storage.snapshot_get(ids[0]), ("next_branch",))
)
]
else:
get = getattr(swh_storage, object_type + "_get")
# Add the first object
add([obj1])
# It should be returned as-is
assert get([obj1.id]) == [obj1]
# Add the second object
add([obj2])
if allow_overwrite:
# obj1 was overwritten by obj2
expected = obj2
else:
# obj2 was not written, because obj1 already exists and has the same hash
expected = obj1
if allow_overwrite and object_type in ("directory", "snapshot"):
# TODO
pytest.xfail(
"directory entries and snapshot branches are concatenated "
"instead of being replaced"
)
assert get([obj1.id]) == [expected]
diff --git a/swh/storage/tests/test_storage_data.py b/swh/storage/tests/test_storage_data.py
index 821b7f66..4bc7d3b3 100644
--- a/swh/storage/tests/test_storage_data.py
+++ b/swh/storage/tests/test_storage_data.py
@@ -1,42 +1,43 @@
# Copyright (C) 2020-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import pytest
from swh.model.model import BaseModel
from swh.storage.tests.storage_data import StorageData
def test_storage_data():
data = StorageData()
for attribute_key in [
"contents",
"skipped_contents",
"directories",
"revisions",
"releases",
"snapshots",
"origins",
"origin_visits",
"fetchers",
"authorities",
"origin_metadata",
"content_metadata",
+ "extids",
]:
for obj in getattr(data, attribute_key):
assert isinstance(obj, BaseModel)
@pytest.mark.parametrize(
"collection",
("directories", "git_revisions", "hg_revisions", "releases", "snapshots"),
)
def test_storage_data_hash(collection):
data = StorageData()
for obj in getattr(data, collection):
assert (
obj.compute_hash() == obj.id
), f"{obj.compute_hash().hex()} != {obj.id.hex()}"