diff --git a/swh/storage/api/serializers.py b/swh/storage/api/serializers.py
index e440a3f0..abc458b3 100644
--- a/swh/storage/api/serializers.py
+++ b/swh/storage/api/serializers.py
@@ -1,39 +1,52 @@
# Copyright (C) 2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
"""Decoder and encoders for swh-model objects."""
from typing import Callable, Dict, List, Tuple
from swh.model.identifiers import SWHID, parse_swhid
import swh.model.model as model
+from swh.storage import interface
+
def _encode_model_object(obj):
d = obj.to_dict()
d["__type__"] = type(obj).__name__
return d
-def _encode_model_enum(obj):
+def _encode_enum(obj):
return {
"value": obj.value,
"__type__": type(obj).__name__,
}
+def _decode_model_enum(d):
+ return getattr(model, d.pop("__type__"))(d["value"])
+
+
+def _decode_storage_enum(d):
+ return getattr(interface, d.pop("__type__"))(d["value"])
+
+
ENCODERS: List[Tuple[type, str, Callable]] = [
(model.BaseModel, "model", _encode_model_object),
(SWHID, "swhid", str),
- (model.MetadataTargetType, "model_enum", _encode_model_enum),
- (model.MetadataAuthorityType, "model_enum", _encode_model_enum),
+ (model.MetadataTargetType, "model_enum", _encode_enum),
+ (model.MetadataAuthorityType, "model_enum", _encode_enum),
+ (interface.ListOrder, "storage_enum", _encode_enum),
]
DECODERS: Dict[str, Callable] = {
"swhid": parse_swhid,
"model": lambda d: getattr(model, d.pop("__type__")).from_dict(d),
- "model_enum": lambda d: getattr(model, d.pop("__type__"))(d["value"]),
+ "model_enum": _decode_model_enum,
+ "model_enum": _decode_model_enum,
+ "storage_enum": _decode_storage_enum,
}
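Note (illustration only, not part of the patch): a minimal round-trip sketch of the new enum serializers, assuming we are inside swh/storage/api/serializers.py and that ListOrder is a standard Enum whose values are the strings "asc"/"desc" (as the CQL dispatcher below relies on):

    from swh.storage.interface import ListOrder

    encoded = _encode_enum(ListOrder.ASC)
    # encoded == {"value": "asc", "__type__": "ListOrder"}
    # _decode_storage_enum pops "__type__", so pass a copy if the dict is reused
    decoded = _decode_storage_enum(dict(encoded))
    assert decoded is ListOrder.ASC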
diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py
index b8a9f2d3..254d1daf 100644
--- a/swh/storage/cassandra/cql.py
+++ b/swh/storage/cassandra/cql.py
@@ -1,977 +1,976 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import functools
import json
import logging
import random
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
)
from cassandra import CoordinationFailure
from cassandra.cluster import Cluster, EXEC_PROFILE_DEFAULT, ExecutionProfile, ResultSet
from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy
from cassandra.query import PreparedStatement, BoundStatement
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
retry_if_exception_type,
)
from swh.model.model import (
Sha1Git,
TimestampWithTimezone,
Timestamp,
Person,
Content,
SkippedContent,
OriginVisit,
OriginVisitStatus,
Origin,
)
+from swh.storage.interface import ListOrder
+
from .common import Row, TOKEN_BEGIN, TOKEN_END, hash_url
from .schema import CREATE_TABLES_QUERIES, HASH_ALGORITHMS
logger = logging.getLogger(__name__)
_execution_profiles = {
EXEC_PROFILE_DEFAULT: ExecutionProfile(
load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy())
),
}
# Configuration for cassandra-driver's access to servers:
# * hit the right server directly when sending a query (TokenAwarePolicy),
# * if there's more than one, then pick one at random that's in the same
# datacenter as the client (DCAwareRoundRobinPolicy)
def create_keyspace(
hosts: List[str], keyspace: str, port: int = 9042, *, durable_writes=True
):
cluster = Cluster(hosts, port=port, execution_profiles=_execution_profiles)
session = cluster.connect()
extra_params = ""
if not durable_writes:
extra_params = "AND durable_writes = false"
session.execute(
"""CREATE KEYSPACE IF NOT EXISTS "%s"
WITH REPLICATION = {
'class' : 'SimpleStrategy',
'replication_factor' : 1
} %s;
"""
% (keyspace, extra_params)
)
session.execute('USE "%s"' % keyspace)
for query in CREATE_TABLES_QUERIES:
session.execute(query)
T = TypeVar("T")
def _prepared_statement(query: str) -> Callable[[Callable[..., T]], Callable[..., T]]:
"""Returns a decorator usable on methods of CqlRunner, to
inject them with a 'statement' argument, that is a prepared
statement corresponding to the query.
This only works on methods of CqlRunner, as preparing a
statement requires a connection to a Cassandra server."""
def decorator(f):
@functools.wraps(f)
def newf(self, *args, **kwargs) -> T:
if f.__name__ not in self._prepared_statements:
statement: PreparedStatement = self._session.prepare(query)
self._prepared_statements[f.__name__] = statement
return f(
self, *args, **kwargs, statement=self._prepared_statements[f.__name__]
)
return newf
return decorator
def _prepared_insert_statement(table_name: str, columns: List[str]):
"""Shorthand for using `_prepared_statement` for `INSERT INTO`
statements."""
return _prepared_statement(
"INSERT INTO %s (%s) VALUES (%s)"
% (table_name, ", ".join(columns), ", ".join("?" for _ in columns),)
)
def _prepared_exists_statement(table_name: str):
"""Shorthand for using `_prepared_statement` for queries that only
check which ids in a list exist in the table."""
return _prepared_statement(f"SELECT id FROM {table_name} WHERE id IN ?")
class CqlRunner:
"""Class managing prepared statements and building queries to be sent
to Cassandra."""
def __init__(self, hosts: List[str], keyspace: str, port: int):
self._cluster = Cluster(
hosts, port=port, execution_profiles=_execution_profiles
)
self._session = self._cluster.connect(keyspace)
self._cluster.register_user_type(
keyspace, "microtimestamp_with_timezone", TimestampWithTimezone
)
self._cluster.register_user_type(keyspace, "microtimestamp", Timestamp)
self._cluster.register_user_type(keyspace, "person", Person)
self._prepared_statements: Dict[str, PreparedStatement] = {}
##########################
# Common utility functions
##########################
MAX_RETRIES = 3
@retry(
wait=wait_random_exponential(multiplier=1, max=10),
stop=stop_after_attempt(MAX_RETRIES),
retry=retry_if_exception_type(CoordinationFailure),
)
def _execute_with_retries(self, statement, args) -> ResultSet:
return self._session.execute(statement, args, timeout=1000.0)
@_prepared_statement(
"UPDATE object_count SET count = count + ? "
"WHERE partition_key = 0 AND object_type = ?"
)
def _increment_counter(
self, object_type: str, nb: int, *, statement: PreparedStatement
) -> None:
self._execute_with_retries(statement, [nb, object_type])
def _add_one(self, statement, object_type: str, obj, keys: List[str]) -> None:
self._increment_counter(object_type, 1)
self._execute_with_retries(statement, [getattr(obj, key) for key in keys])
def _get_random_row(self, statement) -> Optional[Row]:
"""Takes a prepared statement of the form
"SELECT * FROM
WHERE token() > ? LIMIT 1"
and uses it to return a random row"""
token = random.randint(TOKEN_BEGIN, TOKEN_END)
rows = self._execute_with_retries(statement, [token])
if not rows:
# There are no rows with a greater token; wrap around to get
# the row with the smallest token
rows = self._execute_with_retries(statement, [TOKEN_BEGIN])
if rows:
return rows.one()
else:
return None
def _missing(self, statement, ids):
res = self._execute_with_retries(statement, [ids])
found_ids = {id_ for (id_,) in res}
return [id_ for id_ in ids if id_ not in found_ids]
##########################
# 'content' table
##########################
_content_pk = ["sha1", "sha1_git", "sha256", "blake2s256"]
_content_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
]
def _content_add_finalize(self, statement: BoundStatement) -> None:
"""Returned currified by content_add_prepare, to be called when the
content row should be added to the primary table."""
self._execute_with_retries(statement, None)
self._increment_counter("content", 1)
@_prepared_insert_statement("content", _content_keys)
def content_add_prepare(
self, content, *, statement
) -> Tuple[int, Callable[[], None]]:
"""Prepares insertion of a Content to the main 'content' table.
Returns a token (to be used in secondary tables), and a function to be
called to perform the insertion in the main table."""
statement = statement.bind(
[getattr(content, key) for key in self._content_keys]
)
# Type used for hashing keys (usually, it will be
# cassandra.metadata.Murmur3Token)
token_class = self._cluster.metadata.token_map.token_class
# Token of the row when it will be inserted. This is equivalent to
# "SELECT token({', '.join(self._content_pk)}) FROM content WHERE ..."
# after the row is inserted; but we need the token to insert in the
# index tables *before* inserting to the main 'content' table
token = token_class.from_key(statement.routing_key).value
assert TOKEN_BEGIN <= token <= TOKEN_END
# Function to be called after the indexes contain their respective
# row
finalizer = functools.partial(self._content_add_finalize, statement)
return (token, finalizer)
@_prepared_statement(
"SELECT * FROM content WHERE "
+ " AND ".join(map("%s = ?".__mod__, HASH_ALGORITHMS))
)
def content_get_from_pk(
self, content_hashes: Dict[str, bytes], *, statement
) -> Optional[Row]:
rows = list(
self._execute_with_retries(
statement, [content_hashes[algo] for algo in HASH_ALGORITHMS]
)
)
assert len(rows) <= 1
if rows:
return rows[0]
else:
return None
@_prepared_statement(
"SELECT * FROM content WHERE token(" + ", ".join(_content_pk) + ") = ?"
)
def content_get_from_token(self, token, *, statement) -> Iterable[Row]:
return self._execute_with_retries(statement, [token])
@_prepared_statement(
"SELECT * FROM content WHERE token(%s) > ? LIMIT 1" % ", ".join(_content_pk)
)
def content_get_random(self, *, statement) -> Optional[Row]:
return self._get_random_row(statement)
@_prepared_statement(
(
"SELECT token({0}) AS tok, {1} FROM content "
"WHERE token({0}) >= ? AND token({0}) <= ? LIMIT ?"
).format(", ".join(_content_pk), ", ".join(_content_keys))
)
def content_get_token_range(
self, start: int, end: int, limit: int, *, statement
) -> Iterable[Row]:
return self._execute_with_retries(statement, [start, end, limit])
##########################
# 'content_by_*' tables
##########################
@_prepared_statement("SELECT sha1_git FROM content_by_sha1_git WHERE sha1_git IN ?")
def content_missing_by_sha1_git(
self, ids: List[bytes], *, statement
) -> List[bytes]:
return self._missing(statement, ids)
def content_index_add_one(self, algo: str, content: Content, token: int) -> None:
"""Adds a row mapping content[algo] to the token of the Content in
the main 'content' table."""
query = (
f"INSERT INTO content_by_{algo} ({algo}, target_token) " f"VALUES (%s, %s)"
)
self._execute_with_retries(query, [content.get_hash(algo), token])
def content_get_tokens_from_single_hash(
self, algo: str, hash_: bytes
) -> Iterable[int]:
assert algo in HASH_ALGORITHMS
query = f"SELECT target_token FROM content_by_{algo} WHERE {algo} = %s"
return (tok for (tok,) in self._execute_with_retries(query, [hash_]))
##########################
# 'skipped_content' table
##########################
_skipped_content_pk = ["sha1", "sha1_git", "sha256", "blake2s256"]
_skipped_content_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
"reason",
"origin",
]
_magic_null_pk = b""
"""
NULLs (or all-empty blobs) are not allowed in primary keys; instead use a
special value that can't possibly be a valid hash.
"""
def _skipped_content_add_finalize(self, statement: BoundStatement) -> None:
"""Returned currified by skipped_content_add_prepare, to be called
when the content row should be added to the primary table."""
self._execute_with_retries(statement, None)
self._increment_counter("skipped_content", 1)
@_prepared_insert_statement("skipped_content", _skipped_content_keys)
def skipped_content_add_prepare(
self, content, *, statement
) -> Tuple[int, Callable[[], None]]:
"""Prepares insertion of a Content to the main 'skipped_content' table.
Returns a token (to be used in secondary tables), and a function to be
called to perform the insertion in the main table."""
# Replace NULLs (which are not allowed in the partition key) with
# an empty byte string
content = content.to_dict()
for key in self._skipped_content_pk:
if content[key] is None:
content[key] = self._magic_null_pk
statement = statement.bind(
[content.get(key) for key in self._skipped_content_keys]
)
# Type used for hashing keys (usually, it will be
# cassandra.metadata.Murmur3Token)
token_class = self._cluster.metadata.token_map.token_class
# Token of the row when it will be inserted. This is equivalent to
# "SELECT token({', '.join(self._content_pk)})
# FROM skipped_content WHERE ..."
# after the row is inserted; but we need the token to insert in the
# index tables *before* inserting to the main 'skipped_content' table
token = token_class.from_key(statement.routing_key).value
assert TOKEN_BEGIN <= token <= TOKEN_END
# Function to be called after the indexes contain their respective
# row
finalizer = functools.partial(self._skipped_content_add_finalize, statement)
return (token, finalizer)
@_prepared_statement(
"SELECT * FROM skipped_content WHERE "
+ " AND ".join(map("%s = ?".__mod__, HASH_ALGORITHMS))
)
def skipped_content_get_from_pk(
self, content_hashes: Dict[str, bytes], *, statement
) -> Optional[Row]:
rows = list(
self._execute_with_retries(
statement,
[
content_hashes[algo] or self._magic_null_pk
for algo in HASH_ALGORITHMS
],
)
)
assert len(rows) <= 1
if rows:
# TODO: convert _magic_null_pk back to None?
return rows[0]
else:
return None
##########################
# 'skipped_content_by_*' tables
##########################
def skipped_content_index_add_one(
self, algo: str, content: SkippedContent, token: int
) -> None:
"""Adds a row mapping content[algo] to the token of the SkippedContent
in the main 'skipped_content' table."""
query = (
f"INSERT INTO skipped_content_by_{algo} ({algo}, target_token) "
f"VALUES (%s, %s)"
)
self._execute_with_retries(
query, [content.get_hash(algo) or self._magic_null_pk, token]
)
##########################
# 'revision' table
##########################
_revision_keys = [
"id",
"date",
"committer_date",
"type",
"directory",
"message",
"author",
"committer",
"synthetic",
"metadata",
"extra_headers",
]
@_prepared_exists_statement("revision")
def revision_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement("revision", _revision_keys)
def revision_add_one(self, revision: Dict[str, Any], *, statement) -> None:
self._execute_with_retries(
statement, [revision[key] for key in self._revision_keys]
)
self._increment_counter("revision", 1)
@_prepared_statement("SELECT id FROM revision WHERE id IN ?")
def revision_get_ids(self, revision_ids, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [revision_ids])
@_prepared_statement("SELECT * FROM revision WHERE id IN ?")
def revision_get(self, revision_ids, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [revision_ids])
@_prepared_statement("SELECT * FROM revision WHERE token(id) > ? LIMIT 1")
def revision_get_random(self, *, statement) -> Optional[Row]:
return self._get_random_row(statement)
##########################
# 'revision_parent' table
##########################
_revision_parent_keys = ["id", "parent_rank", "parent_id"]
@_prepared_insert_statement("revision_parent", _revision_parent_keys)
def revision_parent_add_one(
self, id_: Sha1Git, parent_rank: int, parent_id: Sha1Git, *, statement
) -> None:
self._execute_with_retries(statement, [id_, parent_rank, parent_id])
@_prepared_statement("SELECT parent_id FROM revision_parent WHERE id = ?")
def revision_parent_get(self, revision_id: Sha1Git, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [revision_id])
##########################
# 'release' table
##########################
_release_keys = [
"id",
"target",
"target_type",
"date",
"name",
"message",
"author",
"synthetic",
]
@_prepared_exists_statement("release")
def release_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement("release", _release_keys)
def release_add_one(self, release: Dict[str, Any], *, statement) -> None:
self._execute_with_retries(
statement, [release[key] for key in self._release_keys]
)
self._increment_counter("release", 1)
@_prepared_statement("SELECT * FROM release WHERE id in ?")
def release_get(self, release_ids: List[str], *, statement) -> ResultSet:
return self._execute_with_retries(statement, [release_ids])
@_prepared_statement("SELECT * FROM release WHERE token(id) > ? LIMIT 1")
def release_get_random(self, *, statement) -> Optional[Row]:
return self._get_random_row(statement)
##########################
# 'directory' table
##########################
_directory_keys = ["id"]
@_prepared_exists_statement("directory")
def directory_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement("directory", _directory_keys)
def directory_add_one(self, directory_id: Sha1Git, *, statement) -> None:
"""Called after all calls to directory_entry_add_one, to
commit/finalize the directory."""
self._execute_with_retries(statement, [directory_id])
self._increment_counter("directory", 1)
@_prepared_statement("SELECT * FROM directory WHERE token(id) > ? LIMIT 1")
def directory_get_random(self, *, statement) -> Optional[Row]:
return self._get_random_row(statement)
##########################
# 'directory_entry' table
##########################
_directory_entry_keys = ["directory_id", "name", "type", "target", "perms"]
@_prepared_insert_statement("directory_entry", _directory_entry_keys)
def directory_entry_add_one(self, entry: Dict[str, Any], *, statement) -> None:
self._execute_with_retries(
statement, [entry[key] for key in self._directory_entry_keys]
)
@_prepared_statement("SELECT * FROM directory_entry WHERE directory_id IN ?")
def directory_entry_get(self, directory_ids, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [directory_ids])
##########################
# 'snapshot' table
##########################
_snapshot_keys = ["id"]
@_prepared_exists_statement("snapshot")
def snapshot_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
return self._missing(statement, ids)
@_prepared_insert_statement("snapshot", _snapshot_keys)
def snapshot_add_one(self, snapshot_id: Sha1Git, *, statement) -> None:
self._execute_with_retries(statement, [snapshot_id])
self._increment_counter("snapshot", 1)
@_prepared_statement("SELECT * FROM snapshot WHERE id = ?")
def snapshot_get(self, snapshot_id: Sha1Git, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [snapshot_id])
@_prepared_statement("SELECT * FROM snapshot WHERE token(id) > ? LIMIT 1")
def snapshot_get_random(self, *, statement) -> Optional[Row]:
return self._get_random_row(statement)
##########################
# 'snapshot_branch' table
##########################
_snapshot_branch_keys = ["snapshot_id", "name", "target_type", "target"]
@_prepared_insert_statement("snapshot_branch", _snapshot_branch_keys)
def snapshot_branch_add_one(self, branch: Dict[str, Any], *, statement) -> None:
self._execute_with_retries(
statement, [branch[key] for key in self._snapshot_branch_keys]
)
@_prepared_statement(
"SELECT ascii_bins_count(target_type) AS counts "
"FROM snapshot_branch "
"WHERE snapshot_id = ? "
)
def snapshot_count_branches(self, snapshot_id: Sha1Git, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [snapshot_id])
@_prepared_statement(
"SELECT * FROM snapshot_branch WHERE snapshot_id = ? AND name >= ? LIMIT ?"
)
def snapshot_branch_get(
self, snapshot_id: Sha1Git, from_: bytes, limit: int, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [snapshot_id, from_, limit])
##########################
# 'origin' table
##########################
origin_keys = ["sha1", "url", "type", "next_visit_id"]
@_prepared_statement(
"INSERT INTO origin (sha1, url, next_visit_id) "
"VALUES (?, ?, 1) IF NOT EXISTS"
)
def origin_add_one(self, origin: Origin, *, statement) -> None:
self._execute_with_retries(statement, [hash_url(origin.url), origin.url])
self._increment_counter("origin", 1)
@_prepared_statement("SELECT * FROM origin WHERE sha1 = ?")
def origin_get_by_sha1(self, sha1: bytes, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [sha1])
def origin_get_by_url(self, url: str) -> ResultSet:
return self.origin_get_by_sha1(hash_url(url))
@_prepared_statement(
f'SELECT token(sha1) AS tok, {", ".join(origin_keys)} '
f"FROM origin WHERE token(sha1) >= ? LIMIT ?"
)
def origin_list(self, start_token: int, limit: int, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [start_token, limit])
@_prepared_statement("SELECT * FROM origin")
def origin_iter_all(self, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [])
@_prepared_statement("SELECT next_visit_id FROM origin WHERE sha1 = ?")
def _origin_get_next_visit_id(self, origin_sha1: bytes, *, statement) -> int:
rows = list(self._execute_with_retries(statement, [origin_sha1]))
assert len(rows) == 1 # TODO: error handling
return rows[0].next_visit_id
@_prepared_statement(
"UPDATE origin SET next_visit_id=? WHERE sha1 = ? IF next_visit_id=?"
)
def origin_generate_unique_visit_id(self, origin_url: str, *, statement) -> int:
origin_sha1 = hash_url(origin_url)
next_id = self._origin_get_next_visit_id(origin_sha1)
while True:
res = list(
self._execute_with_retries(
statement, [next_id + 1, origin_sha1, next_id]
)
)
assert len(res) == 1
if res[0].applied:
# No data race
return next_id
else:
# Someone else updated it before we did, let's try again
next_id = res[0].next_visit_id
# TODO: abort after too many attempts
return next_id
##########################
# 'origin_visit' table
##########################
_origin_visit_keys = [
"origin",
"visit",
"type",
"date",
]
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? AND visit > ? "
"ORDER BY visit ASC"
)
def _origin_visit_get_pagination_asc_no_limit(
self, origin_url: str, last_visit: int, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url, last_visit])
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? AND visit > ? "
"ORDER BY visit ASC "
"LIMIT ?"
)
def _origin_visit_get_pagination_asc_limit(
self, origin_url: str, last_visit: int, limit: int, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url, last_visit, limit])
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? AND visit < ? "
"ORDER BY visit DESC"
)
def _origin_visit_get_pagination_desc_no_limit(
self, origin_url: str, last_visit: int, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url, last_visit])
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? AND visit < ? "
"ORDER BY visit DESC "
"LIMIT ?"
)
def _origin_visit_get_pagination_desc_limit(
self, origin_url: str, last_visit: int, limit: int, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url, last_visit, limit])
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit ASC LIMIT ?"
)
def _origin_visit_get_no_pagination_asc_limit(
self, origin_url: str, limit: int, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url, limit])
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit ASC "
)
def _origin_visit_get_no_pagination_asc_no_limit(
self, origin_url: str, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url])
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit DESC"
)
def _origin_visit_get_no_pagination_desc_no_limit(
self, origin_url: str, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url])
@_prepared_statement(
"SELECT * FROM origin_visit WHERE origin = ? ORDER BY visit DESC LIMIT ?"
)
def _origin_visit_get_no_pagination_desc_limit(
self, origin_url: str, limit: int, *, statement
) -> ResultSet:
return self._execute_with_retries(statement, [origin_url, limit])
def origin_visit_get(
self,
origin_url: str,
last_visit: Optional[int],
limit: Optional[int],
- order: str = "asc",
+ order: ListOrder,
) -> ResultSet:
- order = order.lower()
- assert order in ["asc", "desc"]
-
args: List[Any] = [origin_url]
if last_visit is not None:
page_name = "pagination"
args.append(last_visit)
else:
page_name = "no_pagination"
if limit is not None:
limit_name = "limit"
args.append(limit)
else:
limit_name = "no_limit"
- method_name = f"_origin_visit_get_{page_name}_{order}_{limit_name}"
+ method_name = f"_origin_visit_get_{page_name}_{order.value}_{limit_name}"
origin_visit_get_method = getattr(self, method_name)
return origin_visit_get_method(*args)
@_prepared_insert_statement("origin_visit", _origin_visit_keys)
def origin_visit_add_one(self, visit: OriginVisit, *, statement) -> None:
self._add_one(statement, "origin_visit", visit, self._origin_visit_keys)
_origin_visit_status_keys = [
"origin",
"visit",
"date",
"status",
"snapshot",
"metadata",
]
@_prepared_insert_statement("origin_visit_status", _origin_visit_status_keys)
def origin_visit_status_add_one(
self, visit_update: OriginVisitStatus, *, statement
) -> None:
assert self._origin_visit_status_keys[-1] == "metadata"
keys = self._origin_visit_status_keys
metadata = json.dumps(
dict(visit_update.metadata) if visit_update.metadata is not None else None
)
self._execute_with_retries(
statement, [getattr(visit_update, key) for key in keys[:-1]] + [metadata]
)
def origin_visit_status_get_latest(self, origin: str, visit: int,) -> Optional[Row]:
"""Given an origin visit id, return its latest origin_visit_status
"""
rows = self.origin_visit_status_get(origin, visit)
return rows[0] if rows else None
@_prepared_statement(
"SELECT * FROM origin_visit_status "
"WHERE origin = ? AND visit = ? "
"ORDER BY date DESC"
)
def origin_visit_status_get(
self,
origin: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
*,
statement,
) -> List[Row]:
"""Return all origin visit statuses for a given visit
"""
return list(self._execute_with_retries(statement, [origin, visit]))
@_prepared_statement("SELECT * FROM origin_visit WHERE origin = ? AND visit = ?")
def origin_visit_get_one(
self, origin_url: str, visit_id: int, *, statement
) -> Optional[Row]:
# TODO: error handling
rows = list(self._execute_with_retries(statement, [origin_url, visit_id]))
if rows:
return rows[0]
else:
return None
@_prepared_statement("SELECT * FROM origin_visit WHERE origin = ?")
def origin_visit_get_all(self, origin_url: str, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [origin_url])
@_prepared_statement("SELECT * FROM origin_visit WHERE token(origin) >= ?")
def _origin_visit_iter_from(self, min_token: int, *, statement) -> Iterator[Row]:
yield from self._execute_with_retries(statement, [min_token])
@_prepared_statement("SELECT * FROM origin_visit WHERE token(origin) < ?")
def _origin_visit_iter_to(self, max_token: int, *, statement) -> Iterator[Row]:
yield from self._execute_with_retries(statement, [max_token])
def origin_visit_iter(self, start_token: int) -> Iterator[Row]:
"""Returns all origin visits in order from this token,
and wraps around the token space."""
yield from self._origin_visit_iter_from(start_token)
yield from self._origin_visit_iter_to(start_token)
##########################
# 'metadata_authority' table
##########################
_metadata_authority_keys = ["url", "type", "metadata"]
@_prepared_insert_statement("metadata_authority", _metadata_authority_keys)
def metadata_authority_add(self, url, type, metadata, *, statement):
return self._execute_with_retries(statement, [url, type, metadata])
@_prepared_statement("SELECT * from metadata_authority WHERE type = ? AND url = ?")
def metadata_authority_get(self, type, url, *, statement) -> Optional[Row]:
return next(iter(self._execute_with_retries(statement, [type, url])), None)
##########################
# 'metadata_fetcher' table
##########################
_metadata_fetcher_keys = ["name", "version", "metadata"]
@_prepared_insert_statement("metadata_fetcher", _metadata_fetcher_keys)
def metadata_fetcher_add(self, name, version, metadata, *, statement):
return self._execute_with_retries(statement, [name, version, metadata])
@_prepared_statement(
"SELECT * from metadata_fetcher WHERE name = ? AND version = ?"
)
def metadata_fetcher_get(self, name, version, *, statement) -> Optional[Row]:
return next(iter(self._execute_with_retries(statement, [name, version])), None)
#########################
# 'raw_extrinsic_metadata' table
#########################
_raw_extrinsic_metadata_keys = [
"type",
"id",
"authority_type",
"authority_url",
"discovery_date",
"fetcher_name",
"fetcher_version",
"format",
"metadata",
"origin",
"visit",
"snapshot",
"release",
"revision",
"path",
"directory",
]
@_prepared_statement(
f"INSERT INTO raw_extrinsic_metadata "
f" ({', '.join(_raw_extrinsic_metadata_keys)}) "
f"VALUES ({', '.join('?' for _ in _raw_extrinsic_metadata_keys)})"
)
def raw_extrinsic_metadata_add(
self, statement, **kwargs,
):
assert set(kwargs) == set(
self._raw_extrinsic_metadata_keys
), f"Bad kwargs: {set(kwargs)}"
params = [kwargs[key] for key in self._raw_extrinsic_metadata_keys]
return self._execute_with_retries(statement, params,)
@_prepared_statement(
"SELECT * from raw_extrinsic_metadata "
"WHERE id=? AND authority_url=? AND discovery_date>? AND authority_type=?"
)
def raw_extrinsic_metadata_get_after_date(
self,
id: str,
authority_type: str,
authority_url: str,
after: datetime.datetime,
*,
statement,
):
return self._execute_with_retries(
statement, [id, authority_url, after, authority_type]
)
@_prepared_statement(
"SELECT * from raw_extrinsic_metadata "
"WHERE id=? AND authority_type=? AND authority_url=? "
"AND (discovery_date, fetcher_name, fetcher_version) > (?, ?, ?)"
)
def raw_extrinsic_metadata_get_after_date_and_fetcher(
self,
id: str,
authority_type: str,
authority_url: str,
after_date: datetime.datetime,
after_fetcher_name: str,
after_fetcher_version: str,
*,
statement,
):
return self._execute_with_retries(
statement,
[
id,
authority_type,
authority_url,
after_date,
after_fetcher_name,
after_fetcher_version,
],
)
@_prepared_statement(
"SELECT * from raw_extrinsic_metadata "
"WHERE id=? AND authority_url=? AND authority_type=?"
)
def raw_extrinsic_metadata_get(
self, id: str, authority_type: str, authority_url: str, *, statement
) -> Iterable[Row]:
return self._execute_with_retries(
statement, [id, authority_url, authority_type]
)
##########################
# Miscellaneous
##########################
@_prepared_statement("SELECT uuid() FROM revision LIMIT 1;")
def check_read(self, *, statement):
self._execute_with_retries(statement, [])
@_prepared_statement(
"SELECT object_type, count FROM object_count WHERE partition_key=0"
)
def stat_counters(self, *, statement) -> ResultSet:
return self._execute_with_retries(statement, [])
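Note (illustration only, not part of the patch): with the change above, CqlRunner.origin_visit_get composes the prepared-statement method name from the pagination, order and limit arguments. A hypothetical call, assuming a connected CqlRunner instance named runner:

    rows = runner.origin_visit_get(
        "https://example.org/repo.git",  # origin_url
        last_visit=42,                   # not None -> "pagination"
        limit=10,                        # not None -> "limit"
        order=ListOrder.DESC,            # .value -> "desc"
    )
    # dispatches to runner._origin_visit_get_pagination_desc_limit(
    #     "https://example.org/repo.git", 42, 10)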
diff --git a/swh/storage/cassandra/storage.py b/swh/storage/cassandra/storage.py
index 6f2c581d..de2d9a3d 100644
--- a/swh/storage/cassandra/storage.py
+++ b/swh/storage/cassandra/storage.py
@@ -1,1190 +1,1186 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import itertools
import json
import random
import re
from typing import Any, Dict, List, Iterable, Optional, Tuple, Union
import attr
from swh.core.api.serializers import msgpack_loads, msgpack_dumps
from swh.model.identifiers import parse_swhid, SWHID
from swh.model.hashutil import DEFAULT_ALGORITHMS
from swh.model.model import (
Revision,
Release,
Directory,
DirectoryEntry,
Content,
SkippedContent,
OriginVisit,
OriginVisitStatus,
Snapshot,
Origin,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
MetadataTargetType,
RawExtrinsicMetadata,
)
-from swh.storage.interface import PagedResult
+from swh.storage.interface import ListOrder, PagedResult
from swh.storage.objstorage import ObjStorage
from swh.storage.writer import JournalWriter
from swh.storage.utils import map_optional, now
from ..exc import StorageArgumentException, HashCollision
from .common import TOKEN_BEGIN, TOKEN_END
from . import converters
from .cql import CqlRunner
from .schema import HASH_ALGORITHMS
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
class CassandraStorage:
def __init__(self, hosts, keyspace, objstorage, port=9042, journal_writer=None):
self._cql_runner = CqlRunner(hosts, keyspace, port)
self.journal_writer = JournalWriter(journal_writer)
self.objstorage = ObjStorage(objstorage)
def check_config(self, *, check_write):
self._cql_runner.check_read()
return True
def _content_get_from_hash(self, algo, hash_) -> Iterable:
"""From the name of a hash algorithm and a value of that hash,
looks up the "hash -> token" secondary table (content_by_{algo})
to get tokens.
Then, looks up the main table (content) to get all contents with
that token, and filters out contents whose hash doesn't match."""
found_tokens = self._cql_runner.content_get_tokens_from_single_hash(algo, hash_)
for token in found_tokens:
# Query the main table ('content').
res = self._cql_runner.content_get_from_token(token)
for row in res:
# re-check the hash (in case of murmur3 collision)
if getattr(row, algo) == hash_:
yield row
def _content_add(self, contents: List[Content], with_data: bool) -> Dict:
# Filter-out content already in the database.
contents = [
c for c in contents if not self._cql_runner.content_get_from_pk(c.to_dict())
]
self.journal_writer.content_add(contents)
if with_data:
# First insert to the objstorage, if the endpoint is
# `content_add` (as opposed to `content_add_metadata`).
# TODO: this should probably be done concurrently with inserting
# in index tables (but still before the main table, so an entry is
# only added to the main table after everything else was
# successfully inserted).
summary = self.objstorage.content_add(
c for c in contents if c.status != "absent"
)
content_add_bytes = summary["content:add:bytes"]
content_add = 0
for content in contents:
content_add += 1
# Check for sha1 or sha1_git collisions. This test is not atomic
# with the insertion, so it won't detect a collision if both
# contents are inserted at the same time, but it's good enough.
#
# The proper way to do it would probably be a BATCH, but this
# would be inefficient because of the number of partitions we
# need to affect (len(HASH_ALGORITHMS)+1, which is currently 5)
for algo in {"sha1", "sha1_git"}:
collisions = []
# Get tokens of 'content' rows with the same value for
# sha1/sha1_git
rows = self._content_get_from_hash(algo, content.get_hash(algo))
for row in rows:
if getattr(row, algo) != content.get_hash(algo):
# collision of token(partition key), ignore this
# row
continue
for algo in HASH_ALGORITHMS:
if getattr(row, algo) != content.get_hash(algo):
# This hash didn't match; discard the row.
collisions.append(
{algo: getattr(row, algo) for algo in HASH_ALGORITHMS}
)
if collisions:
collisions.append(content.hashes())
raise HashCollision(algo, content.get_hash(algo), collisions)
(token, insertion_finalizer) = self._cql_runner.content_add_prepare(content)
# Then add to index tables
for algo in HASH_ALGORITHMS:
self._cql_runner.content_index_add_one(algo, content, token)
# Then to the main table
insertion_finalizer()
summary = {
"content:add": content_add,
}
if with_data:
summary["content:add:bytes"] = content_add_bytes
return summary
def content_add(self, content: Iterable[Content]) -> Dict:
contents = [attr.evolve(c, ctime=now()) for c in content]
return self._content_add(list(contents), with_data=True)
def content_update(self, content, keys=[]):
raise NotImplementedError(
"content_update is not supported by the Cassandra backend"
)
def content_add_metadata(self, content: Iterable[Content]) -> Dict:
return self._content_add(list(content), with_data=False)
def content_get(self, content):
if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
raise StorageArgumentException(
"Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX
)
yield from self.objstorage.content_get(content)
def content_get_partition(
self,
partition_id: int,
nb_partitions: int,
limit: int = 1000,
page_token: str = None,
):
if limit is None:
raise StorageArgumentException("limit should not be None")
# Compute start and end of the range of tokens covered by the
# requested partition
partition_size = (TOKEN_END - TOKEN_BEGIN) // nb_partitions
range_start = TOKEN_BEGIN + partition_id * partition_size
range_end = TOKEN_BEGIN + (partition_id + 1) * partition_size
# offset the range start according to the `page_token`.
if page_token is not None:
if not (range_start <= int(page_token) <= range_end):
raise StorageArgumentException("Invalid page_token.")
range_start = int(page_token)
# Get the first rows of the range
rows = self._cql_runner.content_get_token_range(range_start, range_end, limit)
rows = list(rows)
if len(rows) == limit:
next_page_token: Optional[str] = str(rows[-1].tok + 1)
else:
next_page_token = None
return {
"contents": [row._asdict() for row in rows if row.status != "absent"],
"next_page_token": next_page_token,
}
def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
result: Dict[bytes, List[Dict]] = {sha1: [] for sha1 in contents}
for sha1 in contents:
# Get all (sha1, sha1_git, sha256, blake2s256) whose sha1
# matches the argument, from the index table ('content_by_sha1')
for row in self._content_get_from_hash("sha1", sha1):
content_metadata = row._asdict()
content_metadata.pop("ctime")
result[content_metadata["sha1"]].append(content_metadata)
return result
def content_find(self, content):
# Find an algorithm that is common to all the requested contents.
# It will be used to do an initial filtering efficiently.
filter_algos = list(set(content).intersection(HASH_ALGORITHMS))
if not filter_algos:
raise StorageArgumentException(
"content keys must contain at least one of: "
"%s" % ", ".join(sorted(HASH_ALGORITHMS))
)
common_algo = filter_algos[0]
results = []
rows = self._content_get_from_hash(common_algo, content[common_algo])
for row in rows:
# Re-check all the hashes, in case of collisions (either of the
# hash of the partition key, or the hashes in it)
for algo in HASH_ALGORITHMS:
if content.get(algo) and getattr(row, algo) != content[algo]:
# This hash didn't match; discard the row.
break
else:
# All hashes match, keep this row.
results.append(
{
**row._asdict(),
"ctime": row.ctime.replace(tzinfo=datetime.timezone.utc),
}
)
return results
def content_missing(self, content, key_hash="sha1"):
for cont in content:
res = self.content_find(cont)
if not res:
yield cont[key_hash]
if any(c["status"] == "missing" for c in res):
yield cont[key_hash]
def content_missing_per_sha1(self, contents):
return self.content_missing([{"sha1": c for c in contents}])
def content_missing_per_sha1_git(self, contents):
return self.content_missing(
[{"sha1_git": c for c in contents}], key_hash="sha1_git"
)
def content_get_random(self):
return self._cql_runner.content_get_random().sha1_git
def _skipped_content_get_from_hash(self, algo, hash_) -> Iterable:
"""From the name of a hash algorithm and a value of that hash,
looks up the "hash -> token" secondary table
(skipped_content_by_{algo}) to get tokens.
Then, looks up the main table (content) to get all contents with
that token, and filters out contents whose hash doesn't match."""
found_tokens = self._cql_runner.skipped_content_get_tokens_from_single_hash(
algo, hash_
)
for token in found_tokens:
# Query the main table ('content').
res = self._cql_runner.skipped_content_get_from_token(token)
for row in res:
# re-check the hash (in case of murmur3 collision)
if getattr(row, algo) == hash_:
yield row
def _skipped_content_add(self, contents: Iterable[SkippedContent]) -> Dict:
# Filter-out content already in the database.
contents = [
c
for c in contents
if not self._cql_runner.skipped_content_get_from_pk(c.to_dict())
]
self.journal_writer.skipped_content_add(contents)
for content in contents:
# Compute token of the row in the main table
(token, insertion_finalizer) = self._cql_runner.skipped_content_add_prepare(
content
)
# Then add to index tables
for algo in HASH_ALGORITHMS:
self._cql_runner.skipped_content_index_add_one(algo, content, token)
# Then to the main table
insertion_finalizer()
return {"skipped_content:add": len(contents)}
def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict:
contents = [attr.evolve(c, ctime=now()) for c in content]
return self._skipped_content_add(contents)
def skipped_content_missing(self, contents):
for content in contents:
if not self._cql_runner.skipped_content_get_from_pk(content):
yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS}
def directory_add(self, directories: Iterable[Directory]) -> Dict:
directories = list(directories)
# Filter out directories that are already inserted.
missing = self.directory_missing([dir_.id for dir_ in directories])
directories = [dir_ for dir_ in directories if dir_.id in missing]
self.journal_writer.directory_add(directories)
for directory in directories:
# Add directory entries to the 'directory_entry' table
for entry in directory.entries:
self._cql_runner.directory_entry_add_one(
{**entry.to_dict(), "directory_id": directory.id}
)
# Add the directory *after* adding all the entries, so someone
# calling snapshot_get_branch in the meantime won't end up
# with half the entries.
self._cql_runner.directory_add_one(directory.id)
return {"directory:add": len(missing)}
def directory_missing(self, directories):
return self._cql_runner.directory_missing(directories)
def _join_dentry_to_content(self, dentry):
keys = (
"status",
"sha1",
"sha1_git",
"sha256",
"length",
)
ret = dict.fromkeys(keys)
ret.update(dentry.to_dict())
if ret["type"] == "file":
content = self.content_find({"sha1_git": ret["target"]})
if content:
content = content[0]
for key in keys:
ret[key] = content[key]
return ret
def _directory_ls(self, directory_id, recursive, prefix=b""):
if self.directory_missing([directory_id]):
return
rows = list(self._cql_runner.directory_entry_get([directory_id]))
for row in rows:
# Build and yield the directory entry dict
entry = row._asdict()
del entry["directory_id"]
entry = DirectoryEntry.from_dict(entry)
ret = self._join_dentry_to_content(entry)
ret["name"] = prefix + ret["name"]
ret["dir_id"] = directory_id
yield ret
if recursive and ret["type"] == "dir":
yield from self._directory_ls(
ret["target"], True, prefix + ret["name"] + b"/"
)
def directory_entry_get_by_path(self, directory, paths):
return self._directory_entry_get_by_path(directory, paths, b"")
def _directory_entry_get_by_path(self, directory, paths, prefix):
if not paths:
return
contents = list(self.directory_ls(directory))
if not contents:
return
def _get_entry(entries, name):
"""Finds the entry with the requested name, prepends the
prefix (to get its full path), and returns it.
If no entry has that name, returns None."""
for entry in entries:
if entry["name"] == name:
entry = entry.copy()
entry["name"] = prefix + entry["name"]
return entry
first_item = _get_entry(contents, paths[0])
if len(paths) == 1:
return first_item
if not first_item or first_item["type"] != "dir":
return
return self._directory_entry_get_by_path(
first_item["target"], paths[1:], prefix + paths[0] + b"/"
)
def directory_ls(self, directory, recursive=False):
yield from self._directory_ls(directory, recursive)
def directory_get_random(self):
return self._cql_runner.directory_get_random().id
def revision_add(self, revisions: Iterable[Revision]) -> Dict:
revisions = list(revisions)
# Filter-out revisions already in the database
missing = self.revision_missing([rev.id for rev in revisions])
revisions = [rev for rev in revisions if rev.id in missing]
self.journal_writer.revision_add(revisions)
for revision in revisions:
revobject = converters.revision_to_db(revision)
if revobject:
# Add parents first
for (rank, parent) in enumerate(revobject["parents"]):
self._cql_runner.revision_parent_add_one(
revobject["id"], rank, parent
)
# Then write the main revision row.
# Writing this after all parents were written ensures that
# read endpoints don't return a partial view while writing
# the parents
self._cql_runner.revision_add_one(revobject)
return {"revision:add": len(revisions)}
def revision_missing(self, revisions):
return self._cql_runner.revision_missing(revisions)
def revision_get(self, revisions):
rows = self._cql_runner.revision_get(revisions)
revs = {}
for row in rows:
# TODO: use a single query to get all parents?
# (it might have lower latency, but requires more code and more
# bandwidth, because revision id would be part of each returned
# row)
parent_rows = self._cql_runner.revision_parent_get(row.id)
# parent_rank is the clustering key, so results are already
# sorted by rank.
parents = tuple(row.parent_id for row in parent_rows)
rev = converters.revision_from_db(row, parents=parents)
revs[rev.id] = rev.to_dict()
for rev_id in revisions:
yield revs.get(rev_id)
def _get_parent_revs(self, rev_ids, seen, limit, short):
if limit and len(seen) >= limit:
return
rev_ids = [id_ for id_ in rev_ids if id_ not in seen]
if not rev_ids:
return
seen |= set(rev_ids)
# We need this query, even if short=True, to return consistent
# results (ie. not return only a subset of a revision's parents
# if it is being written)
if short:
rows = self._cql_runner.revision_get_ids(rev_ids)
else:
rows = self._cql_runner.revision_get(rev_ids)
for row in rows:
# TODO: use a single query to get all parents?
# (it might have lower latency, but requires more code and more
# bandwidth, because revision id would be part of each returned
# row)
parent_rows = self._cql_runner.revision_parent_get(row.id)
# parent_rank is the clustering key, so results are already
# sorted by rank.
parents = tuple(row.parent_id for row in parent_rows)
if short:
yield (row.id, parents)
else:
rev = converters.revision_from_db(row, parents=parents)
yield rev.to_dict()
yield from self._get_parent_revs(parents, seen, limit, short)
def revision_log(self, revisions, limit=None):
seen = set()
yield from self._get_parent_revs(revisions, seen, limit, False)
def revision_shortlog(self, revisions, limit=None):
seen = set()
yield from self._get_parent_revs(revisions, seen, limit, True)
def revision_get_random(self):
return self._cql_runner.revision_get_random().id
def release_add(self, releases: Iterable[Release]) -> Dict:
to_add = []
for rel in releases:
if rel not in to_add:
to_add.append(rel)
missing = set(self.release_missing([rel.id for rel in to_add]))
to_add = [rel for rel in to_add if rel.id in missing]
self.journal_writer.release_add(to_add)
for release in to_add:
if release:
self._cql_runner.release_add_one(converters.release_to_db(release))
return {"release:add": len(to_add)}
def release_missing(self, releases):
return self._cql_runner.release_missing(releases)
def release_get(self, releases):
rows = self._cql_runner.release_get(releases)
rels = {}
for row in rows:
release = converters.release_from_db(row)
rels[row.id] = release.to_dict()
for rel_id in releases:
yield rels.get(rel_id)
def release_get_random(self):
return self._cql_runner.release_get_random().id
def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict:
snapshots = list(snapshots)
missing = self._cql_runner.snapshot_missing([snp.id for snp in snapshots])
snapshots = [snp for snp in snapshots if snp.id in missing]
for snapshot in snapshots:
self.journal_writer.snapshot_add([snapshot])
# Add branches
for (branch_name, branch) in snapshot.branches.items():
if branch is None:
target_type = None
target = None
else:
target_type = branch.target_type.value
target = branch.target
self._cql_runner.snapshot_branch_add_one(
{
"snapshot_id": snapshot.id,
"name": branch_name,
"target_type": target_type,
"target": target,
}
)
# Add the snapshot *after* adding all the branches, so someone
# calling snapshot_get_branch in the meantime won't end up
# with half the branches.
self._cql_runner.snapshot_add_one(snapshot.id)
return {"snapshot:add": len(snapshots)}
def snapshot_missing(self, snapshots):
return self._cql_runner.snapshot_missing(snapshots)
def snapshot_get(self, snapshot_id):
return self.snapshot_get_branches(snapshot_id)
def snapshot_get_by_origin_visit(self, origin, visit):
visit_status = self.origin_visit_status_get_latest(
origin, visit, require_snapshot=True
)
if not visit_status:
return None
return self.snapshot_get(visit_status.snapshot)
def snapshot_count_branches(self, snapshot_id):
if self._cql_runner.snapshot_missing([snapshot_id]):
# Makes sure we don't fetch branches for a snapshot that is
# being added.
return None
rows = list(self._cql_runner.snapshot_count_branches(snapshot_id))
assert len(rows) == 1
(nb_none, counts) = rows[0].counts
counts = dict(counts)
if nb_none:
counts[None] = nb_none
return counts
def snapshot_get_branches(
self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None
):
if self._cql_runner.snapshot_missing([snapshot_id]):
# Makes sure we don't fetch branches for a snapshot that is
# being added.
return None
branches = []
while len(branches) < branches_count + 1:
new_branches = list(
self._cql_runner.snapshot_branch_get(
snapshot_id, branches_from, branches_count + 1
)
)
if not new_branches:
break
branches_from = new_branches[-1].name
new_branches_filtered = new_branches
# Filter by target_type
if target_types:
new_branches_filtered = [
branch
for branch in new_branches_filtered
if branch.target is not None and branch.target_type in target_types
]
branches.extend(new_branches_filtered)
if len(new_branches) < branches_count + 1:
break
if len(branches) > branches_count:
last_branch = branches.pop(-1).name
else:
last_branch = None
branches = {
branch.name: {"target": branch.target, "target_type": branch.target_type,}
if branch.target
else None
for branch in branches
}
return {
"id": snapshot_id,
"branches": branches,
"next_branch": last_branch,
}
def snapshot_get_random(self):
return self._cql_runner.snapshot_get_random().id
def object_find_by_sha1_git(self, ids):
results = {id_: [] for id_ in ids}
missing_ids = set(ids)
# Mind the order, revision is the most likely one for a given ID,
# so we check revisions first.
queries = [
("revision", self._cql_runner.revision_missing),
("release", self._cql_runner.release_missing),
("content", self._cql_runner.content_missing_by_sha1_git),
("directory", self._cql_runner.directory_missing),
]
for (object_type, query_fn) in queries:
found_ids = missing_ids - set(query_fn(missing_ids))
for sha1_git in found_ids:
results[sha1_git].append(
{"sha1_git": sha1_git, "type": object_type,}
)
missing_ids.remove(sha1_git)
if not missing_ids:
# We found everything, skipping the next queries.
break
return results
def origin_get(self, origins: Iterable[str]) -> Iterable[Optional[Origin]]:
return [self.origin_get_one(origin) for origin in origins]
def origin_get_one(self, origin_url: str) -> Optional[Origin]:
"""Given an origin url, return the origin if it exists, None otherwise
"""
rows = list(self._cql_runner.origin_get_by_url(origin_url))
if rows:
assert len(rows) == 1
return Origin(url=rows[0].url)
else:
return None
def origin_get_by_sha1(self, sha1s):
results = []
for sha1 in sha1s:
rows = self._cql_runner.origin_get_by_sha1(sha1)
if rows:
results.append({"url": rows.one().url})
else:
results.append(None)
return results
def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict:
# Compute what token to begin the listing from
start_token = TOKEN_BEGIN
if page_token:
start_token = int(page_token)
if not (TOKEN_BEGIN <= start_token <= TOKEN_END):
raise StorageArgumentException("Invalid page_token.")
rows = self._cql_runner.origin_list(start_token, limit)
rows = list(rows)
if len(rows) == limit:
next_page_token: Optional[str] = str(rows[-1].tok + 1)
else:
next_page_token = None
return {
"origins": [{"url": row.url} for row in rows],
"next_page_token": next_page_token,
}
def origin_search(
self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False
):
# TODO: remove this endpoint, swh-search should be used instead.
origins = self._cql_runner.origin_iter_all()
if regexp:
pat = re.compile(url_pattern)
origins = [orig for orig in origins if pat.search(orig.url)]
else:
origins = [orig for orig in origins if url_pattern in orig.url]
if with_visit:
origins = [orig for orig in origins if orig.next_visit_id > 1]
return [{"url": orig.url,} for orig in origins[offset : offset + limit]]
def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]:
origins = list(origins)
to_add = [ori for ori in origins if self.origin_get_one(ori.url) is None]
self.journal_writer.origin_add(to_add)
for origin in to_add:
self._cql_runner.origin_add_one(origin)
return {"origin:add": len(to_add)}
def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]:
for visit in visits:
origin = self.origin_get_one(visit.origin)
if not origin: # Cannot add a visit without an origin
raise StorageArgumentException("Unknown origin %s", visit.origin)
all_visits = []
nb_visits = 0
for visit in visits:
nb_visits += 1
if not visit.visit:
visit_id = self._cql_runner.origin_generate_unique_visit_id(
visit.origin
)
visit = attr.evolve(visit, visit=visit_id)
self.journal_writer.origin_visit_add([visit])
self._cql_runner.origin_visit_add_one(visit)
assert visit.visit is not None
all_visits.append(visit)
self._origin_visit_status_add(
OriginVisitStatus(
origin=visit.origin,
visit=visit.visit,
date=visit.date,
status="created",
snapshot=None,
)
)
return all_visits
def _origin_visit_status_add(self, visit_status: OriginVisitStatus) -> None:
"""Add an origin visit status"""
self.journal_writer.origin_visit_status_add([visit_status])
self._cql_runner.origin_visit_status_add_one(visit_status)
def origin_visit_status_add(
self, visit_statuses: Iterable[OriginVisitStatus]
) -> None:
# First round to check existence (fail early if any is ko)
for visit_status in visit_statuses:
origin_url = self.origin_get_one(visit_status.origin)
if not origin_url:
raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
for visit_status in visit_statuses:
self._origin_visit_status_add(visit_status)
def _origin_visit_apply_last_status(self, visit: Dict[str, Any]) -> Dict[str, Any]:
"""Retrieve the latest visit status information for the origin visit.
Then merge it with the visit and return it.
"""
row = self._cql_runner.origin_visit_status_get_latest(
visit["origin"], visit["visit"]
)
assert row is not None
visit_status = converters.row_to_visit_status(row)
return {
# default to the values in visit
**visit,
# override with the last update
**visit_status.to_dict(),
# visit['origin'] is the URL (via a join), while
# visit_status['origin'] is only an id.
"origin": visit["origin"],
# but keep the date of the creation of the origin visit
"date": visit["date"],
}
def _origin_visit_get_latest_status(self, visit: OriginVisit) -> OriginVisitStatus:
"""Retrieve the latest visit status information for the origin visit object.
"""
row = self._cql_runner.origin_visit_status_get_latest(visit.origin, visit.visit)
assert row is not None
visit_status = converters.row_to_visit_status(row)
return attr.evolve(visit_status, origin=visit.origin)
@staticmethod
def _format_origin_visit_row(visit):
return {
**visit._asdict(),
"origin": visit.origin,
"date": visit.date.replace(tzinfo=datetime.timezone.utc),
}
def origin_visit_get(
self,
origin: str,
page_token: Optional[str] = None,
- order: str = "asc",
+ order: ListOrder = ListOrder.ASC,
limit: int = 10,
) -> PagedResult[OriginVisit]:
- order = order.lower()
- allowed_orders = ["asc", "desc"]
- if order not in allowed_orders:
- raise StorageArgumentException(
- f"order must be one of {', '.join(allowed_orders)}."
- )
+ if not isinstance(order, ListOrder):
+ raise StorageArgumentException("order must be a ListOrder value")
if page_token and not isinstance(page_token, str):
raise StorageArgumentException("page_token must be a string.")
next_page_token = None
visit_from = page_token and int(page_token)
visits: List[OriginVisit] = []
extra_limit = limit + 1
rows = self._cql_runner.origin_visit_get(origin, visit_from, extra_limit, order)
for row in rows:
visits.append(converters.row_to_visit(row))
assert len(visits) <= extra_limit
if len(visits) == extra_limit:
last_visit = visits[limit]
visits = visits[:limit]
assert last_visit is not None and last_visit.visit is not None
- if order == "asc":
+ if order == ListOrder.ASC:
next_page_token = str(last_visit.visit - 1)
else:
next_page_token = str(last_visit.visit + 1)
return PagedResult(results=visits, next_page_token=next_page_token)
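# Illustrative pagination loop for the ListOrder-based API (a sketch assuming a
# `storage` instance and an origin `url`; `process` stands for any per-page
# handling and is not part of the patch itself):
#   page = storage.origin_visit_get(url, order=ListOrder.DESC, limit=10)
#   while page.results:
#       process(page.results)
#       if page.next_page_token is None:
#           break
#       page = storage.origin_visit_get(
#           url, page_token=page.next_page_token, order=ListOrder.DESC, limit=10
#       )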
def origin_visit_find_by_date(
self, origin: str, visit_date: datetime.datetime
) -> Optional[OriginVisit]:
# Iterator over all the visits of the origin
# This should be ok for now, as there aren't too many visits
# per origin.
rows = list(self._cql_runner.origin_visit_get_all(origin))
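# Pick the visit whose date is closest to visit_date; ties are broken in
# favour of the largest visit id (hence the negated visit in the key).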
def key(visit):
dt = visit.date.replace(tzinfo=datetime.timezone.utc) - visit_date
return (abs(dt), -visit.visit)
if rows:
return converters.row_to_visit(min(rows, key=key))
return None
def origin_visit_get_by(self, origin: str, visit: int) -> Optional[OriginVisit]:
row = self._cql_runner.origin_visit_get_one(origin, visit)
if row:
return converters.row_to_visit(row)
return None
def origin_visit_get_latest(
self,
origin: str,
type: Optional[str] = None,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisit]:
# TODO: Do not fetch all visits
rows = self._cql_runner.origin_visit_get_all(origin)
latest_visit = None
for row in rows:
visit = self._format_origin_visit_row(row)
updated_visit = self._origin_visit_apply_last_status(visit)
if type is not None and updated_visit["type"] != type:
continue
if allowed_statuses and updated_visit["status"] not in allowed_statuses:
continue
if require_snapshot and updated_visit["snapshot"] is None:
continue
# updated_visit is a candidate
if latest_visit is not None:
if updated_visit["date"] < latest_visit["date"]:
continue
if updated_visit["visit"] < latest_visit["visit"]:
continue
latest_visit = updated_visit
if latest_visit is None:
return None
return OriginVisit(
origin=latest_visit["origin"],
visit=latest_visit["visit"],
date=latest_visit["date"],
type=latest_visit["type"],
)
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisitStatus]:
rows = self._cql_runner.origin_visit_status_get(
origin_url, visit, allowed_statuses, require_snapshot
)
# filtering is done python side as we cannot do it server side
if allowed_statuses:
rows = [row for row in rows if row.status in allowed_statuses]
if require_snapshot:
rows = [row for row in rows if row.snapshot is not None]
if not rows:
return None
return converters.row_to_visit_status(rows[0])
def origin_visit_status_get_random(
self, type: str
) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]:
back_in_the_day = now() - datetime.timedelta(weeks=12) # 3 months back
# Random position to start iteration at
start_token = random.randint(TOKEN_BEGIN, TOKEN_END)
# Iterator over all visits, ordered by token(origins) then visit_id
rows = self._cql_runner.origin_visit_iter(start_token)
for row in rows:
visit = converters.row_to_visit(row)
visit_status = self._origin_visit_get_latest_status(visit)
if visit.date > back_in_the_day and visit_status.status == "full":
return visit, visit_status
return None
def stat_counters(self):
rows = self._cql_runner.stat_counters()
keys = (
"content",
"directory",
"origin",
"origin_visit",
"release",
"revision",
"skipped_content",
"snapshot",
)
stats = {key: 0 for key in keys}
stats.update({row.object_type: row.count for row in rows})
return stats
def refresh_stat_counters(self):
pass
def raw_extrinsic_metadata_add(
self, metadata: Iterable[RawExtrinsicMetadata]
) -> None:
metadata = list(metadata)
self.journal_writer.raw_extrinsic_metadata_add(metadata)
for metadata_entry in metadata:
if not self._cql_runner.metadata_authority_get(
metadata_entry.authority.type.value, metadata_entry.authority.url
):
raise StorageArgumentException(
f"Unknown authority {metadata_entry.authority}"
)
if not self._cql_runner.metadata_fetcher_get(
metadata_entry.fetcher.name, metadata_entry.fetcher.version
):
raise StorageArgumentException(
f"Unknown fetcher {metadata_entry.fetcher}"
)
try:
self._cql_runner.raw_extrinsic_metadata_add(
type=metadata_entry.type.value,
id=str(metadata_entry.id),
authority_type=metadata_entry.authority.type.value,
authority_url=metadata_entry.authority.url,
discovery_date=metadata_entry.discovery_date,
fetcher_name=metadata_entry.fetcher.name,
fetcher_version=metadata_entry.fetcher.version,
format=metadata_entry.format,
metadata=metadata_entry.metadata,
origin=metadata_entry.origin,
visit=metadata_entry.visit,
snapshot=map_optional(str, metadata_entry.snapshot),
release=map_optional(str, metadata_entry.release),
revision=map_optional(str, metadata_entry.revision),
path=metadata_entry.path,
directory=map_optional(str, metadata_entry.directory),
)
except TypeError as e:
raise StorageArgumentException(*e.args)
def raw_extrinsic_metadata_get(
self,
object_type: MetadataTargetType,
id: Union[str, SWHID],
authority: MetadataAuthority,
after: Optional[datetime.datetime] = None,
page_token: Optional[bytes] = None,
limit: int = 1000,
) -> Dict[str, Union[Optional[bytes], List[RawExtrinsicMetadata]]]:
if object_type == MetadataTargetType.ORIGIN:
if isinstance(id, SWHID):
raise StorageArgumentException(
f"raw_extrinsic_metadata_get called with object_type='origin', "
f"but provided id is an SWHID: {id!r}"
)
else:
if not isinstance(id, SWHID):
raise StorageArgumentException(
f"raw_extrinsic_metadata_get called with object_type!='origin', "
f"but provided id is not an SWHID: {id!r}"
)
if page_token is not None:
(after_date, after_fetcher_name, after_fetcher_url) = msgpack_loads(
page_token
)
if after and after_date < after:
raise StorageArgumentException(
"page_token is inconsistent with the value of 'after'."
)
entries = self._cql_runner.raw_extrinsic_metadata_get_after_date_and_fetcher( # noqa
str(id),
authority.type.value,
authority.url,
after_date,
after_fetcher_name,
after_fetcher_url,
)
elif after is not None:
entries = self._cql_runner.raw_extrinsic_metadata_get_after_date(
str(id), authority.type.value, authority.url, after
)
else:
entries = self._cql_runner.raw_extrinsic_metadata_get(
str(id), authority.type.value, authority.url
)
if limit:
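# Pull one extra entry so we can tell whether a next page token is needed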
entries = itertools.islice(entries, 0, limit + 1)
results = []
for entry in entries:
discovery_date = entry.discovery_date.replace(tzinfo=datetime.timezone.utc)
assert str(id) == entry.id
result = RawExtrinsicMetadata(
type=MetadataTargetType(entry.type),
id=id,
authority=MetadataAuthority(
type=MetadataAuthorityType(entry.authority_type),
url=entry.authority_url,
),
fetcher=MetadataFetcher(
name=entry.fetcher_name, version=entry.fetcher_version,
),
discovery_date=discovery_date,
format=entry.format,
metadata=entry.metadata,
origin=entry.origin,
visit=entry.visit,
snapshot=map_optional(parse_swhid, entry.snapshot),
release=map_optional(parse_swhid, entry.release),
revision=map_optional(parse_swhid, entry.revision),
path=entry.path,
directory=map_optional(parse_swhid, entry.directory),
)
results.append(result)
if len(results) > limit:
results.pop()
assert len(results) == limit
last_result = results[-1]
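# The token records the (discovery date, fetcher name, fetcher version) of the
# last returned entry so the next call can resume strictly after it.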
next_page_token: Optional[bytes] = msgpack_dumps(
(
last_result.discovery_date,
last_result.fetcher.name,
last_result.fetcher.version,
)
)
else:
next_page_token = None
return {
"next_page_token": next_page_token,
"results": results,
}
def metadata_fetcher_add(self, fetchers: Iterable[MetadataFetcher]) -> None:
fetchers = list(fetchers)
self.journal_writer.metadata_fetcher_add(fetchers)
for fetcher in fetchers:
self._cql_runner.metadata_fetcher_add(
fetcher.name,
fetcher.version,
json.dumps(map_optional(dict, fetcher.metadata)),
)
def metadata_fetcher_get(
self, name: str, version: str
) -> Optional[MetadataFetcher]:
fetcher = self._cql_runner.metadata_fetcher_get(name, version)
if fetcher:
return MetadataFetcher(
name=fetcher.name,
version=fetcher.version,
metadata=json.loads(fetcher.metadata),
)
else:
return None
def metadata_authority_add(self, authorities: Iterable[MetadataAuthority]) -> None:
authorities = list(authorities)
self.journal_writer.metadata_authority_add(authorities)
for authority in authorities:
self._cql_runner.metadata_authority_add(
authority.url,
authority.type.value,
json.dumps(map_optional(dict, authority.metadata)),
)
def metadata_authority_get(
self, type: MetadataAuthorityType, url: str
) -> Optional[MetadataAuthority]:
authority = self._cql_runner.metadata_authority_get(type.value, url)
if authority:
return MetadataAuthority(
type=MetadataAuthorityType(authority.type),
url=authority.url,
metadata=json.loads(authority.metadata),
)
else:
return None
def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
"""Do nothing
"""
return None
def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
return {}
diff --git a/swh/storage/db.py b/swh/storage/db.py
index 18bca405..88a56ae9 100644
--- a/swh/storage/db.py
+++ b/swh/storage/db.py
@@ -1,1334 +1,1334 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import random
import select
from typing import Any, Dict, Iterable, List, Optional, Tuple
from swh.core.db import BaseDb
from swh.core.db.db_utils import stored_procedure, jsonize as _jsonize
from swh.core.db.db_utils import execute_values_generator
from swh.model.model import OriginVisit, OriginVisitStatus, SHA1_SIZE
+from swh.storage.interface import ListOrder
def jsonize(d):
return _jsonize(dict(d) if d is not None else None)
class Db(BaseDb):
"""Proxy to the SWH DB, with wrappers around stored procedures
"""
def mktemp_dir_entry(self, entry_type, cur=None):
self._cursor(cur).execute(
"SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),)
)
@stored_procedure("swh_mktemp_revision")
def mktemp_revision(self, cur=None):
pass
@stored_procedure("swh_mktemp_release")
def mktemp_release(self, cur=None):
pass
@stored_procedure("swh_mktemp_snapshot_branch")
def mktemp_snapshot_branch(self, cur=None):
pass
def register_listener(self, notify_queue, cur=None):
"""Register a listener for NOTIFY queue `notify_queue`"""
self._cursor(cur).execute("LISTEN %s" % notify_queue)
def listen_notifies(self, timeout):
"""Listen to notifications for `timeout` seconds"""
if select.select([self.conn], [], [], timeout) == ([], [], []):
return
else:
self.conn.poll()
while self.conn.notifies:
yield self.conn.notifies.pop(0)
@stored_procedure("swh_content_add")
def content_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_directory_add")
def directory_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_skipped_content_add")
def skipped_content_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_revision_add")
def revision_add_from_temp(self, cur=None):
pass
@stored_procedure("swh_release_add")
def release_add_from_temp(self, cur=None):
pass
def content_update_from_temp(self, keys_to_update, cur=None):
cur = self._cursor(cur)
cur.execute(
"""select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update
)
content_get_metadata_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"status",
]
content_add_keys = content_get_metadata_keys + ["ctime"]
skipped_content_keys = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"reason",
"status",
"origin",
]
def content_get_metadata_from_sha1s(self, sha1s, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
select t.sha1, %s from (values %%s) as t (sha1)
inner join content using (sha1)
"""
% ", ".join(self.content_get_metadata_keys[1:]),
((sha1,) for sha1 in sha1s),
)
def content_get_range(self, start, end, limit=None, cur=None):
"""Retrieve contents within range [start, end].
"""
cur = self._cursor(cur)
query = """select %s from content
where %%s <= sha1 and sha1 <= %%s
order by sha1
limit %%s""" % ", ".join(
self.content_get_metadata_keys
)
cur.execute(query, (start, end, limit))
yield from cur
content_hash_keys = ["sha1", "sha1_git", "sha256", "blake2s256"]
def content_missing_from_list(self, contents, cur=None):
cur = self._cursor(cur)
keys = ", ".join(self.content_hash_keys)
equality = " AND ".join(
("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys
)
yield from execute_values_generator(
cur,
"""
SELECT %s
FROM (VALUES %%s) as t(%s)
WHERE NOT EXISTS (
SELECT 1 FROM content c
WHERE %s
)
"""
% (keys, keys, equality),
(tuple(c[key] for key in self.content_hash_keys) for c in contents),
)
def content_missing_per_sha1(self, sha1s, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT t.sha1 FROM (VALUES %s) AS t(sha1)
WHERE NOT EXISTS (
SELECT 1 FROM content c WHERE c.sha1 = t.sha1
)""",
((sha1,) for sha1 in sha1s),
)
def content_missing_per_sha1_git(self, contents, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git)
WHERE NOT EXISTS (
SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git
)""",
((sha1,) for sha1 in contents),
)
def skipped_content_missing(self, contents, cur=None):
if not contents:
return []
cur = self._cursor(cur)
query = """SELECT * FROM (VALUES %s) AS t (%s)
WHERE not exists
(SELECT 1 FROM skipped_content s WHERE
s.sha1 is not distinct from t.sha1::sha1 and
s.sha1_git is not distinct from t.sha1_git::sha1 and
s.sha256 is not distinct from t.sha256::bytea);""" % (
(", ".join("%s" for _ in contents)),
", ".join(self.content_hash_keys),
)
cur.execute(
query,
[tuple(cont[key] for key in self.content_hash_keys) for cont in contents],
)
yield from cur
def snapshot_exists(self, snapshot_id, cur=None):
"""Check whether a snapshot with the given id exists"""
cur = self._cursor(cur)
cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,))
return bool(cur.fetchone())
def snapshot_missing_from_list(self, snapshots, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM snapshot d WHERE d.id = t.id
)
""",
((id,) for id in snapshots),
)
def snapshot_add(self, snapshot_id, cur=None):
"""Add a snapshot from the temporary table"""
cur = self._cursor(cur)
cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,))
snapshot_count_cols = ["target_type", "count"]
def snapshot_count_branches(self, snapshot_id, cur=None):
cur = self._cursor(cur)
query = """\
SELECT %s FROM swh_snapshot_count_branches(%%s)
""" % ", ".join(
self.snapshot_count_cols
)
cur.execute(query, (snapshot_id,))
yield from cur
snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"]
def snapshot_get_by_id(
self,
snapshot_id,
branches_from=b"",
branches_count=None,
target_types=None,
cur=None,
):
cur = self._cursor(cur)
query = """\
SELECT %s
FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[])
""" % ", ".join(
self.snapshot_get_cols
)
cur.execute(query, (snapshot_id, branches_from, branches_count, target_types))
yield from cur
def snapshot_get_by_origin_visit(self, origin_url, visit_id, cur=None):
cur = self._cursor(cur)
query = """\
SELECT ovs.snapshot
FROM origin_visit ov
INNER JOIN origin o ON o.id = ov.origin
INNER JOIN origin_visit_status ovs
ON ov.origin = ovs.origin AND ov.visit = ovs.visit
WHERE o.url=%s AND ov.visit=%s
ORDER BY ovs.date DESC LIMIT 1
"""
cur.execute(query, (origin_url, visit_id))
ret = cur.fetchone()
if ret:
return ret[0]
def snapshot_get_random(self, cur=None):
return self._get_random_row_from_table("snapshot", ["id"], "id", cur)
content_find_cols = [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
]
def content_find(
self, sha1=None, sha1_git=None, sha256=None, blake2s256=None, cur=None
):
"""Find the content optionally on a combination of the following
checksums sha1, sha1_git, sha256 or blake2s256.
Args:
sha1: sha1 content
git_sha1: the sha1 computed `a la git` sha1 of the content
sha256: sha256 content
blake2s256: blake2s256 content
Returns:
The tuple (sha1, sha1_git, sha256, blake2s256) if found or None.
"""
cur = self._cursor(cur)
checksum_dict = {
"sha1": sha1,
"sha1_git": sha1_git,
"sha256": sha256,
"blake2s256": blake2s256,
}
where_parts = []
args = []
# Adds only those keys which have value other than None
for algorithm in checksum_dict:
if checksum_dict[algorithm] is not None:
args.append(checksum_dict[algorithm])
where_parts.append(algorithm + "= %s")
query = " AND ".join(where_parts)
cur.execute(
"""SELECT %s
FROM content WHERE %s
"""
% (",".join(self.content_find_cols), query),
args,
)
content = cur.fetchall()
return content
def content_get_random(self, cur=None):
return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur)
def directory_missing_from_list(self, directories, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM directory d WHERE d.id = t.id
)
""",
((id,) for id in directories),
)
directory_ls_cols = [
"dir_id",
"type",
"target",
"name",
"perms",
"status",
"sha1",
"sha1_git",
"sha256",
"length",
]
def directory_walk_one(self, directory, cur=None):
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols
cur.execute(query, (directory,))
yield from cur
def directory_walk(self, directory, cur=None):
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_directory_walk(%%s)" % cols
cur.execute(query, (directory,))
yield from cur
def directory_entry_get_by_path(self, directory, paths, cur=None):
"""Retrieve a directory entry by path.
"""
cur = self._cursor(cur)
cols = ", ".join(self.directory_ls_cols)
query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols
cur.execute(query, (directory, paths))
data = cur.fetchone()
if set(data) == {None}:
return None
return data
def directory_get_random(self, cur=None):
return self._get_random_row_from_table("directory", ["id"], "id", cur)
def revision_missing_from_list(self, revisions, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM revision r WHERE r.id = t.id
)
""",
((id,) for id in revisions),
)
revision_add_cols = [
"id",
"date",
"date_offset",
"date_neg_utc_offset",
"committer_date",
"committer_date_offset",
"committer_date_neg_utc_offset",
"type",
"directory",
"message",
"author_fullname",
"author_name",
"author_email",
"committer_fullname",
"committer_name",
"committer_email",
"metadata",
"synthetic",
"extra_headers",
]
revision_get_cols = revision_add_cols + ["parents"]
def origin_visit_add(self, origin, ts, type, cur=None):
"""Add a new origin_visit for origin origin at timestamp ts.
Args:
origin: origin concerned by the visit
ts: the date of the visit
type: type of loader for the visit
Returns:
The id of the new visit for that origin
"""
cur = self._cursor(cur)
cur.execute(
"SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type)
)
return cur.fetchone()[0]
origin_visit_status_cols = [
"origin",
"visit",
"date",
"status",
"snapshot",
"metadata",
]
def origin_visit_status_add(
self, visit_status: OriginVisitStatus, cur=None
) -> None:
"""Add new origin visit status
"""
assert self.origin_visit_status_cols[0] == "origin"
assert self.origin_visit_status_cols[-1] == "metadata"
cols = self.origin_visit_status_cols[1:-1]
cur = self._cursor(cur)
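# The WITH clause resolves the origin id from its URL, so callers only need the URL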
cur.execute(
f"WITH origin_id as (select id from origin where url=%s) "
f"INSERT INTO origin_visit_status "
f"(origin, {', '.join(cols)}, metadata) "
f"VALUES ((select id from origin_id), "
f"{', '.join(['%s']*len(cols))}, %s) "
f"ON CONFLICT (origin, visit, date) do nothing",
[visit_status.origin]
+ [getattr(visit_status, key) for key in cols]
+ [jsonize(visit_status.metadata)],
)
origin_visit_cols = ["origin", "visit", "date", "type"]
def origin_visit_add_with_id(self, origin_visit: OriginVisit, cur=None) -> None:
"""Insert origin visit when id are already set
"""
ov = origin_visit
assert ov.visit is not None
cur = self._cursor(cur)
query = """INSERT INTO origin_visit ({cols})
VALUES ((select id from origin where url=%s), {values})
ON CONFLICT (origin, visit) DO NOTHING""".format(
cols=", ".join(self.origin_visit_cols),
values=", ".join("%s" for col in self.origin_visit_cols[1:]),
)
cur.execute(query, (ov.origin, ov.visit, ov.date, ov.type))
origin_visit_get_cols = [
"origin",
"visit",
"date",
"type",
"status",
"metadata",
"snapshot",
]
origin_visit_select_cols = [
"o.url AS origin",
"ov.visit",
"ov.date",
"ov.type AS type",
"ovs.status",
"ovs.metadata",
"ovs.snapshot",
]
origin_visit_status_select_cols = [
"o.url AS origin",
"ovs.visit",
"ovs.date",
"ovs.status",
"ovs.snapshot",
"ovs.metadata",
]
def _make_origin_visit_status(
self, row: Optional[Tuple[Any]]
) -> Optional[Dict[str, Any]]:
"""Make an origin_visit_status dict out of a row
"""
if not row:
return None
return dict(zip(self.origin_visit_status_cols, row))
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
cur=None,
) -> Optional[Dict[str, Any]]:
"""Given an origin visit id, return its latest origin_visit_status
"""
cur = self._cursor(cur)
query_parts = [
"SELECT %s" % ", ".join(self.origin_visit_status_select_cols),
"FROM origin_visit_status ovs ",
"INNER JOIN origin o ON o.id = ovs.origin",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin_url]
query_parts.append("AND ovs.visit = %s")
query_params.append(visit)
if require_snapshot:
query_parts.append("AND ovs.snapshot is not null")
if allowed_statuses:
query_parts.append("AND ovs.status IN %s")
query_params.append(tuple(allowed_statuses))
query_parts.append("ORDER BY ovs.date DESC LIMIT 1")
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
row = cur.fetchone()
return self._make_origin_visit_status(row)
def origin_visit_get_all(
self, origin_id, last_visit=None, order="asc", limit=None, cur=None
):
"""Retrieve all visits for origin with id origin_id.
Args:
origin_id: The occurrence's origin
Yields:
The visits for that origin
"""
cur = self._cursor(cur)
assert order.lower() in ["asc", "desc"]
query_parts = [
"SELECT DISTINCT ON (ov.visit) %s "
% ", ".join(self.origin_visit_select_cols),
"FROM origin_visit ov",
"INNER JOIN origin o ON o.id = ov.origin",
"INNER JOIN origin_visit_status ovs",
"ON ov.origin = ovs.origin AND ov.visit = ovs.visit",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin_id]
if last_visit is not None:
op_comparison = ">" if order == "asc" else "<"
query_parts.append(f"and ov.visit {op_comparison} %s")
query_params.append(last_visit)
if order == "asc":
query_parts.append("ORDER BY ov.visit ASC, ovs.date DESC")
elif order == "desc":
query_parts.append("ORDER BY ov.visit DESC, ovs.date DESC")
else:
assert False
if limit is not None:
query_parts.append("LIMIT %s")
query_params.append(limit)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
yield from cur
def origin_visit_get_range(
- self, origin: str, visit_from: int, order: str, limit: int, cur=None,
+ self, origin: str, visit_from: int, order: ListOrder, limit: int, cur=None,
):
- assert order in ["asc", "desc"]
cur = self._cursor(cur)
origin_visit_cols = ["o.url as origin", "ov.visit", "ov.date", "ov.type"]
query_parts = [
f"SELECT {', '.join(origin_visit_cols)} FROM origin_visit ov ",
"INNER JOIN origin o ON o.id = ov.origin ",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin]
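# visit_from is an exclusive cursor: with ListOrder.ASC the query resumes
# strictly after it, with ListOrder.DESC strictly before it.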
if visit_from > 0:
- op_comparison = ">" if order == "asc" else "<"
+ op_comparison = ">" if order == ListOrder.ASC else "<"
query_parts.append(f"and ov.visit {op_comparison} %s")
query_params.append(visit_from)
- if order == "asc":
+ if order == ListOrder.ASC:
query_parts.append("ORDER BY ov.visit ASC")
- elif order == "desc":
+ elif order == ListOrder.DESC:
query_parts.append("ORDER BY ov.visit DESC")
query_parts.append("LIMIT %s")
query_params.append(limit)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
yield from cur
def origin_visit_get(self, origin_id, visit_id, cur=None):
"""Retrieve information on visit visit_id of origin origin_id.
Args:
origin_id: the origin concerned
visit_id: the id of the visit for that origin
Returns:
The origin_visit information
"""
cur = self._cursor(cur)
query = """\
SELECT %s
FROM origin_visit ov
INNER JOIN origin o ON o.id = ov.origin
INNER JOIN origin_visit_status ovs
ON ov.origin = ovs.origin AND ov.visit = ovs.visit
WHERE o.url = %%s AND ov.visit = %%s
ORDER BY ovs.date DESC
LIMIT 1
""" % (
", ".join(self.origin_visit_select_cols)
)
cur.execute(query, (origin_id, visit_id))
r = cur.fetchall()
if not r:
return None
return r[0]
def origin_visit_find_by_date(self, origin, visit_date, cur=None):
cur = self._cursor(cur)
cur.execute(
"SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date)
)
rows = cur.fetchall()
if rows:
visit = dict(zip(self.origin_visit_get_cols, rows[0]))
visit["origin"] = origin
return visit
def origin_visit_exists(self, origin_id, visit_id, cur=None):
"""Check whether an origin visit with the given ids exists"""
cur = self._cursor(cur)
query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s"
cur.execute(query, (origin_id, visit_id))
return bool(cur.fetchone())
def origin_visit_get_latest(
self,
origin_id: str,
type: Optional[str],
allowed_statuses: Optional[Iterable[str]],
require_snapshot: bool,
cur=None,
):
"""Retrieve the most recent origin_visit of the given origin,
with optional filters.
Args:
origin_id: the origin concerned
type: Optional visit type to filter on
allowed_statuses: the visit statuses allowed for the returned visit
require_snapshot (bool): If True, only a visit with a known
snapshot will be returned.
Returns:
The origin_visit information, or None if no visit matches.
"""
cur = self._cursor(cur)
query_parts = [
"SELECT %s" % ", ".join(self.origin_visit_select_cols),
"FROM origin_visit ov ",
"INNER JOIN origin o ON o.id = ov.origin",
"INNER JOIN origin_visit_status ovs ",
"ON o.id = ovs.origin AND ov.visit = ovs.visit ",
]
query_parts.append("WHERE o.url = %s")
query_params: List[Any] = [origin_id]
if type is not None:
query_parts.append("AND ov.type = %s")
query_params.append(type)
if require_snapshot:
query_parts.append("AND ovs.snapshot is not null")
if allowed_statuses:
query_parts.append("AND ovs.status IN %s")
query_params.append(tuple(allowed_statuses))
query_parts.append(
"ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1"
)
query = "\n".join(query_parts)
cur.execute(query, tuple(query_params))
r = cur.fetchone()
if not r:
return None
return r
def origin_visit_get_random(self, type, cur=None):
"""Randomly select one origin visit that was full and in the last 3
months
"""
cur = self._cursor(cur)
columns = ",".join(self.origin_visit_select_cols)
query = f"""select {columns}
from origin_visit ov
inner join origin o on ov.origin=o.id
inner join origin_visit_status ovs
on ov.origin = ovs.origin and ov.visit = ovs.visit
where ovs.status='full'
and ov.type=%s
and ov.date > now() - '3 months'::interval
and random() < 0.1
limit 1
"""
cur.execute(query, (type,))
return cur.fetchone()
@staticmethod
def mangle_query_key(key, main_table):
if key == "id":
return "t.id"
if key == "parents":
return """
ARRAY(
SELECT rh.parent_id::bytea
FROM revision_history rh
WHERE rh.id = t.id
ORDER BY rh.parent_rank
)"""
if "_" not in key:
return "%s.%s" % (main_table, key)
head, tail = key.split("_", 1)
if head in ("author", "committer") and tail in (
"name",
"email",
"id",
"fullname",
):
return "%s.%s" % (head, tail)
return "%s.%s" % (main_table, key)
def revision_get_from_list(self, revisions, cur=None):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "revision") for k in self.revision_get_cols
)
yield from execute_values_generator(
cur,
"""
SELECT %s FROM (VALUES %%s) as t(sortkey, id)
LEFT JOIN revision ON t.id = revision.id
LEFT JOIN person author ON revision.author = author.id
LEFT JOIN person committer ON revision.committer = committer.id
ORDER BY sortkey
"""
% query_keys,
((sortkey, id) for sortkey, id in enumerate(revisions)),
)
def revision_log(self, root_revisions, limit=None, cur=None):
cur = self._cursor(cur)
query = """SELECT %s
FROM swh_revision_log(%%s, %%s)
""" % ", ".join(
self.revision_get_cols
)
cur.execute(query, (root_revisions, limit))
yield from cur
revision_shortlog_cols = ["id", "parents"]
def revision_shortlog(self, root_revisions, limit=None, cur=None):
cur = self._cursor(cur)
query = """SELECT %s
FROM swh_revision_list(%%s, %%s)
""" % ", ".join(
self.revision_shortlog_cols
)
cur.execute(query, (root_revisions, limit))
yield from cur
def revision_get_random(self, cur=None):
return self._get_random_row_from_table("revision", ["id"], "id", cur)
def release_missing_from_list(self, releases, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
SELECT id FROM (VALUES %s) as t(id)
WHERE NOT EXISTS (
SELECT 1 FROM release r WHERE r.id = t.id
)
""",
((id,) for id in releases),
)
object_find_by_sha1_git_cols = ["sha1_git", "type"]
def object_find_by_sha1_git(self, ids, cur=None):
cur = self._cursor(cur)
yield from execute_values_generator(
cur,
"""
WITH t (sha1_git) AS (VALUES %s),
known_objects as ((
select
id as sha1_git,
'release'::object_type as type,
object_id
from release r
where exists (select 1 from t where t.sha1_git = r.id)
) union all (
select
id as sha1_git,
'revision'::object_type as type,
object_id
from revision r
where exists (select 1 from t where t.sha1_git = r.id)
) union all (
select
id as sha1_git,
'directory'::object_type as type,
object_id
from directory d
where exists (select 1 from t where t.sha1_git = d.id)
) union all (
select
sha1_git as sha1_git,
'content'::object_type as type,
object_id
from content c
where exists (select 1 from t where t.sha1_git = c.sha1_git)
))
select t.sha1_git as sha1_git, k.type
from t
left join known_objects k on t.sha1_git = k.sha1_git
""",
((id,) for id in ids),
)
def stat_counters(self, cur=None):
cur = self._cursor(cur)
cur.execute("SELECT * FROM swh_stat_counters()")
yield from cur
def origin_add(self, url, cur=None):
"""Insert a new origin and return the new identifier."""
insert = """INSERT INTO origin (url) values (%s)
RETURNING url"""
cur.execute(insert, (url,))
return cur.fetchone()[0]
origin_cols = ["url"]
def origin_get_by_url(self, origins, cur=None):
"""Retrieve origin `(type, url)` from urls if found."""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(url)
LEFT JOIN origin ON t.url = origin.url
""" % ",".join(
"origin." + col for col in self.origin_cols
)
yield from execute_values_generator(cur, query, ((url,) for url in origins))
def origin_get_by_sha1(self, sha1s, cur=None):
"""Retrieve origin urls from sha1s if found."""
cur = self._cursor(cur)
query = """SELECT %s FROM (VALUES %%s) as t(sha1)
LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1')
""" % ",".join(
"origin." + col for col in self.origin_cols
)
yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s))
def origin_id_get_by_url(self, origins, cur=None):
"""Retrieve origin `(type, url)` from urls if found."""
cur = self._cursor(cur)
query = """SELECT id FROM (VALUES %s) as t(url)
LEFT JOIN origin ON t.url = origin.url
"""
for row in execute_values_generator(cur, query, ((url,) for url in origins)):
yield row[0]
origin_get_range_cols = ["id", "url"]
def origin_get_range(self, origin_from=1, origin_count=100, cur=None):
"""Retrieve ``origin_count`` origins whose ids are greater
or equal than ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from (int): the minimum id of origins to retrieve
origin_count (int): the maximum number of origins to retrieve
"""
cur = self._cursor(cur)
query = """SELECT %s
FROM origin WHERE id >= %%s
ORDER BY id LIMIT %%s
""" % ",".join(
self.origin_get_range_cols
)
cur.execute(query, (origin_from, origin_count))
yield from cur
def _origin_query(
self,
url_pattern,
count=False,
offset=0,
limit=50,
regexp=False,
with_visit=False,
cur=None,
):
"""
Build and execute the query shared by origin search and origin count.
"""
cur = self._cursor(cur)
if count:
origin_cols = "COUNT(*)"
else:
origin_cols = ",".join(self.origin_cols)
query = """SELECT %s
FROM origin o
WHERE """
if with_visit:
query += """
EXISTS (
SELECT 1
FROM origin_visit ov
INNER JOIN origin_visit_status ovs
ON ov.origin = ovs.origin AND ov.visit = ovs.visit
INNER JOIN snapshot ON ovs.snapshot=snapshot.id
WHERE ov.origin=o.id
)
AND """
query += "url %s %%s "
if not count:
query += "ORDER BY id OFFSET %%s LIMIT %%s"
if not regexp:
query = query % (origin_cols, "ILIKE")
query_params = ("%" + url_pattern + "%", offset, limit)
else:
query = query % (origin_cols, "~*")
query_params = (url_pattern, offset, limit)
if count:
query_params = (query_params[0],)
cur.execute(query, query_params)
def origin_search(
self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False, cur=None
):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern (str): the string pattern to search for in origin urls
offset (int): number of found origins to skip before returning
results
limit (int): the maximum number of found origins to return
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
"""
self._origin_query(
url_pattern,
offset=offset,
limit=limit,
regexp=regexp,
with_visit=with_visit,
cur=cur,
)
yield from cur
def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
"""
self._origin_query(
url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur
)
return cur.fetchone()[0]
release_add_cols = [
"id",
"target",
"target_type",
"date",
"date_offset",
"date_neg_utc_offset",
"name",
"comment",
"synthetic",
"author_fullname",
"author_name",
"author_email",
]
release_get_cols = release_add_cols
def release_get_from_list(self, releases, cur=None):
cur = self._cursor(cur)
query_keys = ", ".join(
self.mangle_query_key(k, "release") for k in self.release_get_cols
)
yield from execute_values_generator(
cur,
"""
SELECT %s FROM (VALUES %%s) as t(sortkey, id)
LEFT JOIN release ON t.id = release.id
LEFT JOIN person author ON release.author = author.id
ORDER BY sortkey
"""
% query_keys,
((sortkey, id) for sortkey, id in enumerate(releases)),
)
def release_get_random(self, cur=None):
return self._get_random_row_from_table("release", ["id"], "id", cur)
_raw_extrinsic_metadata_context_cols = [
"origin",
"visit",
"snapshot",
"release",
"revision",
"path",
"directory",
]
"""The list of context columns for all artifact types."""
_raw_extrinsic_metadata_insert_cols = [
"type",
"id",
"authority_id",
"fetcher_id",
"discovery_date",
"format",
"metadata",
*_raw_extrinsic_metadata_context_cols,
]
"""List of columns of the raw_extrinsic_metadata table, used when writing
metadata."""
_raw_extrinsic_metadata_insert_query = f"""
INSERT INTO raw_extrinsic_metadata
({', '.join(_raw_extrinsic_metadata_insert_cols)})
VALUES ({', '.join('%s' for _ in _raw_extrinsic_metadata_insert_cols)})
ON CONFLICT (id, authority_id, discovery_date, fetcher_id)
DO NOTHING
"""
raw_extrinsic_metadata_get_cols = [
"raw_extrinsic_metadata.id",
"raw_extrinsic_metadata.type",
"discovery_date",
"metadata_authority.type",
"metadata_authority.url",
"metadata_fetcher.id",
"metadata_fetcher.name",
"metadata_fetcher.version",
*_raw_extrinsic_metadata_context_cols,
"format",
"raw_extrinsic_metadata.metadata",
]
"""List of columns of the raw_extrinsic_metadata, metadata_authority,
and metadata_fetcher tables, used when reading object metadata."""
_raw_extrinsic_metadata_select_query = f"""
SELECT
{', '.join(raw_extrinsic_metadata_get_cols)}
FROM raw_extrinsic_metadata
INNER JOIN metadata_authority
ON (metadata_authority.id=authority_id)
INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id)
WHERE raw_extrinsic_metadata.id=%s AND authority_id=%s
"""
def raw_extrinsic_metadata_add(
self,
object_type: str,
id: str,
discovery_date: datetime.datetime,
authority_id: int,
fetcher_id: int,
format: str,
metadata: bytes,
origin: Optional[str],
visit: Optional[int],
snapshot: Optional[str],
release: Optional[str],
revision: Optional[str],
path: Optional[bytes],
directory: Optional[str],
cur,
):
query = self._raw_extrinsic_metadata_insert_query
args: Dict[str, Any] = dict(
type=object_type,
id=id,
authority_id=authority_id,
fetcher_id=fetcher_id,
discovery_date=discovery_date,
format=format,
metadata=metadata,
origin=origin,
visit=visit,
snapshot=snapshot,
release=release,
revision=revision,
path=path,
directory=directory,
)
params = [args[col] for col in self._raw_extrinsic_metadata_insert_cols]
cur.execute(query, params)
def raw_extrinsic_metadata_get(
self,
object_type: str,
id: str,
authority_id: int,
after_time: Optional[datetime.datetime],
after_fetcher: Optional[int],
limit: int,
cur,
):
query_parts = [self._raw_extrinsic_metadata_select_query]
args = [id, authority_id]
if after_fetcher is not None:
assert after_time
query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)")
args.extend([after_time, after_fetcher])
elif after_time is not None:
query_parts.append("AND discovery_date > %s")
args.append(after_time)
query_parts.append("ORDER BY discovery_date, fetcher_id")
if limit:
query_parts.append("LIMIT %s")
args.append(limit)
cur.execute(" ".join(query_parts), args)
yield from cur
metadata_fetcher_cols = ["name", "version", "metadata"]
def metadata_fetcher_add(
self, name: str, version: str, metadata: bytes, cur=None
) -> None:
cur = self._cursor(cur)
cur.execute(
"INSERT INTO metadata_fetcher (name, version, metadata) "
"VALUES (%s, %s, %s) ON CONFLICT DO NOTHING",
(name, version, jsonize(metadata)),
)
def metadata_fetcher_get(self, name: str, version: str, cur=None):
cur = self._cursor(cur)
cur.execute(
f"SELECT {', '.join(self.metadata_fetcher_cols)} "
f"FROM metadata_fetcher "
f"WHERE name=%s AND version=%s",
(name, version),
)
return cur.fetchone()
def metadata_fetcher_get_id(
self, name: str, version: str, cur=None
) -> Optional[int]:
cur = self._cursor(cur)
cur.execute(
"SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s",
(name, version),
)
row = cur.fetchone()
if row:
return row[0]
else:
return None
metadata_authority_cols = ["type", "url", "metadata"]
def metadata_authority_add(
self, type: str, url: str, metadata: bytes, cur=None
) -> None:
cur = self._cursor(cur)
cur.execute(
"INSERT INTO metadata_authority (type, url, metadata) "
"VALUES (%s, %s, %s) ON CONFLICT DO NOTHING",
(type, url, jsonize(metadata)),
)
def metadata_authority_get(self, type: str, url: str, cur=None):
cur = self._cursor(cur)
cur.execute(
f"SELECT {', '.join(self.metadata_authority_cols)} "
f"FROM metadata_authority "
f"WHERE type=%s AND url=%s",
(type, url),
)
return cur.fetchone()
def metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]:
cur = self._cursor(cur)
cur.execute(
"SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url)
)
row = cur.fetchone()
if row:
return row[0]
else:
return None
def _get_random_row_from_table(self, table_name, cols, id_col, cur=None):
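# Draw a random pivot in the id space, then return the nearest row on either side of it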
random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE))
cur = self._cursor(cur)
query = """
(SELECT {cols} FROM {table} WHERE {id_col} >= %s
ORDER BY {id_col} LIMIT 1)
UNION
(SELECT {cols} FROM {table} WHERE {id_col} < %s
ORDER BY {id_col} DESC LIMIT 1)
LIMIT 1
""".format(
cols=", ".join(cols), table=table_name, id_col=id_col
)
cur.execute(query, (random_sha1, random_sha1))
row = cur.fetchone()
if row:
return row[0]
diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py
index 05a8675c..d025a4e7 100644
--- a/swh/storage/in_memory.py
+++ b/swh/storage/in_memory.py
@@ -1,1235 +1,1231 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import re
import bisect
import collections
import copy
import datetime
import itertools
import random
from collections import defaultdict
from datetime import timedelta
from typing import (
Any,
Callable,
Dict,
Generic,
Hashable,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import attr
from swh.core.api.serializers import msgpack_loads, msgpack_dumps
from swh.model.identifiers import SWHID
from swh.model.model import (
BaseContent,
Content,
SkippedContent,
Directory,
Revision,
Release,
Snapshot,
OriginVisit,
OriginVisitStatus,
Origin,
SHA1_SIZE,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
MetadataTargetType,
RawExtrinsicMetadata,
)
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex
-from swh.storage.interface import PagedResult
+from swh.storage.interface import ListOrder, PagedResult
from swh.storage.objstorage import ObjStorage
from swh.storage.utils import now
from .converters import origin_url_to_sha1
from .exc import StorageArgumentException, HashCollision
from .utils import get_partition_bounds_bytes
from .writer import JournalWriter
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
SortedListItem = TypeVar("SortedListItem")
SortedListKey = TypeVar("SortedListKey")
FetcherKey = Tuple[str, str]
class SortedList(collections.UserList, Generic[SortedListKey, SortedListItem]):
data: List[Tuple[SortedListKey, SortedListItem]]
# https://github.com/python/mypy/issues/708
# key: Callable[[SortedListItem], SortedListKey]
def __init__(
self,
data: List[SortedListItem] = None,
key: Optional[Callable[[SortedListItem], SortedListKey]] = None,
):
if key is None:
def key(item):
return item
assert key is not None # for mypy
super().__init__(sorted((key(x), x) for x in data or []))
self.key: Callable[[SortedListItem], SortedListKey] = key
def add(self, item: SortedListItem):
k = self.key(item)
bisect.insort(self.data, (k, item))
def __iter__(self) -> Iterator[SortedListItem]:
for (k, item) in self.data:
yield item
def iter_from(self, start_key: Any) -> Iterator[SortedListItem]:
"""Returns an iterator over all the elements whose key is greater
or equal to `start_key`.
(This is an efficient equivalent to:
`(x for x in L if key(x) >= start_key)`)
"""
from_index = bisect.bisect_left(self.data, (start_key,))
for (k, item) in itertools.islice(self.data, from_index, None):
yield item
def iter_after(self, start_key: Any) -> Iterator[SortedListItem]:
"""Same as iter_from, but using a strict inequality."""
it = self.iter_from(start_key)
for item in it:
if self.key(item) > start_key: # type: ignore
yield item
break
yield from it
class InMemoryStorage:
def __init__(self, journal_writer=None):
self.reset()
self.journal_writer = JournalWriter(journal_writer)
def reset(self):
self._contents = {}
self._content_indexes = defaultdict(lambda: defaultdict(set))
self._skipped_contents = {}
self._skipped_content_indexes = defaultdict(lambda: defaultdict(set))
self._directories = {}
self._revisions = {}
self._releases = {}
self._snapshots = {}
self._origins = {}
self._origins_by_id = []
self._origins_by_sha1 = {}
self._origin_visits = {}
self._origin_visit_statuses: Dict[Tuple[str, int], List[OriginVisitStatus]] = {}
self._persons = {}
# {object_type: {id: {authority: [metadata]}}}
self._raw_extrinsic_metadata: Dict[
MetadataTargetType,
Dict[
Union[str, SWHID],
Dict[
Hashable,
SortedList[
Tuple[datetime.datetime, FetcherKey], RawExtrinsicMetadata
],
],
],
] = defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: SortedList(
key=lambda x: (
x.discovery_date,
self._metadata_fetcher_key(x.fetcher),
)
)
)
)
) # noqa
self._metadata_fetchers: Dict[FetcherKey, MetadataFetcher] = {}
self._metadata_authorities: Dict[Hashable, MetadataAuthority] = {}
self._objects = defaultdict(list)
self._sorted_sha1s = SortedList[bytes, bytes]()
self.objstorage = ObjStorage({"cls": "memory", "args": {}})
def check_config(self, *, check_write):
return True
def _content_add(self, contents: Iterable[Content], with_data: bool) -> Dict:
self.journal_writer.content_add(contents)
content_add = 0
if with_data:
summary = self.objstorage.content_add(
c for c in contents if c.status != "absent"
)
content_add_bytes = summary["content:add:bytes"]
for content in contents:
key = self._content_key(content)
if key in self._contents:
continue
for algorithm in DEFAULT_ALGORITHMS:
hash_ = content.get_hash(algorithm)
if hash_ in self._content_indexes[algorithm] and (
algorithm not in {"blake2s256", "sha256"}
):
colliding_content_hashes = []
# Add the already stored contents
for content_hashes_set in self._content_indexes[algorithm][hash_]:
hashes = dict(content_hashes_set)
colliding_content_hashes.append(hashes)
# Add the new colliding content
colliding_content_hashes.append(content.hashes())
raise HashCollision(algorithm, hash_, colliding_content_hashes)
for algorithm in DEFAULT_ALGORITHMS:
hash_ = content.get_hash(algorithm)
self._content_indexes[algorithm][hash_].add(key)
self._objects[content.sha1_git].append(("content", content.sha1))
self._contents[key] = content
self._sorted_sha1s.add(content.sha1)
self._contents[key] = attr.evolve(self._contents[key], data=None)
content_add += 1
summary = {
"content:add": content_add,
}
if with_data:
summary["content:add:bytes"] = content_add_bytes
return summary
def content_add(self, content: Iterable[Content]) -> Dict:
content = [attr.evolve(c, ctime=now()) for c in content]
return self._content_add(content, with_data=True)
def content_update(self, content, keys=[]):
self.journal_writer.content_update(content)
for cont_update in content:
cont_update = cont_update.copy()
sha1 = cont_update.pop("sha1")
for old_key in self._content_indexes["sha1"][sha1]:
old_cont = self._contents.pop(old_key)
for algorithm in DEFAULT_ALGORITHMS:
hash_ = old_cont.get_hash(algorithm)
self._content_indexes[algorithm][hash_].remove(old_key)
new_cont = attr.evolve(old_cont, **cont_update)
new_key = self._content_key(new_cont)
self._contents[new_key] = new_cont
for algorithm in DEFAULT_ALGORITHMS:
hash_ = new_cont.get_hash(algorithm)
self._content_indexes[algorithm][hash_].add(new_key)
def content_add_metadata(self, content: Iterable[Content]) -> Dict:
return self._content_add(content, with_data=False)
def content_get(self, content):
# FIXME: Make this method support slicing the `data`.
if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
raise StorageArgumentException(
"Sending at most %s contents." % BULK_BLOCK_CONTENT_LEN_MAX
)
yield from self.objstorage.content_get(content)
def content_get_range(self, start, end, limit=1000):
if limit is None:
raise StorageArgumentException("limit should not be None")
sha1s = (
(sha1, content_key)
for sha1 in self._sorted_sha1s.iter_from(start)
for content_key in self._content_indexes["sha1"][sha1]
)
matched = []
next_content = None
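# Walk sha1s from `start`; stop past `end`, and once `limit` entries are
# collected remember the next sha1 as the continuation cursor.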
for sha1, key in sha1s:
if sha1 > end:
break
if len(matched) >= limit:
next_content = sha1
break
matched.append(self._contents[key].to_dict())
return {
"contents": matched,
"next": next_content,
}
def content_get_partition(
self,
partition_id: int,
nb_partitions: int,
limit: int = 1000,
page_token: str = None,
):
if limit is None:
raise StorageArgumentException("limit should not be None")
(start, end) = get_partition_bounds_bytes(
partition_id, nb_partitions, SHA1_SIZE
)
if page_token:
start = hash_to_bytes(page_token)
if end is None:
end = b"\xff" * SHA1_SIZE
result = self.content_get_range(start, end, limit)
result2 = {
"contents": result["contents"],
"next_page_token": None,
}
if result["next"]:
result2["next_page_token"] = hash_to_hex(result["next"])
return result2
def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
result: Dict = {sha1: [] for sha1 in contents}
for sha1 in contents:
if sha1 in self._content_indexes["sha1"]:
objs = self._content_indexes["sha1"][sha1]
# only 1 element as content_add_metadata would have raised a
# hash collision otherwise
for key in objs:
d = self._contents[key].to_dict()
del d["ctime"]
if "data" in d:
del d["data"]
result[sha1].append(d)
return result
def content_find(self, content):
if not set(content).intersection(DEFAULT_ALGORITHMS):
raise StorageArgumentException(
"content keys must contain at least one of: %s"
% ", ".join(sorted(DEFAULT_ALGORITHMS))
)
found = []
for algo in DEFAULT_ALGORITHMS:
hash = content.get(algo)
if hash and hash in self._content_indexes[algo]:
found.append(self._content_indexes[algo][hash])
if not found:
return []
keys = list(set.intersection(*found))
return [self._contents[key].to_dict() for key in keys]
def content_missing(self, content, key_hash="sha1"):
for cont in content:
for (algo, hash_) in cont.items():
if algo not in DEFAULT_ALGORITHMS:
continue
if hash_ not in self._content_indexes.get(algo, []):
yield cont[key_hash]
break
else:
for result in self.content_find(cont):
if result["status"] == "missing":
yield cont[key_hash]
def content_missing_per_sha1(self, contents):
for content in contents:
if content not in self._content_indexes["sha1"]:
yield content
def content_missing_per_sha1_git(self, contents):
for content in contents:
if content not in self._content_indexes["sha1_git"]:
yield content
def content_get_random(self):
return random.choice(list(self._content_indexes["sha1_git"]))
def _skipped_content_add(self, contents: List[SkippedContent]) -> Dict:
self.journal_writer.skipped_content_add(contents)
summary = {"skipped_content:add": 0}
missing_contents = self.skipped_content_missing([c.hashes() for c in contents])
missing = {self._content_key(c) for c in missing_contents}
contents = [c for c in contents if self._content_key(c) in missing]
for content in contents:
key = self._content_key(content)
for algo in DEFAULT_ALGORITHMS:
if content.get_hash(algo):
self._skipped_content_indexes[algo][content.get_hash(algo)].add(key)
self._skipped_contents[key] = content
summary["skipped_content:add"] += 1
return summary
def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict:
content = [attr.evolve(c, ctime=now()) for c in content]
return self._skipped_content_add(content)
def skipped_content_missing(self, contents):
for content in contents:
matches = list(self._skipped_contents.values())
for (algorithm, key) in self._content_key(content):
if algorithm == "blake2s256":
continue
# Filter out skipped contents with the same hash
matches = [
match for match in matches if match.get_hash(algorithm) == key
]
# if none of the contents match
if not matches:
yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS}
def directory_add(self, directories: Iterable[Directory]) -> Dict:
directories = [dir_ for dir_ in directories if dir_.id not in self._directories]
self.journal_writer.directory_add(directories)
count = 0
for directory in directories:
count += 1
self._directories[directory.id] = directory
self._objects[directory.id].append(("directory", directory.id))
return {"directory:add": count}
def directory_missing(self, directories):
for id in directories:
if id not in self._directories:
yield id
def _join_dentry_to_content(self, dentry):
keys = (
"status",
"sha1",
"sha1_git",
"sha256",
"length",
)
ret = dict.fromkeys(keys)
ret.update(dentry)
if ret["type"] == "file":
# TODO: Make it able to handle more than one content
content = self.content_find({"sha1_git": ret["target"]})
if content:
content = content[0]
for key in keys:
ret[key] = content[key]
return ret
def _directory_ls(self, directory_id, recursive, prefix=b""):
if directory_id in self._directories:
for entry in self._directories[directory_id].entries:
ret = self._join_dentry_to_content(entry.to_dict())
ret["name"] = prefix + ret["name"]
ret["dir_id"] = directory_id
yield ret
if recursive and ret["type"] == "dir":
yield from self._directory_ls(
ret["target"], True, prefix + ret["name"] + b"/"
)
def directory_ls(self, directory, recursive=False):
yield from self._directory_ls(directory, recursive)
def directory_entry_get_by_path(self, directory, paths):
return self._directory_entry_get_by_path(directory, paths, b"")
def directory_get_random(self):
if not self._directories:
return None
return random.choice(list(self._directories))
def _directory_entry_get_by_path(self, directory, paths, prefix):
if not paths:
return
contents = list(self.directory_ls(directory))
if not contents:
return
def _get_entry(entries, name):
for entry in entries:
if entry["name"] == name:
entry = entry.copy()
entry["name"] = prefix + entry["name"]
return entry
first_item = _get_entry(contents, paths[0])
if len(paths) == 1:
return first_item
if not first_item or first_item["type"] != "dir":
return
return self._directory_entry_get_by_path(
first_item["target"], paths[1:], prefix + paths[0] + b"/"
)
def revision_add(self, revisions: Iterable[Revision]) -> Dict:
revisions = [rev for rev in revisions if rev.id not in self._revisions]
self.journal_writer.revision_add(revisions)
count = 0
for revision in revisions:
revision = attr.evolve(
revision,
committer=self._person_add(revision.committer),
author=self._person_add(revision.author),
)
self._revisions[revision.id] = revision
self._objects[revision.id].append(("revision", revision.id))
count += 1
return {"revision:add": count}
def revision_missing(self, revisions):
for id in revisions:
if id not in self._revisions:
yield id
def revision_get(self, revisions):
for id in revisions:
if id in self._revisions:
yield self._revisions.get(id).to_dict()
else:
yield None
def _get_parent_revs(self, rev_id, seen, limit):
if limit and len(seen) >= limit:
return
if rev_id in seen or rev_id not in self._revisions:
return
seen.add(rev_id)
yield self._revisions[rev_id].to_dict()
for parent in self._revisions[rev_id].parents:
yield from self._get_parent_revs(parent, seen, limit)
def revision_log(self, revisions, limit=None):
seen = set()
for rev_id in revisions:
yield from self._get_parent_revs(rev_id, seen, limit)
def revision_shortlog(self, revisions, limit=None):
yield from (
(rev["id"], rev["parents"]) for rev in self.revision_log(revisions, limit)
)
def revision_get_random(self):
return random.choice(list(self._revisions))
def release_add(self, releases: Iterable[Release]) -> Dict:
to_add = []
for rel in releases:
if rel.id not in self._releases and rel not in to_add:
to_add.append(rel)
self.journal_writer.release_add(to_add)
for rel in to_add:
if rel.author:
self._person_add(rel.author)
self._objects[rel.id].append(("release", rel.id))
self._releases[rel.id] = rel
return {"release:add": len(to_add)}
def release_missing(self, releases):
yield from (rel for rel in releases if rel not in self._releases)
def release_get(self, releases):
for rel_id in releases:
if rel_id in self._releases:
yield self._releases[rel_id].to_dict()
else:
yield None
def release_get_random(self):
return random.choice(list(self._releases))
def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict:
count = 0
snapshots = (snap for snap in snapshots if snap.id not in self._snapshots)
for snapshot in snapshots:
self.journal_writer.snapshot_add([snapshot])
self._snapshots[snapshot.id] = snapshot
self._objects[snapshot.id].append(("snapshot", snapshot.id))
count += 1
return {"snapshot:add": count}
def snapshot_missing(self, snapshots):
for id in snapshots:
if id not in self._snapshots:
yield id
def snapshot_get(self, snapshot_id):
return self.snapshot_get_branches(snapshot_id)
def snapshot_get_by_origin_visit(self, origin, visit):
origin_url = self._get_origin_url(origin)
if not origin_url:
return
if origin_url not in self._origins or visit > len(
self._origin_visits[origin_url]
):
return None
visit = self._origin_visit_get_updated(origin_url, visit)
snapshot_id = visit["snapshot"]
if snapshot_id:
return self.snapshot_get(snapshot_id)
else:
return None
def snapshot_count_branches(self, snapshot_id):
snapshot = self._snapshots[snapshot_id]
return collections.Counter(
branch.target_type.value if branch else None
for branch in snapshot.branches.values()
)
def snapshot_get_branches(
self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None
):
snapshot = self._snapshots.get(snapshot_id)
if snapshot is None:
return None
sorted_branches = sorted(snapshot.branches.items())
sorted_branch_names = [k for (k, v) in sorted_branches]
from_index = bisect.bisect_left(sorted_branch_names, branches_from)
if target_types:
next_branch = None
branches = {}
for (branch_name, branch) in sorted_branches:
if branch_name in sorted_branch_names[from_index:]:
if branch and branch.target_type.value in target_types:
if len(branches) < branches_count:
branches[branch_name] = branch
else:
next_branch = branch_name
break
else:
# As there is no 'target_types' filter, we can do this much faster
to_index = from_index + branches_count
returned_branch_names = frozenset(sorted_branch_names[from_index:to_index])
branches = dict(
(branch_name, branch)
for (branch_name, branch) in snapshot.branches.items()
if branch_name in returned_branch_names
)
if to_index >= len(sorted_branch_names):
next_branch = None
else:
next_branch = sorted_branch_names[to_index]
branches = {
name: branch.to_dict() if branch else None
for (name, branch) in branches.items()
}
return {
"id": snapshot_id,
"branches": branches,
"next_branch": next_branch,
}
def snapshot_get_random(self):
return random.choice(list(self._snapshots))
def object_find_by_sha1_git(self, ids):
ret = {}
for id_ in ids:
objs = self._objects.get(id_, [])
ret[id_] = [{"sha1_git": id_, "type": obj[0],} for obj in objs]
return ret
def _convert_origin(self, t):
if t is None:
return None
return t.to_dict()
def origin_get_one(self, origin_url: str) -> Optional[Origin]:
return self._origins.get(origin_url)
def origin_get(self, origins: Iterable[str]) -> Iterable[Optional[Origin]]:
return [self.origin_get_one(origin_url) for origin_url in origins]
def origin_get_by_sha1(self, sha1s):
return [self._convert_origin(self._origins_by_sha1.get(sha1)) for sha1 in sha1s]
def origin_get_range(self, origin_from=1, origin_count=100):
origin_from = max(origin_from, 1)
if origin_from <= len(self._origins_by_id):
max_idx = origin_from + origin_count - 1
if max_idx > len(self._origins_by_id):
max_idx = len(self._origins_by_id)
for idx in range(origin_from - 1, max_idx):
origin = self._convert_origin(self._origins[self._origins_by_id[idx]])
yield {"id": idx + 1, **origin}
def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict:
origin_urls = sorted(self._origins)
if page_token:
from_ = bisect.bisect_left(origin_urls, page_token)
else:
from_ = 0
result = {
"origins": [
{"url": origin_url} for origin_url in origin_urls[from_ : from_ + limit]
]
}
if from_ + limit < len(origin_urls):
result["next_page_token"] = origin_urls[from_ + limit]
return result
def origin_search(
self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False
):
origins = map(self._convert_origin, self._origins.values())
if regexp:
pat = re.compile(url_pattern)
origins = [orig for orig in origins if pat.search(orig["url"])]
else:
origins = [orig for orig in origins if url_pattern in orig["url"]]
if with_visit:
filtered_origins = []
for orig in origins:
visits = (
self._origin_visit_get_updated(ov.origin, ov.visit)
for ov in self._origin_visits[orig["url"]]
)
for ov in visits:
snapshot = ov["snapshot"]
if snapshot and snapshot in self._snapshots:
filtered_origins.append(orig)
break
else:
filtered_origins = origins
return filtered_origins[offset : offset + limit]
def origin_count(self, url_pattern, regexp=False, with_visit=False):
return len(
self.origin_search(
url_pattern,
regexp=regexp,
with_visit=with_visit,
limit=len(self._origins),
)
)
def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]:
origins = list(origins)
added = 0
for origin in origins:
if origin.url not in self._origins:
self.origin_add_one(origin)
added += 1
return {"origin:add": added}
def origin_add_one(self, origin: Origin) -> str:
if origin.url not in self._origins:
self.journal_writer.origin_add([origin])
# generate an origin_id because it is needed by origin_get_range.
# TODO: remove this when we remove origin_get_range
origin_id = len(self._origins) + 1
self._origins_by_id.append(origin.url)
assert len(self._origins_by_id) == origin_id
self._origins[origin.url] = origin
self._origins_by_sha1[origin_url_to_sha1(origin.url)] = origin
self._origin_visits[origin.url] = []
self._objects[origin.url].append(("origin", origin.url))
return origin.url
def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]:
for visit in visits:
origin = self.origin_get_one(visit.origin)
if not origin: # Cannot add a visit without an origin
raise StorageArgumentException("Unknown origin %s", visit.origin)
all_visits = []
for visit in visits:
origin_url = visit.origin
if origin_url in self._origins:
origin = self._origins[origin_url]
if visit.visit:
self.journal_writer.origin_visit_add([visit])
while len(self._origin_visits[origin_url]) < visit.visit:
self._origin_visits[origin_url].append(None)
self._origin_visits[origin_url][visit.visit - 1] = visit
else:
# visit ids are in the range [1, +inf[
visit_id = len(self._origin_visits[origin_url]) + 1
visit = attr.evolve(visit, visit=visit_id)
self.journal_writer.origin_visit_add([visit])
self._origin_visits[origin_url].append(visit)
visit_key = (origin_url, visit.visit)
self._objects[visit_key].append(("origin_visit", None))
assert visit.visit is not None
self._origin_visit_status_add_one(
OriginVisitStatus(
origin=visit.origin,
visit=visit.visit,
date=visit.date,
status="created",
snapshot=None,
)
)
all_visits.append(visit)
return all_visits
def _origin_visit_status_add_one(self, visit_status: OriginVisitStatus) -> None:
"""Add an origin visit status without checks. If already present, do nothing.
"""
self.journal_writer.origin_visit_status_add([visit_status])
visit_key = (visit_status.origin, visit_status.visit)
self._origin_visit_statuses.setdefault(visit_key, [])
visit_statuses = self._origin_visit_statuses[visit_key]
if visit_status not in visit_statuses:
visit_statuses.append(visit_status)
def origin_visit_status_add(
self, visit_statuses: Iterable[OriginVisitStatus],
) -> None:
# First round to check existence (fail early if any is ko)
for visit_status in visit_statuses:
origin_url = self.origin_get_one(visit_status.origin)
if not origin_url:
raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
for visit_status in visit_statuses:
self._origin_visit_status_add_one(visit_status)
def _origin_visit_status_get_latest(
self, origin: str, visit_id: int
) -> Tuple[OriginVisit, OriginVisitStatus]:
"""Return a tuple of OriginVisit, latest associated OriginVisitStatus.
"""
assert visit_id >= 1
visit = self._origin_visits[origin][visit_id - 1]
assert visit is not None
visit_key = (origin, visit_id)
visit_update = max(self._origin_visit_statuses[visit_key], key=lambda v: v.date)
return visit, visit_update
def _origin_visit_get_updated(self, origin: str, visit_id: int) -> Dict[str, Any]:
"""Merge origin visit and latest origin visit status
"""
visit, visit_update = self._origin_visit_status_get_latest(origin, visit_id)
assert visit is not None and visit_update is not None
return {
# default to the values in visit
**visit.to_dict(),
# override with the last update
**visit_update.to_dict(),
# but keep the date of the creation of the origin visit
"date": visit.date,
}
def origin_visit_get(
self,
origin: str,
page_token: Optional[str] = None,
- order: str = "asc",
+ order: ListOrder = ListOrder.ASC,
limit: int = 10,
) -> PagedResult[OriginVisit]:
next_page_token = None
page_token = page_token or "0"
- order = order.lower()
- allowed_orders = ["asc", "desc"]
- if order not in allowed_orders:
- raise StorageArgumentException(
- f"order must be one of {', '.join(allowed_orders)}."
- )
+ if not isinstance(order, ListOrder):
+ raise StorageArgumentException("order must be a ListOrder value")
if not isinstance(page_token, str):
raise StorageArgumentException("page_token must be a string.")
visit_from = int(page_token)
origin_url = self._get_origin_url(origin)
extra_limit = limit + 1
visits = sorted(
self._origin_visits.get(origin_url, []),
key=lambda v: v.visit,
- reverse=(order == "desc"),
+ reverse=(order == ListOrder.DESC),
)
- if visit_from > 0 and order == "asc":
+ if visit_from > 0 and order == ListOrder.ASC:
visits = [v for v in visits if v.visit > visit_from]
- elif visit_from > 0 and order == "desc":
+ elif visit_from > 0 and order == ListOrder.DESC:
visits = [v for v in visits if v.visit < visit_from]
visits = visits[:extra_limit]
assert len(visits) <= extra_limit
if len(visits) == extra_limit:
last_visit = visits[limit]
visits = visits[:limit]
assert last_visit is not None and last_visit.visit is not None
- if order == "asc":
+ if order == ListOrder.ASC:
next_page_token = str(last_visit.visit - 1)
else:
next_page_token = str(last_visit.visit + 1)
return PagedResult(results=visits, next_page_token=next_page_token)
def origin_visit_find_by_date(
self, origin: str, visit_date: datetime.datetime
) -> Optional[OriginVisit]:
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits:
visits = self._origin_visits[origin_url]
visit = min(visits, key=lambda v: (abs(v.date - visit_date), -v.visit))
return visit
return None
def origin_visit_get_by(self, origin: str, visit: int) -> Optional[OriginVisit]:
origin_url = self._get_origin_url(origin)
if origin_url in self._origin_visits and visit <= len(
self._origin_visits[origin_url]
):
found_visit, _ = self._origin_visit_status_get_latest(origin, visit)
return found_visit
return None
def origin_visit_get_latest(
self,
origin: str,
type: Optional[str] = None,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisit]:
ori = self._origins.get(origin)
if not ori:
return None
visits = sorted(
self._origin_visits[ori.url], key=lambda v: (v.date, v.visit), reverse=True,
)
for visit in visits:
if type is not None and visit.type != type:
continue
visit_statuses = self._origin_visit_statuses[origin, visit.visit]
if allowed_statuses is not None:
visit_statuses = [
vs for vs in visit_statuses if vs.status in allowed_statuses
]
if require_snapshot:
visit_statuses = [vs for vs in visit_statuses if vs.snapshot]
if visit_statuses: # we found visit statuses matching criteria
visit_status = max(visit_statuses, key=lambda vs: (vs.date, vs.visit))
assert visit.origin == visit_status.origin
assert visit.visit == visit_status.visit
return visit
return None
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisitStatus]:
ori = self._origins.get(origin_url)
if not ori:
return None
visit_key = (origin_url, visit)
visits = self._origin_visit_statuses.get(visit_key)
if not visits:
return None
if allowed_statuses is not None:
visits = [visit for visit in visits if visit.status in allowed_statuses]
if require_snapshot:
visits = [visit for visit in visits if visit.snapshot]
visit_status = max(visits, key=lambda v: (v.date, v.visit), default=None)
return visit_status
def _select_random_origin_visit_by_type(self, type: str) -> str:
while True:
url = random.choice(list(self._origin_visits.keys()))
random_origin_visits = self._origin_visits[url]
if random_origin_visits[0].type == type:
return url
def origin_visit_status_get_random(
self, type: str
) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]:
url = self._select_random_origin_visit_by_type(type)
random_origin_visits = copy.deepcopy(self._origin_visits[url])
random_origin_visits.reverse()
back_in_the_day = now() - timedelta(weeks=12) # 3 months back
# This should be enough for tests
for visit in random_origin_visits:
origin_visit, latest_visit_status = self._origin_visit_status_get_latest(
url, visit.visit
)
assert latest_visit_status is not None
if (
origin_visit.date > back_in_the_day
and latest_visit_status.status == "full"
):
return origin_visit, latest_visit_status
else:
return None
def stat_counters(self):
keys = (
"content",
"directory",
"origin",
"origin_visit",
"person",
"release",
"revision",
"skipped_content",
"snapshot",
)
stats = {key: 0 for key in keys}
stats.update(
collections.Counter(
obj_type
for (obj_type, obj_id) in itertools.chain(*self._objects.values())
)
)
return stats
def refresh_stat_counters(self):
pass
def raw_extrinsic_metadata_add(
self, metadata: Iterable[RawExtrinsicMetadata],
) -> None:
metadata = list(metadata)
self.journal_writer.raw_extrinsic_metadata_add(metadata)
for metadata_entry in metadata:
authority_key = self._metadata_authority_key(metadata_entry.authority)
if authority_key not in self._metadata_authorities:
raise StorageArgumentException(
f"Unknown authority {metadata_entry.authority}"
)
fetcher_key = self._metadata_fetcher_key(metadata_entry.fetcher)
if fetcher_key not in self._metadata_fetchers:
raise StorageArgumentException(
f"Unknown fetcher {metadata_entry.fetcher}"
)
raw_extrinsic_metadata_list = self._raw_extrinsic_metadata[
metadata_entry.type
][metadata_entry.id][authority_key]
for existing_raw_extrinsic_metadata in raw_extrinsic_metadata_list:
if (
self._metadata_fetcher_key(existing_raw_extrinsic_metadata.fetcher)
== fetcher_key
and existing_raw_extrinsic_metadata.discovery_date
== metadata_entry.discovery_date
):
# Duplicate of an existing one; ignore it.
break
else:
raw_extrinsic_metadata_list.add(metadata_entry)
def raw_extrinsic_metadata_get(
self,
object_type: MetadataTargetType,
id: Union[str, SWHID],
authority: MetadataAuthority,
after: Optional[datetime.datetime] = None,
page_token: Optional[bytes] = None,
limit: int = 1000,
) -> Dict[str, Union[Optional[bytes], List[RawExtrinsicMetadata]]]:
authority_key = self._metadata_authority_key(authority)
if object_type == MetadataTargetType.ORIGIN:
if isinstance(id, SWHID):
raise StorageArgumentException(
f"raw_extrinsic_metadata_get called with object_type='origin', "
f"but provided id is an SWHID: {id!r}"
)
else:
if not isinstance(id, SWHID):
raise StorageArgumentException(
f"raw_extrinsic_metadata_get called with object_type!='origin', "
f"but provided id is not an SWHID: {id!r}"
)
if page_token is not None:
(after_time, after_fetcher) = msgpack_loads(page_token)
after_fetcher = tuple(after_fetcher)
if after is not None and after > after_time:
raise StorageArgumentException(
"page_token is inconsistent with the value of 'after'."
)
entries = self._raw_extrinsic_metadata[object_type][id][
authority_key
].iter_after((after_time, after_fetcher))
elif after is not None:
entries = self._raw_extrinsic_metadata[object_type][id][
authority_key
].iter_from((after,))
entries = (entry for entry in entries if entry.discovery_date > after)
else:
entries = iter(self._raw_extrinsic_metadata[object_type][id][authority_key])
if limit:
entries = itertools.islice(entries, 0, limit + 1)
results = []
for entry in entries:
entry_authority = self._metadata_authorities[
self._metadata_authority_key(entry.authority)
]
entry_fetcher = self._metadata_fetchers[
self._metadata_fetcher_key(entry.fetcher)
]
if after:
assert entry.discovery_date > after
results.append(
attr.evolve(
entry,
authority=attr.evolve(entry_authority, metadata=None),
fetcher=attr.evolve(entry_fetcher, metadata=None),
)
)
if len(results) > limit:
results.pop()
assert len(results) == limit
last_result = results[-1]
next_page_token: Optional[bytes] = msgpack_dumps(
(
last_result.discovery_date,
self._metadata_fetcher_key(last_result.fetcher),
)
)
else:
next_page_token = None
return {
"next_page_token": next_page_token,
"results": results,
}
def metadata_fetcher_add(self, fetchers: Iterable[MetadataFetcher]) -> None:
fetchers = list(fetchers)
self.journal_writer.metadata_fetcher_add(fetchers)
for fetcher in fetchers:
if fetcher.metadata is None:
raise StorageArgumentException(
"MetadataFetcher.metadata may not be None in metadata_fetcher_add."
)
key = self._metadata_fetcher_key(fetcher)
if key not in self._metadata_fetchers:
self._metadata_fetchers[key] = fetcher
def metadata_fetcher_get(
self, name: str, version: str
) -> Optional[MetadataFetcher]:
return self._metadata_fetchers.get(
self._metadata_fetcher_key(MetadataFetcher(name=name, version=version))
)
def metadata_authority_add(self, authorities: Iterable[MetadataAuthority]) -> None:
authorities = list(authorities)
self.journal_writer.metadata_authority_add(authorities)
for authority in authorities:
if authority.metadata is None:
raise StorageArgumentException(
"MetadataAuthority.metadata may not be None in "
"metadata_authority_add."
)
key = self._metadata_authority_key(authority)
self._metadata_authorities[key] = authority
def metadata_authority_get(
self, type: MetadataAuthorityType, url: str
) -> Optional[MetadataAuthority]:
return self._metadata_authorities.get(
self._metadata_authority_key(MetadataAuthority(type=type, url=url))
)
def _get_origin_url(self, origin):
if isinstance(origin, str):
return origin
else:
raise TypeError("origin must be a string.")
def _person_add(self, person):
key = ("person", person.fullname)
if key not in self._objects:
self._persons[person.fullname] = person
self._objects[key].append(key)
return self._persons[person.fullname]
@staticmethod
def _content_key(content):
""" A stable key and the algorithm for a content"""
if isinstance(content, BaseContent):
content = content.to_dict()
return tuple((key, content.get(key)) for key in sorted(DEFAULT_ALGORITHMS))
@staticmethod
def _metadata_fetcher_key(fetcher: MetadataFetcher) -> FetcherKey:
return (fetcher.name, fetcher.version)
@staticmethod
def _metadata_authority_key(authority: MetadataAuthority) -> Hashable:
return (authority.type, authority.url)
def diff_directories(self, from_dir, to_dir, track_renaming=False):
raise NotImplementedError("InMemoryStorage.diff_directories")
def diff_revisions(self, from_rev, to_rev, track_renaming=False):
raise NotImplementedError("InMemoryStorage.diff_revisions")
def diff_revision(self, revision, track_renaming=False):
raise NotImplementedError("InMemoryStorage.diff_revision")
def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
"""Do nothing
"""
return None
def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
return {}
diff --git a/swh/storage/interface.py b/swh/storage/interface.py
index c4c4dfd6..55335390 100644
--- a/swh/storage/interface.py
+++ b/swh/storage/interface.py
@@ -1,1265 +1,1274 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
+from enum import Enum
from typing import Dict, Iterable, List, Optional, Tuple, TypeVar, Union
+
from swh.core.api import remote_api_endpoint
from swh.core.api.classes import PagedResult as CorePagedResult
from swh.model.identifiers import SWHID
from swh.model.model import (
Content,
Directory,
Origin,
OriginVisit,
OriginVisitStatus,
Revision,
Release,
Snapshot,
SkippedContent,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
MetadataTargetType,
RawExtrinsicMetadata,
)
-def deprecated(f):
- f.deprecated_endpoint = True
- return f
+class ListOrder(Enum):
+ """Specifies the order for paginated endpoints returning sorted results."""
+
+ ASC = "asc"
+ DESC = "desc"
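# Quick sanity sketch of the new enum (standard Enum semantics): it accepts its
# string values, so callers that used to pass bare "asc"/"desc" map onto it.
assert ListOrder("asc") is ListOrder.ASC
assert ListOrder.DESC.value == "desc"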
TResult = TypeVar("TResult")
PagedResult = CorePagedResult[TResult, str]
+def deprecated(f):
+ f.deprecated_endpoint = True
+ return f
+
+
class StorageInterface:
@remote_api_endpoint("check_config")
def check_config(self, *, check_write):
"""Check that the storage is configured and ready to go."""
...
@remote_api_endpoint("content/add")
def content_add(self, content: Iterable[Content]) -> Dict:
"""Add content blobs to the storage
Args:
contents (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- data (bytes): the actual content
- length (int): content length
- one key for each checksum algorithm in
:data:`swh.model.hashutil.ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden
Raises:
The following exceptions can occur:
- HashCollision in case of collision
- Any other exceptions raised by the db
In case of errors, some of the content may have been stored in
the DB and in the objstorage.
Since additions to both are idempotent, that should not be a problem.
Returns:
Summary dict with the following keys and associated values:
content:add: New contents added
content:add:bytes: Sum of the contents' length data
"""
...
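# Usage sketch for content_add, assuming `storage` is an object implementing
# StorageInterface; Content.from_data computes length and checksums from the
# raw bytes.
from swh.model.model import Content

blob = Content.from_data(b"hello world\n")
summary = storage.content_add([blob])
# On first insertion, the summary is expected to look like
# {"content:add": 1, "content:add:bytes": 12}.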
@remote_api_endpoint("content/update")
def content_update(self, content, keys=[]):
"""Update content blobs to the storage. Does nothing for unknown
contents or skipped ones.
Args:
content (iterable): iterable of dictionaries representing
individual pieces of content to update. Each dictionary has the
following keys:
- data (bytes): the actual content
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
keys (list): List of keys (str) whose values needs an update, e.g.,
new hash column
"""
...
@remote_api_endpoint("content/add_metadata")
def content_add_metadata(self, content: Iterable[Content]) -> Dict:
"""Add content metadata to the storage (like `content_add`, but
without inserting to the objstorage).
Args:
content (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- length (int): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.ALGORITHMS`, mapped to the
corresponding checksum
- status (str): one of visible, hidden, absent
- reason (str): if status = absent, the reason why
- origin (int): if status = absent, the origin we saw the
content in
- ctime (datetime): time of insertion in the archive
Returns:
Summary dict with the following keys and associated values:
content:add: New contents added
skipped_content:add: New skipped contents (no data) added
"""
...
@remote_api_endpoint("content/data")
def content_get(self, content):
"""Retrieve in bulk contents and their data.
This generator yields exactly as many items as provided sha1
identifiers, but callers should not assume this will always be true.
It may also yield `None` values in case an object was not found.
Args:
content: iterables of sha1
Yields:
Dict[str, bytes]: Generates streams of contents as dict with their
raw data:
- sha1 (bytes): content id
- data (bytes): content's raw data
Raises:
ValueError in case too many contents are required.
cf. BULK_BLOCK_CONTENT_LEN_MAX
"""
...
@deprecated
@remote_api_endpoint("content/range")
def content_get_range(self, start, end, limit=1000):
"""Retrieve contents within range [start, end] bound by limit.
Note that this function may return more than one blob per hash. The
limit is enforced with multiplicity (ie. two blobs with the same hash
will count twice toward the limit).
Args:
**start** (bytes): Starting identifier range (expected smaller
than end)
**end** (bytes): Ending identifier range (expected larger
than start)
**limit** (int): Limit result (default to 1000)
Returns:
a dict with keys:
- contents [dict]: iterable of contents in between the range.
- next (bytes): if content remains in the range, the sha1 from
which the next page starts
"""
...
@remote_api_endpoint("content/partition")
def content_get_partition(
self,
partition_id: int,
nb_partitions: int,
limit: int = 1000,
page_token: str = None,
):
"""Splits contents into nb_partitions, and returns one of these based on
partition_id (which must be in [0, nb_partitions-1])
There is no guarantee on how the partitioning is done, or the
result order.
Args:
partition_id (int): index of the partition to fetch
nb_partitions (int): total number of partitions to split into
limit (int): Limit result (default to 1000)
page_token (Optional[str]): opaque token used for pagination.
Returns:
a dict with keys:
- contents (List[dict]): iterable of contents in the partition.
- **next_page_token** (Optional[str]): opaque token to be used as
`page_token` for retrieving the next page. If absent, there are
no more pages to gather.
"""
...
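# Sketch of draining one partition with the page_token cursor described above;
# `storage` and the 16-partition split are assumptions for the example.
def iter_content_partition(storage, partition_id, nb_partitions=16):
    page_token = None
    while True:
        result = storage.content_get_partition(
            partition_id, nb_partitions, limit=1000, page_token=page_token
        )
        yield from result["contents"]
        page_token = result.get("next_page_token")
        if page_token is None:
            break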
@remote_api_endpoint("content/metadata")
def content_get_metadata(self, contents: List[bytes]) -> Dict[bytes, List[Dict]]:
"""Retrieve content metadata in bulk
Args:
content: iterable of content identifiers (sha1)
Returns:
a dict mapping each content's sha1 to either the existing
content's metadata or None if the content does not exist.
"""
...
@remote_api_endpoint("content/missing")
def content_missing(self, content, key_hash="sha1"):
"""List content missing from storage
Args:
content ([dict]): iterable of dictionaries whose keys are
either 'length' or an item of
:data:`swh.model.hashutil.ALGORITHMS`;
mapped to the corresponding checksum
(or length).
key_hash (str): name of the column to use as the hash id in the
result (default: 'sha1')
Returns:
iterable ([bytes]): missing content ids (as per the
key_hash column)
Raises:
TODO: an exception when we get a hash collision.
"""
...
@remote_api_endpoint("content/missing/sha1")
def content_missing_per_sha1(self, contents):
"""List content missing from storage based only on sha1.
Args:
contents: Iterable of sha1 to check for absence.
Returns:
iterable: missing ids
Raises:
TODO: an exception when we get a hash collision.
"""
...
@remote_api_endpoint("content/missing/sha1_git")
def content_missing_per_sha1_git(self, contents):
"""List content missing from storage based only on sha1_git.
Args:
contents (Iterable): An iterable of content id (sha1_git)
Yields:
missing contents sha1_git
"""
...
@remote_api_endpoint("content/present")
def content_find(self, content):
"""Find a content hash in db.
Args:
content: a dictionary representing one content hash, mapping
checksum algorithm names (see swh.model.hashutil.ALGORITHMS) to
checksum values
Returns:
a triplet (sha1, sha1_git, sha256) if the content exists,
or None otherwise.
Raises:
ValueError: in case the key of the dictionary is neither sha1,
sha1_git, nor sha256.
"""
...
@remote_api_endpoint("content/get_random")
def content_get_random(self):
"""Finds a random content id.
Returns:
a sha1_git
"""
...
@remote_api_endpoint("content/skipped/add")
def skipped_content_add(self, content: Iterable[SkippedContent]) -> Dict:
"""Add contents to the skipped_content list, which contains
(partial) information about content missing from the archive.
Args:
contents (iterable): iterable of dictionaries representing
individual pieces of content to add. Each dictionary has the
following keys:
- length (Optional[int]): content length (default: -1)
- one key for each checksum algorithm in
:data:`swh.model.hashutil.ALGORITHMS`, mapped to the
corresponding checksum; each is optional
- status (str): must be "absent"
- reason (str): the reason why the content is absent
- origin (int): if status = absent, the origin we saw the
content in
Raises:
The following exceptions can occur:
- HashCollision in case of collision
- Any other exceptions raised by the backend
In case of errors, some content may have been stored in
the DB and in the objstorage.
Since additions to both are idempotent, that should not be a problem.
Returns:
Summary dict with the following key and associated values:
skipped_content:add: New skipped contents (no data) added
"""
...
@remote_api_endpoint("content/skipped/missing")
def skipped_content_missing(self, contents):
"""List skipped_content missing from storage
Args:
content: iterable of dictionaries containing the data for each
checksum algorithm.
Returns:
iterable: missing signatures
"""
...
@remote_api_endpoint("directory/add")
def directory_add(self, directories: Iterable[Directory]) -> Dict:
"""Add directories to the storage
Args:
directories (iterable): iterable of dictionaries representing the
individual directories to add. Each dict has the following
keys:
- id (sha1_git): the id of the directory to add
- entries (list): list of dicts for each entry in the
directory. Each dict has the following keys:
- name (bytes)
- type (one of 'file', 'dir', 'rev'): type of the
directory entry (file, directory, revision)
- target (sha1_git): id of the object pointed at by the
directory entry
- perms (int): entry permissions
Returns:
Summary dict of keys with associated count as values:
directory:add: Number of directories actually added
"""
...
@remote_api_endpoint("directory/missing")
def directory_missing(self, directories):
"""List directories missing from storage
Args:
directories (iterable): an iterable of directory ids
Yields:
missing directory ids
"""
...
@remote_api_endpoint("directory/ls")
def directory_ls(self, directory, recursive=False):
"""Get entries for one directory.
Args:
- directory: the directory to list entries from.
- recursive: if set, list entries recursively from this directory.
Returns:
List of entries for such directory.
If `recursive=True`, names in the path of a dir/file not at the
root are concatenated with a slash (`/`).
"""
...
@remote_api_endpoint("directory/path")
def directory_entry_get_by_path(self, directory, paths):
"""Get the directory entry (either file or dir) from directory with path.
Args:
- directory: sha1 of the top level directory
- paths: path to lookup from the top level directory. From left
(top) to right (bottom).
Returns:
The corresponding directory entry if found, None otherwise.
"""
...
@remote_api_endpoint("directory/get_random")
def directory_get_random(self):
"""Finds a random directory id.
Returns:
a sha1_git
"""
...
@remote_api_endpoint("revision/add")
def revision_add(self, revisions: Iterable[Revision]) -> Dict:
"""Add revisions to the storage
Args:
revisions (Iterable[dict]): iterable of dictionaries representing
the individual revisions to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the revision to add
- **date** (:class:`dict`): date the revision was written
- **committer_date** (:class:`dict`): date the revision got
added to the origin
- **type** (one of 'git', 'tar'): type of the
revision added
- **directory** (:class:`sha1_git`): the directory the
revision points at
- **message** (:class:`bytes`): the message associated with
the revision
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **committer** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
- **metadata** (:class:`jsonb`): extra information as
dictionary
- **synthetic** (:class:`bool`): whether the revision is synthetic
(e.g., created from a tarball or a plain directory)
- **parents** (:class:`list[sha1_git]`): the parents of
this revision
date dictionaries have the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
revision:add: New objects actually stored in db
"""
...
@remote_api_endpoint("revision/missing")
def revision_missing(self, revisions):
"""List revisions missing from storage
Args:
revisions (iterable): revision ids
Yields:
missing revision ids
"""
...
@remote_api_endpoint("revision")
def revision_get(self, revisions):
"""Get all revisions from storage
Args:
revisions: an iterable of revision ids
Returns:
iterable: an iterable of revisions as dictionaries (or None if the
revision doesn't exist)
"""
...
@remote_api_endpoint("revision/log")
def revision_log(self, revisions, limit=None):
"""Fetch revision entry from the given root revisions.
Args:
revisions: array of root revisions to lookup
limit: limitation on the output result. Default to None.
Yields:
Revision log entries from the given root revisions.
"""
...
@remote_api_endpoint("revision/shortlog")
def revision_shortlog(self, revisions, limit=None):
"""Fetch the shortlog for the given revisions
Args:
revisions: list of root revisions to lookup
limit: depth limitation for the output
Yields:
a list of (id, parents) tuples.
"""
...
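# Sketch: materializing the (id, parents) pairs into a small parent map,
# assuming `storage` implements StorageInterface and `heads` is a list of
# root revision ids.
def parent_map(storage, heads, limit=None):
    return {
        rev_id: list(parents)
        for rev_id, parents in storage.revision_shortlog(heads, limit=limit)
    }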
@remote_api_endpoint("revision/get_random")
def revision_get_random(self):
"""Finds a random revision id.
Returns:
a sha1_git
"""
...
@remote_api_endpoint("release/add")
def release_add(self, releases: Iterable[Release]) -> Dict:
"""Add releases to the storage
Args:
releases (Iterable[dict]): iterable of dictionaries representing
the individual releases to add. Each dict has the following
keys:
- **id** (:class:`sha1_git`): id of the release to add
- **revision** (:class:`sha1_git`): id of the revision the
release points to
- **date** (:class:`dict`): the date the release was made
- **name** (:class:`bytes`): the name of the release
- **comment** (:class:`bytes`): the comment associated with
the release
- **author** (:class:`Dict[str, bytes]`): dictionary with
keys: name, fullname, email
the date dictionary has the form defined in :mod:`swh.model`.
Returns:
Summary dict of keys with associated count as values
release:add: New objects actually stored in db
"""
...
@remote_api_endpoint("release/missing")
def release_missing(self, releases):
"""List releases missing from storage
Args:
releases: an iterable of release ids
Returns:
a list of missing release ids
"""
...
@remote_api_endpoint("release")
def release_get(self, releases):
"""Given a list of sha1, return the releases's information
Args:
releases: list of sha1s
Yields:
dicts with the same keys as those given to `release_add`
(or ``None`` if a release does not exist)
"""
...
@remote_api_endpoint("release/get_random")
def release_get_random(self):
"""Finds a random release id.
Returns:
a sha1_git
"""
...
@remote_api_endpoint("snapshot/add")
def snapshot_add(self, snapshots: Iterable[Snapshot]) -> Dict:
"""Add snapshots to the storage.
Args:
snapshots ([dict]): the snapshots to add, containing the
following keys:
- **id** (:class:`bytes`): id of the snapshot
- **branches** (:class:`dict`): branches the snapshot contains,
mapping the branch name (:class:`bytes`) to the branch target,
itself a :class:`dict` (or ``None`` if the branch points to an
unknown object)
- **target_type** (:class:`str`): one of ``content``,
``directory``, ``revision``, ``release``,
``snapshot``, ``alias``
- **target** (:class:`bytes`): identifier of the target
(currently a ``sha1_git`` for all object kinds, or the name
of the target branch for aliases)
Raises:
ValueError: if the origin or visit id does not exist.
Returns:
Summary dict of keys with associated count as values
snapshot:add: Count of objects actually stored in db
"""
...
@remote_api_endpoint("snapshot/missing")
def snapshot_missing(self, snapshots):
"""List snapshots missing from storage
Args:
snapshots (iterable): an iterable of snapshot ids
Yields:
missing snapshot ids
"""
...
@remote_api_endpoint("snapshot")
def snapshot_get(self, snapshot_id):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: a dict with three keys:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
...
@remote_api_endpoint("snapshot/by_origin_visit")
def snapshot_get_by_origin_visit(self, origin, visit):
"""Get the content, possibly partial, of a snapshot for the given origin visit
The branches of the snapshot are iterated in the lexicographical
order of their names.
.. warning:: At most 1000 branches contained in the snapshot will be
returned for performance reasons. In order to browse the whole
set of branches, the method :meth:`snapshot_get_branches`
should be used instead.
Args:
origin (int): the origin identifier
visit (int): the visit identifier
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than 1000
branches.
"""
...
@remote_api_endpoint("snapshot/count_branches")
def snapshot_count_branches(self, snapshot_id):
"""Count the number of branches in the snapshot with the given id
Args:
snapshot_id (bytes): identifier of the snapshot
Returns:
dict: A dict whose keys are the target types of branches and
whose values are their corresponding counts
"""
...
@remote_api_endpoint("snapshot/get_branches")
def snapshot_get_branches(
self, snapshot_id, branches_from=b"", branches_count=1000, target_types=None
):
"""Get the content, possibly partial, of a snapshot with the given id
The branches of the snapshot are iterated in the lexicographical
order of their names.
Args:
snapshot_id (bytes): identifier of the snapshot
branches_from (bytes): optional parameter used to skip branches
whose name is less than it before returning them
branches_count (int): optional parameter used to restrain
the amount of returned branches
target_types (list): optional parameter used to filter the
target types of branch to return (possible values that can be
contained in that list are `'content', 'directory',
'revision', 'release', 'snapshot', 'alias'`)
Returns:
dict: None if the snapshot does not exist;
a dict with three keys otherwise:
* **id**: identifier of the snapshot
* **branches**: a dict of branches contained in the snapshot
whose keys are the branches' names.
* **next_branch**: the name of the first branch not returned
or :const:`None` if the snapshot has less than
`branches_count` branches after `branches_from` included.
"""
...
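# Sketch of walking all branches of a snapshot with the `next_branch` cursor
# documented above (assumes a `storage` StorageInterface instance).
def iter_snapshot_branches(storage, snapshot_id, batch_size=1000):
    branches_from = b""
    while True:
        partial = storage.snapshot_get_branches(
            snapshot_id, branches_from=branches_from, branches_count=batch_size
        )
        if partial is None:  # unknown snapshot
            return
        yield from partial["branches"].items()
        if partial["next_branch"] is None:
            return
        branches_from = partial["next_branch"]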
@remote_api_endpoint("snapshot/get_random")
def snapshot_get_random(self):
"""Finds a random snapshot id.
Returns:
a sha1_git
"""
...
@remote_api_endpoint("origin/visit/add")
def origin_visit_add(self, visits: Iterable[OriginVisit]) -> Iterable[OriginVisit]:
"""Add visits to storage. If the visits have no id, they will be created and assigned
one. The resulted visits are visits with their visit id set.
Args:
visits: Iterable of OriginVisit objects to add
Raises:
StorageArgumentException if some origin visit reference unknown origins
Returns:
Iterable[OriginVisit] stored
"""
...
@remote_api_endpoint("origin/visit_status/add")
def origin_visit_status_add(
self, visit_statuses: Iterable[OriginVisitStatus],
) -> None:
"""Add origin visit statuses.
If there is already a status for the same origin and visit id at the same
date, the new one will either be dropped or will replace the existing one
(it is unspecified which of these two behaviors happens).
Args:
visit_statuses: origin visit statuses to add
Raises: StorageArgumentException if the origin of the visit status is unknown
"""
...
@remote_api_endpoint("origin/visit/get")
def origin_visit_get(
self,
origin: str,
page_token: Optional[str] = None,
- order: str = "asc",
+ order: ListOrder = ListOrder.ASC,
limit: int = 10,
) -> PagedResult[OriginVisit]:
"""Retrieve page of OriginVisit information.
Args:
origin: The visited origin
page_token: opaque string used to get the next results of a search
order: Order on visit id fields to list origin visits (default to asc)
limit: Number of visits to return
Raises:
StorageArgumentException if the order is invalid or the page_token is of
the wrong type.
Returns: Page of OriginVisit data model objects. If next_page_token is None,
there is no more data to retrieve.
"""
...
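# Sketch of paging through an origin's visits with the new ListOrder enum;
# `storage` and the origin URL are assumptions for the example.
def iter_origin_visits(storage, origin_url):
    page_token = None
    while True:
        page = storage.origin_visit_get(
            origin_url, page_token=page_token, order=ListOrder.ASC, limit=10
        )
        yield from page.results
        if page.next_page_token is None:
            break
        page_token = page.next_page_token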
@remote_api_endpoint("origin/visit/find_by_date")
def origin_visit_find_by_date(
self, origin: str, visit_date: datetime.datetime
) -> Optional[OriginVisit]:
"""Retrieves the origin visit whose date is closest to the provided
timestamp.
In case of a tie, the visit with the largest id is selected.
Args:
origin: origin (URL)
visit_date: expected visit date
Returns:
A visit if found, None otherwise
"""
...
@remote_api_endpoint("origin/visit/getby")
def origin_visit_get_by(self, origin: str, visit: int) -> Optional[OriginVisit]:
"""Retrieve origin visit's information.
Args:
origin: origin (URL)
visit: visit id
Returns:
The information on that particular OriginVisit or None if
it does not exist
"""
...
@remote_api_endpoint("origin/visit/get_latest")
def origin_visit_get_latest(
self,
origin: str,
type: Optional[str] = None,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisit]:
"""Get the latest origin visit for the given origin, optionally
looking only for those with one of the given allowed_statuses
or for those with a snapshot.
Args:
origin: origin URL
type: Optional visit type to filter on (e.g git, tar, dsc, svn,
hg, npm, pypi, ...)
allowed_statuses: list of visit statuses considered
to find the latest visit. For instance,
``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
require_snapshot: If True, only a visit with a snapshot
will be returned.
Returns:
OriginVisit matching the criteria if found, None otherwise. Note that, as
OriginVisit no longer holds a reference to the visit status or snapshot, you
may want to use origin_visit_status_get_latest for that information.
"""
...
@remote_api_endpoint("origin/visit_status/get_latest")
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
) -> Optional[OriginVisitStatus]:
"""Get the latest origin visit status for the given origin visit, optionally
looking only for those with one of the given allowed_statuses or with a
snapshot.
Args:
origin_url: origin URL
visit: visit id
allowed_statuses: list of visit statuses considered to find the latest
visit. Possible values are {created, ongoing, partial, full}. For
instance, ``allowed_statuses=['full']`` will only consider visits that
have successfully run to completion.
require_snapshot: If True, only a visit with a snapshot
will be returned.
Returns:
The OriginVisitStatus matching the criteria
"""
...
@remote_api_endpoint("origin/visit_status/get_random")
def origin_visit_status_get_random(
self, type: str
) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]:
"""Randomly select one successful origin visit with
made in the last 3 months.
Returns:
One random tuple of (OriginVisit, OriginVisitStatus) matching the
selection criteria
"""
...
@remote_api_endpoint("object/find_by_sha1_git")
def object_find_by_sha1_git(self, ids):
"""Return the objects found with the given ids.
Args:
ids: a generator of sha1_gits
Returns:
dict: a mapping from id to the list of objects found. Each object
found is itself a dict with keys:
- sha1_git: the input id
- type: the type of object found
"""
...
@remote_api_endpoint("origin/get")
def origin_get(self, origins: Iterable[str]) -> Iterable[Optional[Origin]]:
"""Return origins.
Args:
origins: a list of urls to find
Returns:
the list of associated existing origin model objects. The unknown origins
will be returned as None at the same index as the input.
"""
...
@remote_api_endpoint("origin/get_sha1")
def origin_get_by_sha1(self, sha1s):
"""Return origins, identified by the sha1 of their URLs.
Args:
sha1s (list[bytes]): a list of sha1s
Yields:
dicts containing origin information as returned
by :meth:`swh.storage.storage.Storage.origin_get`, or None if an
origin matching the sha1 is not found.
"""
...
@deprecated
@remote_api_endpoint("origin/get_range")
def origin_get_range(self, origin_from=1, origin_count=100):
"""Retrieve ``origin_count`` origins whose ids are greater
or equal than ``origin_from``.
Origins are sorted by id before retrieving them.
Args:
origin_from (int): the minimum id of origins to retrieve
origin_count (int): the maximum number of origins to retrieve
Yields:
dicts containing origin information as returned
by :meth:`swh.storage.storage.Storage.origin_get`.
"""
...
@remote_api_endpoint("origin/list")
def origin_list(self, page_token: Optional[str] = None, limit: int = 100) -> dict:
"""Returns the list of origins
Args:
page_token: opaque token used for pagination.
limit: the maximum number of results to return
Returns:
dict: dict with the following keys:
- **next_page_token** (str, optional): opaque token to be used as
`page_token` for retrieving the next page. If absent, there are
no more pages to gather.
- **origins** (List[dict]): list of origins, as returned by
`origin_get`.
"""
...
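# Sketch of consuming origin_list pagination (assumes a `storage` instance
# implementing StorageInterface).
def iter_origins(storage):
    page_token = None
    while True:
        result = storage.origin_list(page_token=page_token, limit=100)
        yield from result["origins"]
        page_token = result.get("next_page_token")
        if page_token is None:
            break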
@remote_api_endpoint("origin/search")
def origin_search(
self, url_pattern, offset=0, limit=50, regexp=False, with_visit=False
):
"""Search for origins whose urls contain a provided string pattern
or match a provided regular expression.
The search is performed in a case insensitive way.
Args:
url_pattern (str): the string pattern to search for in origin urls
offset (int): number of found origins to skip before returning
results
limit (int): the maximum number of found origins to return
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Yields:
dicts containing origin information as returned
by :meth:`swh.storage.storage.Storage.origin_get`.
"""
...
@deprecated
@remote_api_endpoint("origin/count")
def origin_count(self, url_pattern, regexp=False, with_visit=False):
"""Count origins whose urls contain a provided string pattern
or match a provided regular expression.
The pattern search in origin urls is performed in a case insensitive
way.
Args:
url_pattern (str): the string pattern to search for in origin urls
regexp (bool): if True, consider the provided pattern as a regular
expression and return origins whose urls match it
with_visit (bool): if True, filter out origins with no visit
Returns:
int: The number of origins matching the search criterion.
"""
...
@remote_api_endpoint("origin/add_multi")
def origin_add(self, origins: Iterable[Origin]) -> Dict[str, int]:
"""Add origins to the storage
Args:
origins: list of dictionaries representing the individual origins,
with the following keys:
- type: the origin type ('git', 'svn', 'deb', ...)
- url (bytes): the url the origin points to
Returns:
Summary dict of keys with associated count as values
origin:add: Count of objects actually stored in db
"""
...
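# Minimal sketch (illustrative URL, assumed `storage` instance): registering an
# origin with the model object and reading it back.
from swh.model.model import Origin

url = "https://example.org/repo.git"
storage.origin_add([Origin(url=url)])
[origin] = storage.origin_get([url])
assert origin is not None and origin.url == url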
def stat_counters(self):
"""compute statistics about the number of tuples in various tables
Returns:
dict: a dictionary mapping textual labels (e.g., content) to
integer values (e.g., the number of tuples in table content)
"""
...
def refresh_stat_counters(self):
"""Recomputes the statistics for `stat_counters`."""
...
@remote_api_endpoint("raw_extrinsic_metadata/add")
def raw_extrinsic_metadata_add(
self, metadata: Iterable[RawExtrinsicMetadata],
) -> None:
"""Add extrinsic metadata on objects (contents, directories, ...).
The authority and fetcher must be known to the storage before
using this endpoint.
If there is already metadata for the same object, authority,
fetcher, and date, the new one will either be dropped or will
replace the existing one (it is unspecified which of these two
behaviors happens).
Args:
metadata: iterable of RawExtrinsicMetadata objects to be inserted.
"""
...
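# Sketch of the registration order implied above: the authority and fetcher
# must be known before metadata referencing them is added (names and URL are
# illustrative, `storage` is assumed).
from swh.model.model import (
    MetadataAuthority,
    MetadataAuthorityType,
    MetadataFetcher,
)

storage.metadata_authority_add(
    [
        MetadataAuthority(
            type=MetadataAuthorityType.FORGE,
            url="https://forge.example.org",
            metadata={},
        )
    ]
)
storage.metadata_fetcher_add(
    [MetadataFetcher(name="example-loader", version="1.0.0", metadata={})]
)
# RawExtrinsicMetadata objects referencing this authority and fetcher can now
# be passed to raw_extrinsic_metadata_add.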
@remote_api_endpoint("raw_extrinsic_metadata/get")
def raw_extrinsic_metadata_get(
self,
object_type: MetadataTargetType,
id: Union[str, SWHID],
authority: MetadataAuthority,
after: Optional[datetime.datetime] = None,
page_token: Optional[bytes] = None,
limit: int = 1000,
) -> Dict[str, Union[Optional[bytes], List[RawExtrinsicMetadata]]]:
"""Retrieve list of all raw_extrinsic_metadata entries for the id
Args:
object_type: one of the values of swh.model.model.MetadataTargetType
id: a URL if object_type is 'origin', else a core SWHID
authority: the metadata authority to retrieve metadata from
after: minimum discovery_date for a result to be returned
page_token: opaque token, used to get the next page of results
limit: maximum number of results to be returned
Returns:
dict with keys `next_page_token` and `results`.
`next_page_token` is an opaque token that is used to get the
next page of results, or `None` if there are no more results.
`results` is a list of RawExtrinsicMetadata objects.
"""
...
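# Sketch of paging through all metadata on an origin from one authority,
# following the opaque page_token described above (`storage`, the URL and the
# authority object are assumptions for the example).
def iter_origin_metadata(storage, origin_url, authority):
    page_token = None
    while True:
        result = storage.raw_extrinsic_metadata_get(
            MetadataTargetType.ORIGIN,
            origin_url,
            authority,
            page_token=page_token,
            limit=100,
        )
        yield from result["results"]
        page_token = result["next_page_token"]
        if page_token is None:
            break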
@remote_api_endpoint("metadata_fetcher/add")
def metadata_fetcher_add(self, fetchers: Iterable[MetadataFetcher],) -> None:
"""Add new metadata fetchers to the storage.
Their `name` and `version` together are unique identifiers of this
fetcher; and `metadata` is an arbitrary dict of JSONable data
with information about this fetcher, which must not be `None`
(but may be empty).
Args:
fetchers: iterable of MetadataFetcher to be inserted
"""
...
@remote_api_endpoint("metadata_fetcher/get")
def metadata_fetcher_get(
self, name: str, version: str
) -> Optional[MetadataFetcher]:
"""Retrieve information about a fetcher
Args:
name: the name of the fetcher
version: version of the fetcher
Returns:
a MetadataFetcher object (with a non-None metadata field) if it is known,
else None.
"""
...
@remote_api_endpoint("metadata_authority/add")
def metadata_authority_add(self, authorities: Iterable[MetadataAuthority]) -> None:
"""Add new metadata authorities to the storage.
Their `type` and `url` together are unique identifiers of this
authority; and `metadata` is an arbitrary dict of JSONable data
with information about this authority, which must not be `None`
(but may be empty).
Args:
authorities: iterable of MetadataAuthority to be inserted
"""
...
@remote_api_endpoint("metadata_authority/get")
def metadata_authority_get(
self, type: MetadataAuthorityType, url: str
) -> Optional[MetadataAuthority]:
"""Retrieve information about an authority
Args:
type: one of "deposit_client", "forge", or "registry"
url: unique URI identifying the authority
Returns:
a MetadataAuthority object (with a non-None metadata field) if it is known,
else None.
"""
...
@deprecated
@remote_api_endpoint("algos/diff_directories")
def diff_directories(self, from_dir, to_dir, track_renaming=False):
"""Compute the list of file changes introduced between two arbitrary
directories (insertion / deletion / modification / renaming of files).
Args:
from_dir (bytes): identifier of the directory to compare from
to_dir (bytes): identifier of the directory to compare to
track_renaming (bool): whether or not to track files renaming
Returns:
A list of dict describing the introduced file changes
(see :func:`swh.storage.algos.diff.diff_directories`
for more details).
"""
...
@deprecated
@remote_api_endpoint("algos/diff_revisions")
def diff_revisions(self, from_rev, to_rev, track_renaming=False):
"""Compute the list of file changes introduced between two arbitrary
revisions (insertion / deletion / modification / renaming of files).
Args:
from_rev (bytes): identifier of the revision to compare from
to_rev (bytes): identifier of the revision to compare to
track_renaming (bool): whether or not to track files renaming
Returns:
A list of dict describing the introduced file changes
(see :func:`swh.storage.algos.diff.diff_directories`
for more details).
"""
...
@deprecated
@remote_api_endpoint("algos/diff_revision")
def diff_revision(self, revision, track_renaming=False):
"""Compute the list of file changes introduced by a specific revision
(insertion / deletion / modification / renaming of files) by comparing
it against its first parent.
Args:
revision (bytes): identifier of the revision from which to
compute the list of files changes
track_renaming (bool): whether or not to track files renaming
Returns:
A list of dict describing the introduced file changes
(see :func:`swh.storage.algos.diff.diff_directories`
for more details).
"""
...
@remote_api_endpoint("clear/buffer")
def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
"""For backend storages (pg, storage, in-memory), this is a noop operation. For proxy
storages (especially filter, buffer), this is an operation which cleans internal
state.
"""
@remote_api_endpoint("flush")
def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
"""For backend storages (pg, storage, in-memory), this is expected to be a noop
operation. For proxy storages (especially buffer), this is expected to trigger
actual writes to the backend.
"""
...
diff --git a/swh/storage/storage.py b/swh/storage/storage.py
index 651615fe..abb7d1f0 100644
--- a/swh/storage/storage.py
+++ b/swh/storage/storage.py
@@ -1,1383 +1,1379 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import contextlib
import datetime
import itertools
from collections import defaultdict
from contextlib import contextmanager
from typing import (
Any,
Counter,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
import attr
import psycopg2
import psycopg2.pool
import psycopg2.errors
from swh.core.api.serializers import msgpack_loads, msgpack_dumps
from swh.model.identifiers import parse_swhid, SWHID
from swh.model.model import (
Content,
Directory,
Origin,
OriginVisit,
OriginVisitStatus,
Revision,
Release,
SkippedContent,
Snapshot,
SHA1_SIZE,
MetadataAuthority,
MetadataAuthorityType,
MetadataFetcher,
MetadataTargetType,
RawExtrinsicMetadata,
)
from swh.model.hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, hash_to_hex
-from swh.storage.interface import PagedResult
+from swh.storage.interface import ListOrder, PagedResult
from swh.storage.objstorage import ObjStorage
from swh.storage.utils import now
from . import converters
from .common import db_transaction_generator, db_transaction
from .db import Db
from .exc import StorageArgumentException, StorageDBError, HashCollision
from .algos import diff
from .metrics import timed, send_metric, process_metrics
from .utils import get_partition_bounds_bytes, extract_collision_hash, map_optional
from .writer import JournalWriter
# Max block size of contents to return
BULK_BLOCK_CONTENT_LEN_MAX = 10000
EMPTY_SNAPSHOT_ID = hash_to_bytes("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e")
"""Identifier for the empty snapshot"""
VALIDATION_EXCEPTIONS = (
KeyError,
TypeError,
ValueError,
psycopg2.errors.CheckViolation,
psycopg2.errors.IntegrityError,
psycopg2.errors.InvalidTextRepresentation,
psycopg2.errors.NotNullViolation,
psycopg2.errors.NumericValueOutOfRange,
psycopg2.errors.UndefinedFunction, # (raised on wrong argument types)
)
"""Exceptions raised by postgresql when validation of the arguments
failed."""
@contextlib.contextmanager
def convert_validation_exceptions():
"""Catches postgresql errors related to invalid arguments, and
re-raises a StorageArgumentException."""
try:
yield
except tuple(VALIDATION_EXCEPTIONS) as e:
raise StorageArgumentException(str(e))
class Storage:
"""SWH storage proxy, encompassing DB and object storage
"""
def __init__(
self, db, objstorage, min_pool_conns=1, max_pool_conns=10, journal_writer=None
):
"""
Args:
db: either a libpq connection string, or a psycopg2 connection
objstorage: configuration for the object storage
"""
try:
if isinstance(db, psycopg2.extensions.connection):
self._pool = None
self._db = Db(db)
else:
self._pool = psycopg2.pool.ThreadedConnectionPool(
min_pool_conns, max_pool_conns, db
)
self._db = None
except psycopg2.OperationalError as e:
raise StorageDBError(e)
self.journal_writer = JournalWriter(journal_writer)
self.objstorage = ObjStorage(objstorage)
def get_db(self):
if self._db:
return self._db
else:
return Db.from_pool(self._pool)
def put_db(self, db):
if db is not self._db:
db.put_conn()
@contextmanager
def db(self):
db = None
try:
db = self.get_db()
yield db
finally:
if db:
self.put_db(db)
@timed
@db_transaction()
def check_config(self, *, check_write, db=None, cur=None):
if not self.objstorage.check_config(check_write=check_write):
return False
# Check permissions on one of the tables
if check_write:
check = "INSERT"
else:
check = "SELECT"
cur.execute("select has_table_privilege(current_user, 'content', %s)", (check,))
return cur.fetchone()[0]
def _content_unique_key(self, hash, db):
"""Given a hash (tuple or dict), return a unique key from the
aggregation of keys.
"""
keys = db.content_hash_keys
if isinstance(hash, tuple):
return hash
return tuple([hash[k] for k in keys])
def _content_add_metadata(self, db, cur, content):
"""Add content to the postgresql database but not the object storage.
"""
# create temporary table for metadata injection
db.mktemp("content", cur)
db.copy_to(
(c.to_dict() for c in content), "tmp_content", db.content_add_keys, cur
)
# move metadata in place
try:
db.content_add_from_temp(cur)
except psycopg2.IntegrityError as e:
if e.diag.sqlstate == "23505" and e.diag.table_name == "content":
message_detail = e.diag.message_detail
if message_detail:
hash_name, hash_id = extract_collision_hash(message_detail)
collision_contents_hashes = [
c.hashes() for c in content if c.get_hash(hash_name) == hash_id
]
else:
constraint_to_hash_name = {
"content_pkey": "sha1",
"content_sha1_git_idx": "sha1_git",
"content_sha256_idx": "sha256",
}
hash_name = constraint_to_hash_name.get(e.diag.constraint_name)
hash_id = None
collision_contents_hashes = None
raise HashCollision(
hash_name, hash_id, collision_contents_hashes
) from None
else:
raise
@timed
@process_metrics
def content_add(self, content: Iterable[Content]) -> Dict:
ctime = now()
contents = [attr.evolve(c, ctime=ctime) for c in content]
objstorage_summary = self.objstorage.content_add(contents)
with self.db() as db:
with db.transaction() as cur:
missing = list(
self.content_missing(
map(Content.to_dict, contents),
key_hash="sha1_git",
db=db,
cur=cur,
)
)
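                # Only contents unknown to the database are written to the journal
                # and to the metadata tables below.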
contents = [c for c in contents if c.sha1_git in missing]
self.journal_writer.content_add(contents)
self._content_add_metadata(db, cur, contents)
return {
"content:add": len(contents),
"content:add:bytes": objstorage_summary["content:add:bytes"],
}
@timed
@db_transaction()
def content_update(self, content, keys=[], db=None, cur=None):
# TODO: Add a check on input keys. How to properly implement
# this? We don't know yet the new columns.
self.journal_writer.content_update(content)
db.mktemp("content", cur)
select_keys = list(set(db.content_get_metadata_keys).union(set(keys)))
with convert_validation_exceptions():
db.copy_to(content, "tmp_content", select_keys, cur)
db.content_update_from_temp(keys_to_update=keys, cur=cur)
@timed
@process_metrics
@db_transaction()
def content_add_metadata(
self, content: Iterable[Content], db=None, cur=None
) -> Dict:
contents = list(content)
missing = self.content_missing(
(c.to_dict() for c in contents), key_hash="sha1_git", db=db, cur=cur,
)
contents = [c for c in contents if c.sha1_git in missing]
self.journal_writer.content_add_metadata(contents)
self._content_add_metadata(db, cur, contents)
return {
"content:add": len(contents),
}
@timed
def content_get(self, content):
# FIXME: Make this method support slicing the `data`.
if len(content) > BULK_BLOCK_CONTENT_LEN_MAX:
raise StorageArgumentException(
"Send at maximum %s contents." % BULK_BLOCK_CONTENT_LEN_MAX
)
yield from self.objstorage.content_get(content)
@timed
@db_transaction()
def content_get_range(self, start, end, limit=1000, db=None, cur=None):
if limit is None:
raise StorageArgumentException("limit should not be None")
contents = []
next_content = None
for counter, content_row in enumerate(
db.content_get_range(start, end, limit + 1, cur)
):
content = dict(zip(db.content_get_metadata_keys, content_row))
if counter >= limit:
                # take the last content for the next page starting from this
next_content = content["sha1"]
break
contents.append(content)
return {
"contents": contents,
"next": next_content,
}
@timed
def content_get_partition(
self,
partition_id: int,
nb_partitions: int,
limit: int = 1000,
page_token: str = None,
):
if limit is None:
raise StorageArgumentException("limit should not be None")
(start, end) = get_partition_bounds_bytes(
partition_id, nb_partitions, SHA1_SIZE
)
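        # A page_token is the hex-encoded sha1 at which the next page starts; it
        # replaces the partition's lower bound so iteration resumes from there.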
if page_token:
start = hash_to_bytes(page_token)
if end is None:
end = b"\xff" * SHA1_SIZE
result = self.content_get_range(start, end, limit)
result2 = {
"contents": result["contents"],
"next_page_token": None,
}
if result["next"]:
result2["next_page_token"] = hash_to_hex(result["next"])
return result2
@timed
@db_transaction(statement_timeout=500)
def content_get_metadata(
self, contents: List[bytes], db=None, cur=None
) -> Dict[bytes, List[Dict]]:
result: Dict[bytes, List[Dict]] = {sha1: [] for sha1 in contents}
for row in db.content_get_metadata_from_sha1s(contents, cur):
content_meta = dict(zip(db.content_get_metadata_keys, row))
result[content_meta["sha1"]].append(content_meta)
return result
@timed
@db_transaction_generator()
def content_missing(self, content, key_hash="sha1", db=None, cur=None):
keys = db.content_hash_keys
if key_hash not in keys:
raise StorageArgumentException("key_hash should be one of %s" % keys)
key_hash_idx = keys.index(key_hash)
if not content:
return
for obj in db.content_missing_from_list(content, cur):
yield obj[key_hash_idx]
@timed
@db_transaction_generator()
def content_missing_per_sha1(self, contents, db=None, cur=None):
for obj in db.content_missing_per_sha1(contents, cur):
yield obj[0]
@timed
@db_transaction_generator()
def content_missing_per_sha1_git(self, contents, db=None, cur=None):
for obj in db.content_missing_per_sha1_git(contents, cur):
yield obj[0]
@timed
@db_transaction()
def content_find(self, content, db=None, cur=None):
if not set(content).intersection(DEFAULT_ALGORITHMS):
raise StorageArgumentException(
"content keys must contain at least one of: "
"sha1, sha1_git, sha256, blake2s256"
)
contents = db.content_find(
sha1=content.get("sha1"),
sha1_git=content.get("sha1_git"),
sha256=content.get("sha256"),
blake2s256=content.get("blake2s256"),
cur=cur,
)
return [dict(zip(db.content_find_cols, content)) for content in contents]
@timed
@db_transaction()
def content_get_random(self, db=None, cur=None):
return db.content_get_random(cur)
@staticmethod
def _skipped_content_normalize(d):
d = d.copy()
if d.get("status") is None:
d["status"] = "absent"
if d.get("length") is None:
d["length"] = -1
return d
def _skipped_content_add_metadata(self, db, cur, content: Iterable[SkippedContent]):
origin_ids = db.origin_id_get_by_url([cont.origin for cont in content], cur=cur)
content = [
attr.evolve(c, origin=origin_id)
for (c, origin_id) in zip(content, origin_ids)
]
db.mktemp("skipped_content", cur)
db.copy_to(
[c.to_dict() for c in content],
"tmp_skipped_content",
db.skipped_content_keys,
cur,
)
# move metadata in place
db.skipped_content_add_from_temp(cur)
@timed
@process_metrics
@db_transaction()
def skipped_content_add(
self, content: Iterable[SkippedContent], db=None, cur=None
) -> Dict:
ctime = now()
content = [attr.evolve(c, ctime=ctime) for c in content]
missing_contents = self.skipped_content_missing(
(c.to_dict() for c in content), db=db, cur=cur,
)
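        # Keep only the contents whose hashes match one of the missing entries,
        # so that already-known skipped contents are not re-inserted.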
content = [
c
for c in content
if any(
all(
c.get_hash(algo) == missing_content.get(algo)
for algo in DEFAULT_ALGORITHMS
)
for missing_content in missing_contents
)
]
self.journal_writer.skipped_content_add(content)
self._skipped_content_add_metadata(db, cur, content)
return {
"skipped_content:add": len(content),
}
@timed
@db_transaction_generator()
def skipped_content_missing(self, contents, db=None, cur=None):
contents = list(contents)
for content in db.skipped_content_missing(contents, cur):
yield dict(zip(db.content_hash_keys, content))
@timed
@process_metrics
@db_transaction()
def directory_add(
self, directories: Iterable[Directory], db=None, cur=None
) -> Dict:
directories = list(directories)
summary = {"directory:add": 0}
dirs = set()
dir_entries: Dict[str, defaultdict] = {
"file": defaultdict(list),
"dir": defaultdict(list),
"rev": defaultdict(list),
}
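        # Group entries by type ('file', 'dir', 'rev') and by parent directory id,
        # matching the per-type temporary tables filled below.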
for cur_dir in directories:
dir_id = cur_dir.id
dirs.add(dir_id)
for src_entry in cur_dir.entries:
entry = src_entry.to_dict()
entry["dir_id"] = dir_id
dir_entries[entry["type"]][dir_id].append(entry)
dirs_missing = set(self.directory_missing(dirs, db=db, cur=cur))
if not dirs_missing:
return summary
self.journal_writer.directory_add(
dir_ for dir_ in directories if dir_.id in dirs_missing
)
# Copy directory ids
dirs_missing_dict = ({"id": dir} for dir in dirs_missing)
db.mktemp("directory", cur)
db.copy_to(dirs_missing_dict, "tmp_directory", ["id"], cur)
# Copy entries
for entry_type, entry_list in dir_entries.items():
entries = itertools.chain.from_iterable(
entries_for_dir
for dir_id, entries_for_dir in entry_list.items()
if dir_id in dirs_missing
)
db.mktemp_dir_entry(entry_type)
db.copy_to(
entries,
"tmp_directory_entry_%s" % entry_type,
["target", "name", "perms", "dir_id"],
cur,
)
# Do the final copy
db.directory_add_from_temp(cur)
summary["directory:add"] = len(dirs_missing)
return summary
@timed
@db_transaction_generator()
def directory_missing(self, directories, db=None, cur=None):
for obj in db.directory_missing_from_list(directories, cur):
yield obj[0]
@timed
@db_transaction_generator(statement_timeout=20000)
def directory_ls(self, directory, recursive=False, db=None, cur=None):
if recursive:
res_gen = db.directory_walk(directory, cur=cur)
else:
res_gen = db.directory_walk_one(directory, cur=cur)
for line in res_gen:
yield dict(zip(db.directory_ls_cols, line))
@timed
@db_transaction(statement_timeout=2000)
def directory_entry_get_by_path(self, directory, paths, db=None, cur=None):
res = db.directory_entry_get_by_path(directory, paths, cur)
if res:
return dict(zip(db.directory_ls_cols, res))
@timed
@db_transaction()
def directory_get_random(self, db=None, cur=None):
return db.directory_get_random(cur)
@timed
@process_metrics
@db_transaction()
def revision_add(self, revisions: Iterable[Revision], db=None, cur=None) -> Dict:
revisions = list(revisions)
summary = {"revision:add": 0}
revisions_missing = set(
self.revision_missing(
set(revision.id for revision in revisions), db=db, cur=cur
)
)
if not revisions_missing:
return summary
db.mktemp_revision(cur)
revisions_filtered = [
revision for revision in revisions if revision.id in revisions_missing
]
self.journal_writer.revision_add(revisions_filtered)
revisions_filtered = list(map(converters.revision_to_db, revisions_filtered))
parents_filtered: List[bytes] = []
with convert_validation_exceptions():
db.copy_to(
revisions_filtered,
"tmp_revision",
db.revision_add_cols,
cur,
lambda rev: parents_filtered.extend(rev["parents"]),
)
db.revision_add_from_temp(cur)
db.copy_to(
parents_filtered,
"revision_history",
["id", "parent_id", "parent_rank"],
cur,
)
return {"revision:add": len(revisions_missing)}
@timed
@db_transaction_generator()
def revision_missing(self, revisions, db=None, cur=None):
if not revisions:
return
for obj in db.revision_missing_from_list(revisions, cur):
yield obj[0]
@timed
@db_transaction_generator(statement_timeout=1000)
def revision_get(self, revisions, db=None, cur=None):
for line in db.revision_get_from_list(revisions, cur):
data = converters.db_to_revision(dict(zip(db.revision_get_cols, line)))
if not data["type"]:
yield None
continue
yield data
@timed
@db_transaction_generator(statement_timeout=2000)
def revision_log(self, revisions, limit=None, db=None, cur=None):
for line in db.revision_log(revisions, limit, cur):
data = converters.db_to_revision(dict(zip(db.revision_get_cols, line)))
if not data["type"]:
yield None
continue
yield data
@timed
@db_transaction_generator(statement_timeout=2000)
def revision_shortlog(self, revisions, limit=None, db=None, cur=None):
yield from db.revision_shortlog(revisions, limit, cur)
@timed
@db_transaction()
def revision_get_random(self, db=None, cur=None):
return db.revision_get_random(cur)
@timed
@process_metrics
@db_transaction()
def release_add(self, releases: Iterable[Release], db=None, cur=None) -> Dict:
releases = list(releases)
summary = {"release:add": 0}
release_ids = set(release.id for release in releases)
releases_missing = set(self.release_missing(release_ids, db=db, cur=cur))
if not releases_missing:
return summary
db.mktemp_release(cur)
releases_filtered = [
release for release in releases if release.id in releases_missing
]
self.journal_writer.release_add(releases_filtered)
releases_filtered = list(map(converters.release_to_db, releases_filtered))
with convert_validation_exceptions():
db.copy_to(releases_filtered, "tmp_release", db.release_add_cols, cur)
db.release_add_from_temp(cur)
return {"release:add": len(releases_missing)}
@timed
@db_transaction_generator()
def release_missing(self, releases, db=None, cur=None):
if not releases:
return
for obj in db.release_missing_from_list(releases, cur):
yield obj[0]
@timed
@db_transaction_generator(statement_timeout=500)
def release_get(self, releases, db=None, cur=None):
for release in db.release_get_from_list(releases, cur):
data = converters.db_to_release(dict(zip(db.release_get_cols, release)))
yield data if data["target_type"] else None
@timed
@db_transaction()
def release_get_random(self, db=None, cur=None):
return db.release_get_random(cur)
@timed
@process_metrics
@db_transaction()
def snapshot_add(self, snapshots: Iterable[Snapshot], db=None, cur=None) -> Dict:
created_temp_table = False
count = 0
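        # The temporary branch table is only created when the first new snapshot
        # is encountered.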
for snapshot in snapshots:
if not db.snapshot_exists(snapshot.id, cur):
if not created_temp_table:
db.mktemp_snapshot_branch(cur)
created_temp_table = True
with convert_validation_exceptions():
db.copy_to(
(
{
"name": name,
"target": info.target if info else None,
"target_type": (
info.target_type.value if info else None
),
}
for name, info in snapshot.branches.items()
),
"tmp_snapshot_branch",
["name", "target", "target_type"],
cur,
)
self.journal_writer.snapshot_add([snapshot])
db.snapshot_add(snapshot.id, cur)
count += 1
return {"snapshot:add": count}
@timed
@db_transaction_generator()
def snapshot_missing(self, snapshots, db=None, cur=None):
for obj in db.snapshot_missing_from_list(snapshots, cur):
yield obj[0]
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get(self, snapshot_id, db=None, cur=None):
return self.snapshot_get_branches(snapshot_id, db=db, cur=cur)
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get_by_origin_visit(self, origin, visit, db=None, cur=None):
snapshot_id = db.snapshot_get_by_origin_visit(origin, visit, cur)
if snapshot_id:
return self.snapshot_get(snapshot_id, db=db, cur=cur)
return None
@timed
@db_transaction(statement_timeout=2000)
def snapshot_count_branches(self, snapshot_id, db=None, cur=None):
return dict([bc for bc in db.snapshot_count_branches(snapshot_id, cur)])
@timed
@db_transaction(statement_timeout=2000)
def snapshot_get_branches(
self,
snapshot_id,
branches_from=b"",
branches_count=1000,
target_types=None,
db=None,
cur=None,
):
if snapshot_id == EMPTY_SNAPSHOT_ID:
return {
"id": snapshot_id,
"branches": {},
"next_branch": None,
}
branches = {}
next_branch = None
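        # Fetch one branch more than requested to know whether a next page exists.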
fetched_branches = list(
db.snapshot_get_by_id(
snapshot_id,
branches_from=branches_from,
branches_count=branches_count + 1,
target_types=target_types,
cur=cur,
)
)
for branch in fetched_branches[:branches_count]:
branch = dict(zip(db.snapshot_get_cols, branch))
del branch["snapshot_id"]
name = branch.pop("name")
if branch == {"target": None, "target_type": None}:
branch = None
branches[name] = branch
if len(fetched_branches) > branches_count:
branch = dict(zip(db.snapshot_get_cols, fetched_branches[-1]))
next_branch = branch["name"]
if branches:
return {
"id": snapshot_id,
"branches": branches,
"next_branch": next_branch,
}
return None
@timed
@db_transaction()
def snapshot_get_random(self, db=None, cur=None):
return db.snapshot_get_random(cur)
@timed
@db_transaction()
def origin_visit_add(
self, visits: Iterable[OriginVisit], db=None, cur=None
) -> Iterable[OriginVisit]:
for visit in visits:
origin = self.origin_get([visit.origin], db=db, cur=cur)[0]
if not origin: # Cannot add a visit without an origin
raise StorageArgumentException("Unknown origin %s", visit.origin)
all_visits = []
nb_visits = 0
for visit in visits:
nb_visits += 1
if not visit.visit:
with convert_validation_exceptions():
visit_id = db.origin_visit_add(
visit.origin, visit.date, visit.type, cur=cur
)
visit = attr.evolve(visit, visit=visit_id)
else:
db.origin_visit_add_with_id(visit, cur=cur)
assert visit.visit is not None
all_visits.append(visit)
            # The journal write happens after the DB insert so that visits created
            # without an id get one assigned first
self.journal_writer.origin_visit_add([visit])
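            # Every added visit also gets an initial visit status in the 'created' state.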
visit_status = OriginVisitStatus(
origin=visit.origin,
visit=visit.visit,
date=visit.date,
status="created",
snapshot=None,
)
self._origin_visit_status_add(visit_status, db=db, cur=cur)
send_metric("origin_visit:add", count=nb_visits, method_name="origin_visit")
return all_visits
def _origin_visit_status_add(
self, visit_status: OriginVisitStatus, db, cur
) -> None:
"""Add an origin visit status"""
self.journal_writer.origin_visit_status_add([visit_status])
db.origin_visit_status_add(visit_status, cur=cur)
send_metric(
"origin_visit_status:add", count=1, method_name="origin_visit_status"
)
@timed
@db_transaction()
def origin_visit_status_add(
self, visit_statuses: Iterable[OriginVisitStatus], db=None, cur=None,
) -> None:
# First round to check existence (fail early if any is ko)
for visit_status in visit_statuses:
origin_url = self.origin_get([visit_status.origin], db=db, cur=cur)[0]
if not origin_url:
raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
for visit_status in visit_statuses:
self._origin_visit_status_add(visit_status, db, cur)
@timed
@db_transaction()
def origin_visit_status_get_latest(
self,
origin_url: str,
visit: int,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
db=None,
cur=None,
) -> Optional[OriginVisitStatus]:
row = db.origin_visit_status_get_latest(
origin_url, visit, allowed_statuses, require_snapshot, cur=cur
)
if not row:
return None
return OriginVisitStatus.from_dict(row)
@timed
@db_transaction(statement_timeout=500)
def origin_visit_get(
self,
origin: str,
page_token: Optional[str] = None,
- order: str = "asc",
+ order: ListOrder = ListOrder.ASC,
limit: int = 10,
db=None,
cur=None,
) -> PagedResult[OriginVisit]:
page_token = page_token or "0"
- order = order.lower()
- allowed_orders = ["asc", "desc"]
- if order not in allowed_orders:
- raise StorageArgumentException(
- f"order must be one of {', '.join(allowed_orders)}."
- )
+ if not isinstance(order, ListOrder):
+ raise StorageArgumentException("order must be a ListOrder value")
if not isinstance(page_token, str):
raise StorageArgumentException("page_token must be a string.")
next_page_token = None
visit_from = int(page_token)
visits: List[OriginVisit] = []
extra_limit = limit + 1
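        # Fetch one extra visit to detect whether there is a next page.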
for row in db.origin_visit_get_range(
origin, visit_from=visit_from, order=order, limit=extra_limit, cur=cur
):
row_d = dict(zip(db.origin_visit_cols, row))
visits.append(
OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
)
assert len(visits) <= extra_limit
if len(visits) == extra_limit:
last_visit = visits[limit]
visits = visits[:limit]
assert last_visit is not None and last_visit.visit is not None
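            # The token is offset by one so that the extra visit (not returned in
            # this page) becomes the first row of the next one.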
- if order == "asc":
+ if order == ListOrder.ASC:
next_page_token = str(last_visit.visit - 1)
else:
next_page_token = str(last_visit.visit + 1)
return PagedResult(results=visits, next_page_token=next_page_token)
@timed
@db_transaction(statement_timeout=500)
def origin_visit_find_by_date(
self, origin: str, visit_date: datetime.datetime, db=None, cur=None
) -> Optional[OriginVisit]:
row_d = db.origin_visit_find_by_date(origin, visit_date, cur=cur)
if not row_d:
return None
return OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
@timed
@db_transaction(statement_timeout=500)
def origin_visit_get_by(
self, origin: str, visit: int, db=None, cur=None
) -> Optional[OriginVisit]:
row = db.origin_visit_get(origin, visit, cur)
if row:
row_d = dict(zip(db.origin_visit_get_cols, row))
return OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
return None
@timed
@db_transaction(statement_timeout=4000)
def origin_visit_get_latest(
self,
origin: str,
type: Optional[str] = None,
allowed_statuses: Optional[List[str]] = None,
require_snapshot: bool = False,
db=None,
cur=None,
) -> Optional[OriginVisit]:
row = db.origin_visit_get_latest(
origin,
type=type,
allowed_statuses=allowed_statuses,
require_snapshot=require_snapshot,
cur=cur,
)
if row:
row_d = dict(zip(db.origin_visit_get_cols, row))
visit = OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
return visit
return None
@timed
@db_transaction()
def origin_visit_status_get_random(
self, type: str, db=None, cur=None
) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]:
row = db.origin_visit_get_random(type, cur)
if row is not None:
row_d = dict(zip(db.origin_visit_get_cols, row))
visit = OriginVisit(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
type=row_d["type"],
)
visit_status = OriginVisitStatus(
origin=row_d["origin"],
visit=row_d["visit"],
date=row_d["date"],
status=row_d["status"],
metadata=row_d["metadata"],
snapshot=row_d["snapshot"],
)
return visit, visit_status
return None
@timed
@db_transaction(statement_timeout=2000)
def object_find_by_sha1_git(self, ids, db=None, cur=None):
ret = {id: [] for id in ids}
for retval in db.object_find_by_sha1_git(ids, cur=cur):
if retval[1]:
ret[retval[0]].append(
dict(zip(db.object_find_by_sha1_git_cols, retval))
)
return ret
@timed
@db_transaction(statement_timeout=500)
def origin_get(
self, origins: Iterable[str], db=None, cur=None
) -> Iterable[Optional[Origin]]:
origin_urls = list(origins)
rows = db.origin_get_by_url(origin_urls, cur)
result: List[Optional[Origin]] = []
for row in rows:
origin_d = dict(zip(db.origin_cols, row))
url = origin_d["url"]
result.append(None if url is None else Origin(url=url))
return result
@timed
@db_transaction_generator(statement_timeout=500)
def origin_get_by_sha1(self, sha1s, db=None, cur=None):
for line in db.origin_get_by_sha1(sha1s, cur):
if line[0] is not None:
yield dict(zip(db.origin_cols, line))
else:
yield None
@timed
@db_transaction_generator()
def origin_get_range(self, origin_from=1, origin_count=100, db=None, cur=None):
for origin in db.origin_get_range(origin_from, origin_count, cur):
yield dict(zip(db.origin_get_range_cols, origin))
@timed
@db_transaction()
def origin_list(
self, page_token: Optional[str] = None, limit: int = 100, *, db=None, cur=None
) -> dict:
page_token = page_token or "0"
if not isinstance(page_token, str):
raise StorageArgumentException("page_token must be a string.")
origin_from = int(page_token)
result: Dict[str, Any] = {
"origins": [
dict(zip(db.origin_get_range_cols, origin))
for origin in db.origin_get_range(origin_from, limit, cur)
],
}
assert len(result["origins"]) <= limit
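        # When the page is full, the next token points just past the id of the
        # last origin returned.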
if len(result["origins"]) == limit:
result["next_page_token"] = str(result["origins"][limit - 1]["id"] + 1)
for origin in result["origins"]:
del origin["id"]
return result
@timed
@db_transaction_generator()
def origin_search(
self,
url_pattern,
offset=0,
limit=50,
regexp=False,
with_visit=False,
db=None,
cur=None,
):
for origin in db.origin_search(
url_pattern, offset, limit, regexp, with_visit, cur
):
yield dict(zip(db.origin_cols, origin))
@timed
@db_transaction()
def origin_count(
self, url_pattern, regexp=False, with_visit=False, db=None, cur=None
):
return db.origin_count(url_pattern, regexp, with_visit, cur)
@timed
@process_metrics
@db_transaction()
def origin_add(
self, origins: Iterable[Origin], db=None, cur=None
) -> Dict[str, int]:
urls = [o.url for o in origins]
known_origins = set(url for (url,) in db.origin_get_by_url(urls, cur))
# use lists here to keep origins sorted; some tests depend on this
to_add = [url for url in urls if url not in known_origins]
self.journal_writer.origin_add([Origin(url=url) for url in to_add])
added = 0
for url in to_add:
if db.origin_add(url, cur):
added += 1
return {"origin:add": added}
@db_transaction(statement_timeout=500)
def stat_counters(self, db=None, cur=None):
return {k: v for (k, v) in db.stat_counters()}
@db_transaction()
def refresh_stat_counters(self, db=None, cur=None):
keys = [
"content",
"directory",
"directory_entry_dir",
"directory_entry_file",
"directory_entry_rev",
"origin",
"origin_visit",
"person",
"release",
"revision",
"revision_history",
"skipped_content",
"snapshot",
]
for key in keys:
cur.execute("select * from swh_update_counter(%s)", (key,))
@db_transaction()
def raw_extrinsic_metadata_add(
self, metadata: Iterable[RawExtrinsicMetadata], db, cur,
) -> None:
metadata = list(metadata)
self.journal_writer.raw_extrinsic_metadata_add(metadata)
counter = Counter[MetadataTargetType]()
for metadata_entry in metadata:
authority_id = self._get_authority_id(metadata_entry.authority, db, cur)
fetcher_id = self._get_fetcher_id(metadata_entry.fetcher, db, cur)
db.raw_extrinsic_metadata_add(
object_type=metadata_entry.type.value,
id=str(metadata_entry.id),
discovery_date=metadata_entry.discovery_date,
authority_id=authority_id,
fetcher_id=fetcher_id,
format=metadata_entry.format,
metadata=metadata_entry.metadata,
origin=metadata_entry.origin,
visit=metadata_entry.visit,
snapshot=map_optional(str, metadata_entry.snapshot),
release=map_optional(str, metadata_entry.release),
revision=map_optional(str, metadata_entry.revision),
path=metadata_entry.path,
directory=map_optional(str, metadata_entry.directory),
cur=cur,
)
counter[metadata_entry.type] += 1
for (object_type, count) in counter.items():
send_metric(
f"{object_type.value}_metadata:add",
count=count,
method_name=f"{object_type.value}_metadata_add",
)
@db_transaction()
def raw_extrinsic_metadata_get(
self,
object_type: MetadataTargetType,
id: Union[str, SWHID],
authority: MetadataAuthority,
after: Optional[datetime.datetime] = None,
page_token: Optional[bytes] = None,
limit: int = 1000,
db=None,
cur=None,
) -> Dict[str, Union[Optional[bytes], List[RawExtrinsicMetadata]]]:
if object_type == MetadataTargetType.ORIGIN:
if isinstance(id, SWHID):
raise StorageArgumentException(
f"raw_extrinsic_metadata_get called with object_type='origin', "
f"but provided id is an SWHID: {id!r}"
)
else:
if not isinstance(id, SWHID):
raise StorageArgumentException(
f"raw_extrinsic_metadata_get called with object_type!='origin', "
f"but provided id is not an SWHID: {id!r}"
)
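        # A page token encodes the (discovery_date, fetcher_id) pair of the last
        # row returned, serialized with msgpack.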
if page_token:
(after_time, after_fetcher) = msgpack_loads(page_token)
if after and after_time < after:
raise StorageArgumentException(
"page_token is inconsistent with the value of 'after'."
)
else:
after_time = after
after_fetcher = None
authority_id = self._get_authority_id(authority, db, cur)
if not authority_id:
return {
"next_page_token": None,
"results": [],
}
rows = db.raw_extrinsic_metadata_get(
object_type,
str(id),
authority_id,
after_time,
after_fetcher,
limit + 1,
cur,
)
rows = [dict(zip(db.raw_extrinsic_metadata_get_cols, row)) for row in rows]
results = []
for row in rows:
row = row.copy()
row.pop("metadata_fetcher.id")
assert str(id) == row["raw_extrinsic_metadata.id"]
result = RawExtrinsicMetadata(
type=MetadataTargetType(row["raw_extrinsic_metadata.type"]),
id=id,
authority=MetadataAuthority(
type=MetadataAuthorityType(row["metadata_authority.type"]),
url=row["metadata_authority.url"],
),
fetcher=MetadataFetcher(
name=row["metadata_fetcher.name"],
version=row["metadata_fetcher.version"],
),
discovery_date=row["discovery_date"],
format=row["format"],
metadata=row["raw_extrinsic_metadata.metadata"],
origin=row["origin"],
visit=row["visit"],
snapshot=map_optional(parse_swhid, row["snapshot"]),
release=map_optional(parse_swhid, row["release"]),
revision=map_optional(parse_swhid, row["revision"]),
path=row["path"],
directory=map_optional(parse_swhid, row["directory"]),
)
results.append(result)
if len(results) > limit:
results.pop()
assert len(results) == limit
last_returned_row = rows[-2] # rows[-1] corresponds to the popped result
next_page_token: Optional[bytes] = msgpack_dumps(
(
last_returned_row["discovery_date"],
last_returned_row["metadata_fetcher.id"],
)
)
else:
next_page_token = None
return {
"next_page_token": next_page_token,
"results": results,
}
@timed
@db_transaction()
def metadata_fetcher_add(
self, fetchers: Iterable[MetadataFetcher], db=None, cur=None
) -> None:
fetchers = list(fetchers)
self.journal_writer.metadata_fetcher_add(fetchers)
count = 0
for fetcher in fetchers:
if fetcher.metadata is None:
raise StorageArgumentException(
"MetadataFetcher.metadata may not be None in metadata_fetcher_add."
)
db.metadata_fetcher_add(
fetcher.name, fetcher.version, dict(fetcher.metadata), cur=cur
)
count += 1
send_metric("metadata_fetcher:add", count=count, method_name="metadata_fetcher")
@timed
@db_transaction(statement_timeout=500)
def metadata_fetcher_get(
self, name: str, version: str, db=None, cur=None
) -> Optional[MetadataFetcher]:
row = db.metadata_fetcher_get(name, version, cur=cur)
if not row:
return None
return MetadataFetcher.from_dict(dict(zip(db.metadata_fetcher_cols, row)))
@timed
@db_transaction()
def metadata_authority_add(
self, authorities: Iterable[MetadataAuthority], db=None, cur=None
) -> None:
authorities = list(authorities)
self.journal_writer.metadata_authority_add(authorities)
count = 0
for authority in authorities:
if authority.metadata is None:
raise StorageArgumentException(
"MetadataAuthority.metadata may not be None in "
"metadata_authority_add."
)
db.metadata_authority_add(
authority.type.value, authority.url, dict(authority.metadata), cur=cur
)
count += 1
send_metric(
"metadata_authority:add", count=count, method_name="metadata_authority"
)
@timed
@db_transaction()
def metadata_authority_get(
self, type: MetadataAuthorityType, url: str, db=None, cur=None
) -> Optional[MetadataAuthority]:
row = db.metadata_authority_get(type.value, url, cur=cur)
if not row:
return None
return MetadataAuthority.from_dict(dict(zip(db.metadata_authority_cols, row)))
@timed
def diff_directories(self, from_dir, to_dir, track_renaming=False):
return diff.diff_directories(self, from_dir, to_dir, track_renaming)
@timed
def diff_revisions(self, from_rev, to_rev, track_renaming=False):
return diff.diff_revisions(self, from_rev, to_rev, track_renaming)
@timed
def diff_revision(self, revision, track_renaming=False):
return diff.diff_revision(self, revision, track_renaming)
def clear_buffers(self, object_types: Optional[Iterable[str]] = None) -> None:
"""Do nothing
"""
return None
def flush(self, object_types: Optional[Iterable[str]] = None) -> Dict:
return {}
def _get_authority_id(self, authority: MetadataAuthority, db, cur):
authority_id = db.metadata_authority_get_id(
authority.type.value, authority.url, cur
)
if not authority_id:
raise StorageArgumentException(f"Unknown authority {authority}")
return authority_id
def _get_fetcher_id(self, fetcher: MetadataFetcher, db, cur):
fetcher_id = db.metadata_fetcher_get_id(fetcher.name, fetcher.version, cur)
if not fetcher_id:
raise StorageArgumentException(f"Unknown fetcher {fetcher}")
return fetcher_id
diff --git a/swh/storage/tests/test_serializers.py b/swh/storage/tests/test_serializers.py
new file mode 100644
index 00000000..29f80fad
--- /dev/null
+++ b/swh/storage/tests/test_serializers.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2020 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+from swh.storage.interface import ListOrder
+from swh.model import model
+
+from swh.storage.api.serializers import (
+ _encode_enum,
+ _decode_model_enum,
+ _decode_storage_enum,
+)
+
+
+def test_model_enum_serialization(sample_data):
+ result_enum = model.MetadataAuthorityType.DEPOSIT_CLIENT
+ actual_serialized_enum = _encode_enum(result_enum)
+
+ expected_serialized_enum = {
+ "value": result_enum.value,
+ "__type__": type(result_enum).__name__,
+ }
+ assert actual_serialized_enum == expected_serialized_enum
+
+ decoded_enum = _decode_model_enum(actual_serialized_enum)
+ assert decoded_enum == result_enum
+
+
+def test_storage_enum_serialization(sample_data):
+ result_enum = ListOrder.ASC
+ actual_serialized_enum = _encode_enum(result_enum)
+
+ expected_serialized_enum = {
+ "value": result_enum.value,
+ "__type__": type(result_enum).__name__,
+ }
+ assert actual_serialized_enum == expected_serialized_enum
+
+ decoded_enum = _decode_storage_enum(actual_serialized_enum)
+ assert decoded_enum == result_enum
diff --git a/swh/storage/tests/test_storage.py b/swh/storage/tests/test_storage.py
index 021f2fe1..63ecea62 100644
--- a/swh/storage/tests/test_storage.py
+++ b/swh/storage/tests/test_storage.py
@@ -1,4250 +1,4252 @@
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
import inspect
import itertools
import math
import queue
import random
import threading
from collections import defaultdict
from contextlib import contextmanager
from datetime import timedelta
from unittest.mock import Mock
import attr
import pytest
from hypothesis import given, strategies, settings, HealthCheck
from typing import ClassVar, Optional
from swh.model import from_disk
from swh.model.hashutil import hash_to_bytes
from swh.model.identifiers import SWHID
from swh.model.model import (
Content,
MetadataTargetType,
Origin,
OriginVisit,
OriginVisitStatus,
Person,
Release,
Revision,
Snapshot,
)
from swh.model.hypothesis_strategies import objects
from swh.storage import get_storage
from swh.storage.converters import origin_url_to_sha1 as sha1
from swh.storage.exc import HashCollision, StorageArgumentException
-from swh.storage.interface import StorageInterface, PagedResult # noqa
+from swh.storage.interface import ListOrder, PagedResult, StorageInterface
from swh.storage.utils import content_hex_hashes, now
@contextmanager
def db_transaction(storage):
with storage.db() as db:
with db.transaction() as cur:
yield db, cur
def transform_entries(dir_, *, prefix=b""):
for ent in dir_.entries:
yield {
"dir_id": dir_.id,
"type": ent.type,
"target": ent.target,
"name": prefix + ent.name,
"perms": ent.perms,
"status": None,
"sha1": None,
"sha1_git": None,
"sha256": None,
"length": None,
}
def cmpdir(directory):
return (directory["type"], directory["dir_id"])
def assert_contents_ok(
expected_contents, actual_contents, keys_to_check={"sha1", "data"}
):
"""Assert that a given list of contents matches on a given set of keys.
"""
for k in keys_to_check:
expected_list = set([c.get(k) for c in expected_contents])
actual_list = set([c.get(k) for c in actual_contents])
assert actual_list == expected_list, k
def round_to_milliseconds(date):
"""Round datetime to milliseconds before insertion, so equality doesn't fail after a
round-trip through a DB (eg. Cassandra)
"""
return date.replace(microsecond=(date.microsecond // 1000) * 1000)
def test_round_to_milliseconds():
date = now()
for (ms, expected_ms) in [(0, 0), (1000, 1000), (555555, 555000), (999500, 999000)]:
date = date.replace(microsecond=ms)
actual_date = round_to_milliseconds(date)
assert actual_date.microsecond == expected_ms
class LazyContent(Content):
def with_data(self):
return Content.from_dict({**self.to_dict(), "data": b"42\n"})
class TestStorage:
"""Main class for Storage testing.
This class is used as-is to test local storage (see TestLocalStorage
below) and remote storage (see TestRemoteStorage in
    test_remote_storage.py).
We need to have the two classes inherit from this base class
separately to avoid nosetests running the tests from the base
class twice.
"""
maxDiff = None # type: ClassVar[Optional[int]]
def test_types(self, swh_storage_backend_config):
"""Checks all methods of StorageInterface are implemented by this
backend, and that they have the same signature."""
# Create an instance of the protocol (which cannot be instantiated
# directly, so this creates a subclass, then instantiates it)
interface = type("_", (StorageInterface,), {})()
storage = get_storage(**swh_storage_backend_config)
assert "content_add" in dir(interface)
missing_methods = []
for meth_name in dir(interface):
if meth_name.startswith("_"):
continue
interface_meth = getattr(interface, meth_name)
try:
concrete_meth = getattr(storage, meth_name)
except AttributeError:
if not getattr(interface_meth, "deprecated_endpoint", False):
# The backend is missing a (non-deprecated) endpoint
missing_methods.append(meth_name)
continue
expected_signature = inspect.signature(interface_meth)
actual_signature = inspect.signature(concrete_meth)
assert expected_signature == actual_signature, meth_name
assert missing_methods == []
def test_check_config(self, swh_storage):
assert swh_storage.check_config(check_write=True)
assert swh_storage.check_config(check_write=False)
def test_content_add(self, swh_storage, sample_data):
cont = sample_data.content
insertion_start_time = now()
actual_result = swh_storage.content_add([cont])
insertion_end_time = now()
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont.length,
}
assert list(swh_storage.content_get([cont.sha1])) == [
{"sha1": cont.sha1, "data": cont.data}
]
expected_cont = attr.evolve(cont, data=None)
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
for obj in contents:
assert insertion_start_time <= obj.ctime
assert obj.ctime <= insertion_end_time
assert obj == expected_cont
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["content"] == 1
def test_content_add_from_generator(self, swh_storage, sample_data):
cont = sample_data.content
def _cnt_gen():
yield cont
actual_result = swh_storage.content_add(_cnt_gen())
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont.length,
}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["content"] == 1
def test_content_add_from_lazy_content(self, swh_storage, sample_data):
cont = sample_data.content
lazy_content = LazyContent.from_dict(cont.to_dict())
insertion_start_time = now()
actual_result = swh_storage.content_add([lazy_content])
insertion_end_time = now()
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont.length,
}
# the fact that we retrieve the content object from the storage with
# the correct 'data' field ensures it has been 'called'
assert list(swh_storage.content_get([cont.sha1])) == [
{"sha1": cont.sha1, "data": cont.data}
]
expected_cont = attr.evolve(lazy_content, data=None, ctime=None)
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
for obj in contents:
assert insertion_start_time <= obj.ctime
assert obj.ctime <= insertion_end_time
assert attr.evolve(obj, ctime=None).to_dict() == expected_cont.to_dict()
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["content"] == 1
def test_content_get_missing(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
swh_storage.content_add([cont])
# Query a single missing content
results = list(swh_storage.content_get([cont2.sha1]))
assert results == [None]
# Check content_get does not abort after finding a missing content
results = list(swh_storage.content_get([cont.sha1, cont2.sha1]))
assert results == [{"sha1": cont.sha1, "data": cont.data}, None]
        # Check content_get does not discard found content when it finds
        # a missing content.
results = list(swh_storage.content_get([cont2.sha1, cont.sha1]))
assert results == [None, {"sha1": cont.sha1, "data": cont.data}]
def test_content_add_different_input(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
actual_result = swh_storage.content_add([cont, cont2])
assert actual_result == {
"content:add": 2,
"content:add:bytes": cont.length + cont2.length,
}
def test_content_add_twice(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
actual_result = swh_storage.content_add([cont])
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont.length,
}
assert len(swh_storage.journal_writer.journal.objects) == 1
actual_result = swh_storage.content_add([cont, cont2])
assert actual_result == {
"content:add": 1,
"content:add:bytes": cont2.length,
}
assert 2 <= len(swh_storage.journal_writer.journal.objects) <= 3
assert len(swh_storage.content_find(cont.to_dict())) == 1
assert len(swh_storage.content_find(cont2.to_dict())) == 1
def test_content_add_collision(self, swh_storage, sample_data):
cont1 = sample_data.content
# create (corrupted) content with same sha1{,_git} but != sha256
sha256_array = bytearray(cont1.sha256)
sha256_array[0] += 1
cont1b = attr.evolve(cont1, sha256=bytes(sha256_array))
with pytest.raises(HashCollision) as cm:
swh_storage.content_add([cont1, cont1b])
exc = cm.value
actual_algo = exc.algo
assert actual_algo in ["sha1", "sha1_git", "blake2s256"]
actual_id = exc.hash_id
assert actual_id == getattr(cont1, actual_algo).hex()
collisions = exc.args[2]
assert len(collisions) == 2
assert collisions == [
content_hex_hashes(cont1.hashes()),
content_hex_hashes(cont1b.hashes()),
]
assert exc.colliding_content_hashes() == [
cont1.hashes(),
cont1b.hashes(),
]
def test_content_add_duplicate(self, swh_storage, sample_data):
cont = sample_data.content
swh_storage.content_add([cont, cont])
assert list(swh_storage.content_get([cont.sha1])) == [
{"sha1": cont.sha1, "data": cont.data}
]
def test_content_update(self, swh_storage, sample_data):
cont1 = sample_data.content
if hasattr(swh_storage, "journal_writer"):
swh_storage.journal_writer.journal = None # TODO, not supported
swh_storage.content_add([cont1])
# alter the sha1_git for example
cont1b = attr.evolve(
cont1, sha1_git=hash_to_bytes("3a60a5275d0333bf13468e8b3dcab90f4046e654")
)
swh_storage.content_update([cont1b.to_dict()], keys=["sha1_git"])
results = swh_storage.content_get_metadata([cont1.sha1])
expected_content = attr.evolve(cont1b, data=None).to_dict()
del expected_content["ctime"]
assert tuple(results[cont1.sha1]) == (expected_content,)
def test_content_add_metadata(self, swh_storage, sample_data):
cont = attr.evolve(sample_data.content, data=None, ctime=now())
actual_result = swh_storage.content_add_metadata([cont])
assert actual_result == {
"content:add": 1,
}
expected_cont = cont.to_dict()
del expected_cont["ctime"]
assert tuple(swh_storage.content_get_metadata([cont.sha1])[cont.sha1]) == (
expected_cont,
)
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
for obj in contents:
obj = attr.evolve(obj, ctime=None)
assert obj == cont
def test_content_add_metadata_different_input(self, swh_storage, sample_data):
contents = sample_data.contents[:2]
cont = attr.evolve(contents[0], data=None, ctime=now())
cont2 = attr.evolve(contents[1], data=None, ctime=now())
actual_result = swh_storage.content_add_metadata([cont, cont2])
assert actual_result == {
"content:add": 2,
}
def test_content_add_metadata_collision(self, swh_storage, sample_data):
cont1 = attr.evolve(sample_data.content, data=None, ctime=now())
# create (corrupted) content with same sha1{,_git} but != sha256
sha1_git_array = bytearray(cont1.sha256)
sha1_git_array[0] += 1
cont1b = attr.evolve(cont1, sha256=bytes(sha1_git_array))
with pytest.raises(HashCollision) as cm:
swh_storage.content_add_metadata([cont1, cont1b])
exc = cm.value
actual_algo = exc.algo
assert actual_algo in ["sha1", "sha1_git", "blake2s256"]
actual_id = exc.hash_id
assert actual_id == getattr(cont1, actual_algo).hex()
collisions = exc.args[2]
assert len(collisions) == 2
assert collisions == [
content_hex_hashes(cont1.hashes()),
content_hex_hashes(cont1b.hashes()),
]
assert exc.colliding_content_hashes() == [
cont1.hashes(),
cont1b.hashes(),
]
def test_skipped_content_add(self, swh_storage, sample_data):
contents = sample_data.skipped_contents[:2]
cont = contents[0]
cont2 = attr.evolve(contents[1], blake2s256=None)
contents_dict = [c.to_dict() for c in [cont, cont2]]
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == [cont.hashes(), cont2.hashes()]
actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
assert 2 <= actual_result.pop("skipped_content:add") <= 3
assert actual_result == {}
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == []
def test_skipped_content_add_missing_hashes(self, swh_storage, sample_data):
cont, cont2 = [
attr.evolve(c, sha1_git=None) for c in sample_data.skipped_contents[:2]
]
contents_dict = [c.to_dict() for c in [cont, cont2]]
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert len(missing) == 2
actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
assert 2 <= actual_result.pop("skipped_content:add") <= 3
assert actual_result == {}
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == []
def test_skipped_content_missing_partial_hash(self, swh_storage, sample_data):
cont = sample_data.skipped_content
cont2 = attr.evolve(cont, sha1_git=None)
contents_dict = [c.to_dict() for c in [cont, cont2]]
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert len(missing) == 2
actual_result = swh_storage.skipped_content_add([cont])
assert actual_result.pop("skipped_content:add") == 1
assert actual_result == {}
missing = list(swh_storage.skipped_content_missing(contents_dict))
assert missing == [cont2.hashes()]
@pytest.mark.property_based
@settings(deadline=None) # this test is very slow
@given(
strategies.sets(
elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]),
min_size=0,
)
)
def test_content_missing(self, swh_storage, sample_data, algos):
algos |= {"sha1"}
content, missing_content = [sample_data.content2, sample_data.skipped_content]
swh_storage.content_add([content])
test_contents = [content.to_dict()]
missing_per_hash = defaultdict(list)
for i in range(256):
test_content = missing_content.to_dict()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_contents.append(test_content)
assert set(swh_storage.content_missing(test_contents)) == set(
missing_per_hash["sha1"]
)
for hash in algos:
assert set(
swh_storage.content_missing(test_contents, key_hash=hash)
) == set(missing_per_hash[hash])
@pytest.mark.property_based
@given(
strategies.sets(
elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]),
min_size=0,
)
)
def test_content_missing_unknown_algo(self, swh_storage, sample_data, algos):
algos |= {"sha1"}
content, missing_content = [sample_data.content2, sample_data.skipped_content]
swh_storage.content_add([content])
test_contents = [content.to_dict()]
missing_per_hash = defaultdict(list)
for i in range(16):
test_content = missing_content.to_dict()
for hash in algos:
test_content[hash] = bytes([i]) + test_content[hash][1:]
missing_per_hash[hash].append(test_content[hash])
test_content["nonexisting_algo"] = b"\x00"
test_contents.append(test_content)
assert set(swh_storage.content_missing(test_contents)) == set(
missing_per_hash["sha1"]
)
for hash in algos:
assert set(
swh_storage.content_missing(test_contents, key_hash=hash)
) == set(missing_per_hash[hash])
def test_content_missing_per_sha1(self, swh_storage, sample_data):
# given
cont = sample_data.content
missing_cont = sample_data.skipped_content
swh_storage.content_add([cont])
# when
gen = swh_storage.content_missing_per_sha1([cont.sha1, missing_cont.sha1])
# then
assert list(gen) == [missing_cont.sha1]
def test_content_missing_per_sha1_git(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
missing_cont = sample_data.skipped_content
swh_storage.content_add([cont, cont2])
contents = [cont.sha1_git, cont2.sha1_git, missing_cont.sha1_git]
missing_contents = swh_storage.content_missing_per_sha1_git(contents)
assert list(missing_contents) == [missing_cont.sha1_git]
def test_content_get_partition(self, swh_storage, swh_contents):
"""content_get_partition paginates results if limit exceeded"""
expected_contents = [c.to_dict() for c in swh_contents if c.status != "absent"]
actual_contents = []
for i in range(16):
actual_result = swh_storage.content_get_partition(i, 16)
assert actual_result["next_page_token"] is None
actual_contents.extend(actual_result["contents"])
assert_contents_ok(expected_contents, actual_contents, ["sha1"])
def test_content_get_partition_full(self, swh_storage, swh_contents):
"""content_get_partition for a single partition returns all available
contents"""
expected_contents = [c.to_dict() for c in swh_contents if c.status != "absent"]
actual_result = swh_storage.content_get_partition(0, 1)
assert actual_result["next_page_token"] is None
actual_contents = actual_result["contents"]
assert_contents_ok(expected_contents, actual_contents, ["sha1"])
def test_content_get_partition_empty(self, swh_storage, swh_contents):
"""content_get_partition when at least one of the partitions is
empty"""
expected_contents = {
cont.sha1 for cont in swh_contents if cont.status != "absent"
}
# nb_partitions = smallest power of 2 such that at least one of
# the partitions is empty
nb_partitions = 1 << math.floor(math.log2(len(swh_contents)) + 1)
seen_sha1s = []
for i in range(nb_partitions):
actual_result = swh_storage.content_get_partition(
i, nb_partitions, limit=len(swh_contents) + 1
)
for cont in actual_result["contents"]:
seen_sha1s.append(cont["sha1"])
# Limit is higher than the max number of results
assert actual_result["next_page_token"] is None
assert set(seen_sha1s) == expected_contents
def test_content_get_partition_limit_none(self, swh_storage):
"""content_get_partition call with wrong limit input should fail"""
with pytest.raises(StorageArgumentException) as e:
swh_storage.content_get_partition(1, 16, limit=None)
assert e.value.args == ("limit should not be None",)
def test_generate_content_get_partition_pagination(self, swh_storage, swh_contents):
"""content_get_partition returns contents within range provided"""
expected_contents = [c.to_dict() for c in swh_contents if c.status != "absent"]
# retrieve contents
actual_contents = []
for i in range(4):
page_token = None
while True:
actual_result = swh_storage.content_get_partition(
i, 4, limit=3, page_token=page_token
)
actual_contents.extend(actual_result["contents"])
page_token = actual_result["next_page_token"]
if page_token is None:
break
assert_contents_ok(expected_contents, actual_contents, ["sha1"])
def test_content_get_metadata(self, swh_storage, sample_data):
cont1, cont2 = sample_data.contents[:2]
swh_storage.content_add([cont1, cont2])
actual_md = swh_storage.content_get_metadata([cont1.sha1, cont2.sha1])
# we only retrieve the metadata so no data nor ctime within
expected_cont1, expected_cont2 = [
attr.evolve(c, data=None).to_dict() for c in [cont1, cont2]
]
expected_cont1.pop("ctime")
expected_cont2.pop("ctime")
assert tuple(actual_md[cont1.sha1]) == (expected_cont1,)
assert tuple(actual_md[cont2.sha1]) == (expected_cont2,)
assert len(actual_md.keys()) == 2
def test_content_get_metadata_missing_sha1(self, swh_storage, sample_data):
cont1, cont2 = sample_data.contents[:2]
missing_cont = sample_data.skipped_content
swh_storage.content_add([cont1, cont2])
actual_contents = swh_storage.content_get_metadata([missing_cont.sha1])
assert len(actual_contents) == 1
assert tuple(actual_contents[missing_cont.sha1]) == ()
def test_content_get_random(self, swh_storage, sample_data):
cont, cont2, cont3 = sample_data.contents[:3]
swh_storage.content_add([cont, cont2, cont3])
assert swh_storage.content_get_random() in {
cont.sha1_git,
cont2.sha1_git,
cont3.sha1_git,
}
def test_directory_add(self, swh_storage, sample_data):
directory = sample_data.directories[1]
init_missing = list(swh_storage.directory_missing([directory.id]))
assert [directory.id] == init_missing
actual_result = swh_storage.directory_add([directory])
assert actual_result == {"directory:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", directory)
]
actual_data = list(swh_storage.directory_ls(directory.id))
expected_data = list(transform_entries(directory))
assert sorted(expected_data, key=cmpdir) == sorted(actual_data, key=cmpdir)
after_missing = list(swh_storage.directory_missing([directory.id]))
assert after_missing == []
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["directory"] == 1
def test_directory_add_from_generator(self, swh_storage, sample_data):
directory = sample_data.directories[1]
def _dir_gen():
yield directory
actual_result = swh_storage.directory_add(directories=_dir_gen())
assert actual_result == {"directory:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", directory)
]
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["directory"] == 1
def test_directory_add_twice(self, swh_storage, sample_data):
directory = sample_data.directories[1]
actual_result = swh_storage.directory_add([directory])
assert actual_result == {"directory:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", directory)
]
actual_result = swh_storage.directory_add([directory])
assert actual_result == {"directory:add": 0}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", directory)
]
def test_directory_get_recursive(self, swh_storage, sample_data):
dir1, dir2, dir3 = sample_data.directories[:3]
init_missing = list(swh_storage.directory_missing([dir1.id]))
assert init_missing == [dir1.id]
actual_result = swh_storage.directory_add([dir1, dir2, dir3])
assert actual_result == {"directory:add": 3}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", dir1),
("directory", dir2),
("directory", dir3),
]
# List directory containing a file and an unknown subdirectory
actual_data = list(swh_storage.directory_ls(dir1.id, recursive=True))
expected_data = list(transform_entries(dir1))
assert sorted(expected_data, key=cmpdir) == sorted(actual_data, key=cmpdir)
        # List directory containing a single file
actual_data = list(swh_storage.directory_ls(dir2.id, recursive=True))
expected_data = list(transform_entries(dir2))
assert sorted(expected_data, key=cmpdir) == sorted(actual_data, key=cmpdir)
# List directory containing a known subdirectory, entries should
# be both those of the directory and of the subdir
actual_data = list(swh_storage.directory_ls(dir3.id, recursive=True))
expected_data = list(
itertools.chain(
transform_entries(dir3), transform_entries(dir2, prefix=b"subdir/"),
)
)
assert sorted(expected_data, key=cmpdir) == sorted(actual_data, key=cmpdir)
def test_directory_get_non_recursive(self, swh_storage, sample_data):
dir1, dir2, dir3 = sample_data.directories[:3]
init_missing = list(swh_storage.directory_missing([dir1.id]))
assert init_missing == [dir1.id]
actual_result = swh_storage.directory_add([dir1, dir2, dir3])
assert actual_result == {"directory:add": 3}
assert list(swh_storage.journal_writer.journal.objects) == [
("directory", dir1),
("directory", dir2),
("directory", dir3),
]
# List directory containing a file and an unknown subdirectory
actual_data = list(swh_storage.directory_ls(dir1.id))
expected_data = list(transform_entries(dir1))
assert sorted(expected_data, key=cmpdir) == sorted(actual_data, key=cmpdir)
        # List directory containing a single file
actual_data = list(swh_storage.directory_ls(dir2.id))
expected_data = list(transform_entries(dir2))
assert sorted(expected_data, key=cmpdir) == sorted(actual_data, key=cmpdir)
# List directory containing a known subdirectory, entries should
# only be those of the parent directory, not of the subdir
actual_data = list(swh_storage.directory_ls(dir3.id))
expected_data = list(transform_entries(dir3))
assert sorted(expected_data, key=cmpdir) == sorted(actual_data, key=cmpdir)
def test_directory_entry_get_by_path(self, swh_storage, sample_data):
cont = sample_data.content
dir1, dir2, dir3, dir4, dir5 = sample_data.directories[:5]
# given
init_missing = list(swh_storage.directory_missing([dir3.id]))
assert init_missing == [dir3.id]
actual_result = swh_storage.directory_add([dir3, dir4])
assert actual_result == {"directory:add": 2}
expected_entries = [
{
"dir_id": dir3.id,
"name": b"foo",
"type": "file",
"target": cont.sha1_git,
"sha1": None,
"sha1_git": None,
"sha256": None,
"status": None,
"perms": from_disk.DentryPerms.content,
"length": None,
},
{
"dir_id": dir3.id,
"name": b"subdir",
"type": "dir",
"target": dir2.id,
"sha1": None,
"sha1_git": None,
"sha256": None,
"status": None,
"perms": from_disk.DentryPerms.directory,
"length": None,
},
{
"dir_id": dir3.id,
"name": b"hello",
"type": "file",
"target": dir5.id,
"sha1": None,
"sha1_git": None,
"sha256": None,
"status": None,
"perms": from_disk.DentryPerms.content,
"length": None,
},
]
# when (all must be found here)
for entry, expected_entry in zip(dir3.entries, expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
dir3.id, [entry.name]
)
assert actual_entry == expected_entry
# same, but deeper
for entry, expected_entry in zip(dir3.entries, expected_entries):
actual_entry = swh_storage.directory_entry_get_by_path(
dir4.id, [b"subdir1", entry.name]
)
expected_entry = expected_entry.copy()
expected_entry["name"] = b"subdir1/" + expected_entry["name"]
assert actual_entry == expected_entry
        # when (nothing should be found here since `dir2` is not persisted.)
for entry in dir2.entries:
actual_entry = swh_storage.directory_entry_get_by_path(
dir2.id, [entry.name]
)
assert actual_entry is None
def test_directory_get_random(self, swh_storage, sample_data):
dir1, dir2, dir3 = sample_data.directories[:3]
swh_storage.directory_add([dir1, dir2, dir3])
assert swh_storage.directory_get_random() in {
dir1.id,
dir2.id,
dir3.id,
}
def test_revision_add(self, swh_storage, sample_data):
revision = sample_data.revision
init_missing = swh_storage.revision_missing([revision.id])
assert list(init_missing) == [revision.id]
actual_result = swh_storage.revision_add([revision])
assert actual_result == {"revision:add": 1}
end_missing = swh_storage.revision_missing([revision.id])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.journal.objects) == [
("revision", revision)
]
# already there so nothing added
actual_result = swh_storage.revision_add([revision])
assert actual_result == {"revision:add": 0}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["revision"] == 1
def test_revision_add_from_generator(self, swh_storage, sample_data):
revision = sample_data.revision
def _rev_gen():
yield revision
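        # revision_add should also accept a one-shot generator, not only a list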
actual_result = swh_storage.revision_add(_rev_gen())
assert actual_result == {"revision:add": 1}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["revision"] == 1
def test_revision_add_twice(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
actual_result = swh_storage.revision_add([revision])
assert actual_result == {"revision:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("revision", revision)
]
actual_result = swh_storage.revision_add([revision, revision2])
assert actual_result == {"revision:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("revision", revision),
("revision", revision2),
]
def test_revision_add_name_clash(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
revision1 = attr.evolve(
revision,
author=Person(
fullname=b"John Doe ",
name=b"John Doe",
email=b"john.doe@example.com",
),
)
revision2 = attr.evolve(
revision2,
author=Person(
fullname=b"John Doe ",
name=b"John Doe ",
email=b"john.doe@example.com ",
),
)
actual_result = swh_storage.revision_add([revision1, revision2])
assert actual_result == {"revision:add": 2}
def test_revision_get_order(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
add_result = swh_storage.revision_add([revision, revision2])
assert add_result == {"revision:add": 2}
# order 1
res1 = swh_storage.revision_get([revision.id, revision2.id])
assert [Revision.from_dict(r) for r in res1] == [revision, revision2]
# order 2
res2 = swh_storage.revision_get([revision2.id, revision.id])
assert [Revision.from_dict(r) for r in res2] == [revision2, revision]
def test_revision_log(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# rev4 -is-child-of-> rev3 -> rev1, (rev2 -> rev1)
swh_storage.revision_add([revision1, revision2, revision3, revision4])
# when
results = list(swh_storage.revision_log([revision4.id]))
# for comparison purposes
actual_results = [Revision.from_dict(r) for r in results]
assert len(actual_results) == 4 # rev4 -child-> rev3 -> rev1, (rev2 -> rev1)
assert actual_results == [revision4, revision3, revision1, revision2]
def test_revision_log_with_limit(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# revision4 -is-child-of-> revision3
swh_storage.revision_add([revision3, revision4])
results = list(swh_storage.revision_log([revision4.id], 1))
actual_results = [Revision.from_dict(r) for r in results]
assert len(actual_results) == 1
assert actual_results[0] == revision4
def test_revision_log_unknown_revision(self, swh_storage, sample_data):
revision = sample_data.revision
rev_log = list(swh_storage.revision_log([revision.id]))
assert rev_log == []
def test_revision_shortlog(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# rev4 -is-child-of-> rev3 -> (rev1, rev2); rev2 -> rev1
swh_storage.revision_add([revision1, revision2, revision3, revision4])
results = list(swh_storage.revision_shortlog([revision4.id]))
actual_results = [[id, tuple(parents)] for (id, parents) in results]
assert len(actual_results) == 4
assert actual_results == [
[revision4.id, revision4.parents],
[revision3.id, revision3.parents],
[revision1.id, revision1.parents],
[revision2.id, revision2.parents],
]
def test_revision_shortlog_with_limit(self, swh_storage, sample_data):
revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
# revision4 -is-child-of-> revision3
swh_storage.revision_add([revision1, revision2, revision3, revision4])
results = list(swh_storage.revision_shortlog([revision4.id], 1))
actual_results = [[id, tuple(parents)] for (id, parents) in results]
assert len(actual_results) == 1
assert list(actual_results[0]) == [revision4.id, revision4.parents]
def test_revision_get(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
swh_storage.revision_add([revision])
actual_revisions = list(swh_storage.revision_get([revision.id, revision2.id]))
assert len(actual_revisions) == 2
assert Revision.from_dict(actual_revisions[0]) == revision
assert actual_revisions[1] is None
def test_revision_get_no_parents(self, swh_storage, sample_data):
revision = sample_data.revision
swh_storage.revision_add([revision])
get = list(swh_storage.revision_get([revision.id]))
assert len(get) == 1
assert revision.parents == ()
assert tuple(get[0]["parents"]) == () # no parents on this one
def test_revision_get_random(self, swh_storage, sample_data):
revision1, revision2, revision3 = sample_data.revisions[:3]
swh_storage.revision_add([revision1, revision2, revision3])
assert swh_storage.revision_get_random() in {
revision1.id,
revision2.id,
revision3.id,
}
def test_release_add(self, swh_storage, sample_data):
release, release2 = sample_data.releases[:2]
init_missing = swh_storage.release_missing([release.id, release2.id])
assert list(init_missing) == [release.id, release2.id]
actual_result = swh_storage.release_add([release, release2])
assert actual_result == {"release:add": 2}
end_missing = swh_storage.release_missing([release.id, release2.id])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.journal.objects) == [
("release", release),
("release", release2),
]
# already present so nothing added
actual_result = swh_storage.release_add([release, release2])
assert actual_result == {"release:add": 0}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["release"] == 2
def test_release_add_from_generator(self, swh_storage, sample_data):
release, release2 = sample_data.releases[:2]
def _rel_gen():
yield release
yield release2
actual_result = swh_storage.release_add(_rel_gen())
assert actual_result == {"release:add": 2}
assert list(swh_storage.journal_writer.journal.objects) == [
("release", release),
("release", release2),
]
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["release"] == 2
def test_release_add_no_author_date(self, swh_storage, sample_data):
full_release = sample_data.release
release = attr.evolve(full_release, author=None, date=None)
actual_result = swh_storage.release_add([release])
assert actual_result == {"release:add": 1}
end_missing = swh_storage.release_missing([release.id])
assert list(end_missing) == []
assert list(swh_storage.journal_writer.journal.objects) == [
("release", release)
]
def test_release_add_twice(self, swh_storage, sample_data):
release, release2 = sample_data.releases[:2]
actual_result = swh_storage.release_add([release])
assert actual_result == {"release:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("release", release)
]
actual_result = swh_storage.release_add([release, release2, release, release2])
assert actual_result == {"release:add": 1}
assert set(swh_storage.journal_writer.journal.objects) == set(
[("release", release), ("release", release2),]
)
def test_release_add_name_clash(self, swh_storage, sample_data):
release, release2 = [
attr.evolve(
c,
author=Person(
fullname=b"John Doe ",
name=b"John Doe",
email=b"john.doe@example.com",
),
)
for c in sample_data.releases[:2]
]
actual_result = swh_storage.release_add([release, release2])
assert actual_result == {"release:add": 2}
def test_release_get(self, swh_storage, sample_data):
release, release2, release3 = sample_data.releases[:3]
# given
swh_storage.release_add([release, release2])
# when
releases = list(swh_storage.release_get([release.id, release2.id]))
actual_releases = [Release.from_dict(r) for r in releases]
# then
assert actual_releases == [release, release2]
unknown_releases = list(swh_storage.release_get([release3.id]))
assert unknown_releases[0] is None
def test_release_get_order(self, swh_storage, sample_data):
release, release2 = sample_data.releases[:2]
add_result = swh_storage.release_add([release, release2])
assert add_result == {"release:add": 2}
# order 1
res1 = swh_storage.release_get([release.id, release2.id])
assert list(res1) == [release.to_dict(), release2.to_dict()]
# order 2
res2 = swh_storage.release_get([release2.id, release.id])
assert list(res2) == [release2.to_dict(), release.to_dict()]
def test_release_get_random(self, swh_storage, sample_data):
release, release2, release3 = sample_data.releases[:3]
swh_storage.release_add([release, release2, release3])
assert swh_storage.release_get_random() in {
release.id,
release2.id,
release3.id,
}
def test_origin_add(self, swh_storage, sample_data):
origins = list(sample_data.origins[:2])
origin_urls = [o.url for o in origins]
assert swh_storage.origin_get(origin_urls) == [None, None]
stats = swh_storage.origin_add(origins)
assert stats == {"origin:add": 2}
actual_origins = swh_storage.origin_get(origin_urls)
assert actual_origins == origins
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origins[0]), ("origin", origins[1]),]
)
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["origin"] == 2
def test_origin_add_from_generator(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
def _ori_gen():
yield origin
yield origin2
stats = swh_storage.origin_add(_ori_gen())
assert stats == {"origin:add": 2}
actual_origins = swh_storage.origin_get([origin.url, origin2.url])
assert actual_origins == [origin, origin2]
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin), ("origin", origin2),]
)
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["origin"] == 2
def test_origin_add_twice(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
add1 = swh_storage.origin_add([origin, origin2])
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin), ("origin", origin2),]
)
assert add1 == {"origin:add": 2}
add2 = swh_storage.origin_add([origin, origin2])
assert set(swh_storage.journal_writer.journal.objects) == set(
[("origin", origin), ("origin", origin2),]
)
assert add2 == {"origin:add": 0}
def test_origin_get(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
assert swh_storage.origin_get([origin.url]) == [None]
swh_storage.origin_add([origin])
actual_origins = swh_storage.origin_get([origin.url])
assert actual_origins == [origin]
actual_origins = swh_storage.origin_get([origin.url, "not://exists"])
assert actual_origins == [origin, None]
def _generate_random_visits(self, nb_visits=100, start=0, end=7):
"""Generate random visits within the last 2 months (to avoid
computations)
"""
visits = []
today = now()
        for _ in range(nb_visits):  # loop counter is unused; "weeks" is drawn below
hours = random.randint(0, 24)
minutes = random.randint(0, 60)
seconds = random.randint(0, 60)
days = random.randint(0, 28)
weeks = random.randint(start, end)
date_visit = today - timedelta(
weeks=weeks, hours=hours, minutes=minutes, seconds=seconds, days=days
)
visits.append(date_visit)
return visits
def test_origin_visit_get__unknown_origin(self, swh_storage):
actual_page = swh_storage.origin_visit_get("foo")
assert actual_page.next_page_token is None
assert actual_page.results == []
assert actual_page == PagedResult()
def test_origin_visit_get__validation_failure(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
with pytest.raises(
StorageArgumentException, match="page_token must be a string"
):
            swh_storage.origin_visit_get(origin.url, page_token=10)  # not a string
with pytest.raises(
- StorageArgumentException, match="order must be one of asc, desc"
+ StorageArgumentException, match="order must be a ListOrder value"
):
swh_storage.origin_visit_get(origin.url, order="foobar") # wrong order
def test_origin_visit_get_all(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
ov1, ov2, ov3 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
]
)
# order asc, no pagination, no limit
actual_page = swh_storage.origin_visit_get(origin.url)
assert actual_page.next_page_token is None
assert actual_page == PagedResult(results=[ov1, ov2, ov3])
# order asc, no pagination, limit
actual_page = swh_storage.origin_visit_get(origin.url, limit=2)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov1, ov2]
# order asc, pagination, no limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov3]
assert actual_page == PagedResult(results=[ov3])
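        # in these tests, a page token is the stringified id of the last visit seen;
        # listing resumes after that visit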
next_page_token = str(ov1.visit)
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token
)
assert actual_page.next_page_token is None
assert actual_page == PagedResult(results=[ov2, ov3])
# order asc, pagination, limit
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, limit=2
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov2, ov3]
assert actual_page == PagedResult(results=[ov2, ov3])
next_page_token = str(ov2.visit)
actual_page = swh_storage.origin_visit_get(
origin.url, page_token=next_page_token, limit=1
)
assert actual_page.next_page_token is None
assert actual_page == PagedResult(results=[ov3])
# order desc, no pagination, no limit
- actual_page = swh_storage.origin_visit_get(origin.url, order="desc")
+ actual_page = swh_storage.origin_visit_get(origin.url, order=ListOrder.DESC)
assert actual_page.next_page_token is None
assert actual_page == PagedResult(results=[ov3, ov2, ov1])
# order desc, no pagination, limit
- actual_page = swh_storage.origin_visit_get(origin.url, limit=2, order="desc")
+ actual_page = swh_storage.origin_visit_get(
+ origin.url, limit=2, order=ListOrder.DESC
+ )
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov3, ov2]
actual_page = swh_storage.origin_visit_get(
- origin.url, page_token=next_page_token, order="desc"
+ origin.url, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page.results == [ov1]
assert actual_page == PagedResult(results=[ov1])
# order desc, pagination, no limit
next_page_token = str(ov3.visit)
actual_page = swh_storage.origin_visit_get(
- origin.url, page_token=next_page_token, order="desc"
+ origin.url, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page.next_page_token is None
assert actual_page == PagedResult(results=[ov2, ov1])
# order desc, pagination, limit
actual_page = swh_storage.origin_visit_get(
- origin.url, page_token=next_page_token, order="desc", limit=1
+ origin.url, page_token=next_page_token, order=ListOrder.DESC, limit=1
)
next_page_token = actual_page.next_page_token
assert next_page_token is not None
assert actual_page.results == [ov2]
actual_page = swh_storage.origin_visit_get(
- origin.url, page_token=next_page_token, order="desc"
+ origin.url, page_token=next_page_token, order=ListOrder.DESC
)
assert actual_page == PagedResult(results=[ov1])
def test_origin_visit_status_get_random(self, swh_storage, sample_data):
origins = sample_data.origins[:2]
swh_storage.origin_add(origins)
# Add some random visits within the selection range
visits = self._generate_random_visits()
visit_type = "git"
# Add visits to those origins
for origin in origins:
for date_visit in visits:
visit = swh_storage.origin_visit_add(
[OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)]
)[0]
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=visit.visit,
date=now(),
status="full",
snapshot=None,
)
]
)
swh_storage.refresh_stat_counters()
stats = swh_storage.stat_counters()
assert stats["origin"] == len(origins)
assert stats["origin_visit"] == len(origins) * len(visits)
random_ov, random_ovs = swh_storage.origin_visit_status_get_random(visit_type)
assert random_ov and random_ovs
assert random_ov.origin is not None
assert random_ov.origin == random_ovs.origin
assert random_ov.origin in [o.url for o in origins]
def test_origin_visit_status_get_random_nothing_found(
self, swh_storage, sample_data
):
origins = sample_data.origins
swh_storage.origin_add(origins)
visit_type = "hg"
        # Add some visits older than the window considered by the random
        # selection, so nothing will be found
visits = self._generate_random_visits(nb_visits=3, start=13, end=24)
for origin in origins:
for date_visit in visits:
visit = swh_storage.origin_visit_add(
[OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)]
)[0]
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=visit.visit,
date=now(),
status="full",
snapshot=None,
)
]
)
random_origin_visit = swh_storage.origin_visit_status_get_random(visit_type)
assert random_origin_visit is None
def test_origin_get_by_sha1(self, swh_storage, sample_data):
origin = sample_data.origin
assert swh_storage.origin_get([origin.url])[0] is None
swh_storage.origin_add([origin])
origins = list(swh_storage.origin_get_by_sha1([sha1(origin.url)]))
assert len(origins) == 1
assert origins[0]["url"] == origin.url
def test_origin_get_by_sha1_not_found(self, swh_storage, sample_data):
origin = sample_data.origin
assert swh_storage.origin_get([origin.url])[0] is None
origins = list(swh_storage.origin_get_by_sha1([sha1(origin.url)]))
assert len(origins) == 1
assert origins[0] is None
def test_origin_search_single_result(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
found_origins = list(swh_storage.origin_search(origin.url))
assert len(found_origins) == 0
found_origins = list(swh_storage.origin_search(origin.url, regexp=True))
assert len(found_origins) == 0
swh_storage.origin_add([origin])
origin_data = origin.to_dict()
found_origins = list(swh_storage.origin_search(origin.url))
assert len(found_origins) == 1
assert found_origins[0] == origin_data
found_origins = list(
swh_storage.origin_search(f".{origin.url[1:-1]}.", regexp=True)
)
assert len(found_origins) == 1
assert found_origins[0] == origin_data
swh_storage.origin_add([origin2])
origin2_data = origin2.to_dict()
found_origins = list(swh_storage.origin_search(origin2.url))
assert len(found_origins) == 1
assert found_origins[0] == origin2_data
found_origins = list(
swh_storage.origin_search(f".{origin2.url[1:-1]}.", regexp=True)
)
assert len(found_origins) == 1
assert found_origins[0] == origin2_data
def test_origin_search_no_regexp(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
origin_dicts = [o.to_dict() for o in [origin, origin2]]
swh_storage.origin_add([origin, origin2])
# no pagination
found_origins = list(swh_storage.origin_search("/"))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(swh_storage.origin_search("/", offset=0, limit=1))
assert len(found_origins0) == 1
assert found_origins0[0] in origin_dicts
# offset=1
found_origins1 = list(swh_storage.origin_search("/", offset=1, limit=1))
assert len(found_origins1) == 1
assert found_origins1[0] in origin_dicts
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_search_regexp_substring(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
origin_dicts = [o.to_dict() for o in [origin, origin2]]
swh_storage.origin_add([origin, origin2])
# no pagination
found_origins = list(swh_storage.origin_search("/", regexp=True))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(
swh_storage.origin_search("/", offset=0, limit=1, regexp=True)
)
assert len(found_origins0) == 1
assert found_origins0[0] in origin_dicts
# offset=1
found_origins1 = list(
swh_storage.origin_search("/", offset=1, limit=1, regexp=True)
)
assert len(found_origins1) == 1
assert found_origins1[0] in origin_dicts
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_search_regexp_fullstring(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
origin_dicts = [o.to_dict() for o in [origin, origin2]]
swh_storage.origin_add([origin, origin2])
# no pagination
found_origins = list(swh_storage.origin_search(".*/.*", regexp=True))
assert len(found_origins) == 2
# offset=0
found_origins0 = list(
swh_storage.origin_search(".*/.*", offset=0, limit=1, regexp=True)
)
assert len(found_origins0) == 1
assert found_origins0[0] in origin_dicts
# offset=1
found_origins1 = list(
swh_storage.origin_search(".*/.*", offset=1, limit=1, regexp=True)
)
assert len(found_origins1) == 1
assert found_origins1[0] in origin_dicts
# check both origins were returned
assert found_origins0 != found_origins1
def test_origin_visit_add(self, swh_storage, sample_data):
origin1 = sample_data.origins[1]
swh_storage.origin_add([origin1])
date_visit = now()
date_visit2 = date_visit + datetime.timedelta(minutes=1)
date_visit = round_to_milliseconds(date_visit)
date_visit2 = round_to_milliseconds(date_visit2)
visit1 = OriginVisit(
origin=origin1.url, date=date_visit, type=sample_data.type_visit1,
)
visit2 = OriginVisit(
origin=origin1.url, date=date_visit2, type=sample_data.type_visit2,
)
# add once
ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2])
# then again (will be ignored as they already exist)
origin_visit1, origin_visit2 = swh_storage.origin_visit_add([ov1, ov2])
assert ov1 == origin_visit1
assert ov2 == origin_visit2
ovs1 = OriginVisitStatus(
origin=origin1.url,
visit=ov1.visit,
date=date_visit,
status="created",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=origin1.url,
visit=ov2.visit,
date=date_visit2,
status="created",
snapshot=None,
)
actual_visits = swh_storage.origin_visit_get(origin1.url).results
expected_visits = [ov1, ov2]
assert len(expected_visits) == len(actual_visits)
for visit in expected_visits:
assert visit in actual_visits
actual_objects = list(swh_storage.journal_writer.journal.objects)
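        # each visit is written to the journal by both origin_visit_add calls,
        # hence the "* 2" below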
expected_objects = list(
[("origin", origin1)]
+ [("origin_visit", visit) for visit in expected_visits] * 2
+ [("origin_visit_status", ovs) for ovs in [ovs1, ovs2]]
)
for obj in expected_objects:
assert obj in actual_objects
def test_origin_visit_add_validation(self, swh_storage, sample_data):
"""Unknown origin when adding visits should raise"""
        visit = attr.evolve(sample_data.origin_visit, origin="something-unknown")
with pytest.raises(StorageArgumentException, match="Unknown origin"):
swh_storage.origin_visit_add([visit])
objects = list(swh_storage.journal_writer.journal.objects)
assert not objects
def test_origin_visit_status_add_validation(self, swh_storage):
"""Wrong origin_visit_status input should raise storage argument error"""
date_visit = now()
visit_status1 = OriginVisitStatus(
origin="unknown-origin-url",
visit=10,
date=date_visit,
status="full",
snapshot=None,
)
with pytest.raises(StorageArgumentException, match="Unknown origin"):
swh_storage.origin_visit_status_add([visit_status1])
objects = list(swh_storage.journal_writer.journal.objects)
assert not objects
def test_origin_visit_status_add(self, swh_storage, sample_data):
"""Correct origin visit statuses should add a new visit status
"""
snapshot = sample_data.snapshot
origin1 = sample_data.origins[1]
origin2 = Origin(url="new-origin")
swh_storage.origin_add([origin1, origin2])
ov1, ov2 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
OriginVisit(
origin=origin2.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
]
)
ovs1 = OriginVisitStatus(
origin=origin1.url,
visit=ov1.visit,
date=sample_data.date_visit1,
status="created",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=origin2.url,
visit=ov2.visit,
date=sample_data.date_visit2,
status="created",
snapshot=None,
)
date_visit_now = round_to_milliseconds(now())
visit_status1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit_now,
status="full",
snapshot=snapshot.id,
)
date_visit_now = round_to_milliseconds(now())
visit_status2 = OriginVisitStatus(
origin=ov2.origin,
visit=ov2.visit,
date=date_visit_now,
status="ongoing",
snapshot=None,
metadata={"intrinsic": "something"},
)
swh_storage.origin_visit_status_add([visit_status1, visit_status2])
visit = swh_storage.origin_visit_get_latest(origin1.url, require_snapshot=True)
visit_status = swh_storage.origin_visit_status_get_latest(
origin1.url, visit.visit, require_snapshot=True
)
assert visit_status == visit_status1
visit = swh_storage.origin_visit_get_latest(origin2.url, require_snapshot=False)
visit_status = swh_storage.origin_visit_status_get_latest(
origin2.url, visit.visit, require_snapshot=False
)
assert origin2.url != origin1.url
assert visit_status == visit_status2
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_origins = [origin1, origin2]
expected_visits = [ov1, ov2]
expected_visit_statuses = [ovs1, ovs2, visit_status1, visit_status2]
expected_objects = (
[("origin", o) for o in expected_origins]
+ [("origin_visit", v) for v in expected_visits]
+ [("origin_visit_status", ovs) for ovs in expected_visit_statuses]
)
for obj in expected_objects:
assert obj in actual_objects
def test_origin_visit_status_add_twice(self, swh_storage, sample_data):
"""Correct origin visit statuses should add a new visit status
"""
snapshot = sample_data.snapshot
origin1 = sample_data.origins[1]
swh_storage.origin_add([origin1])
ov1 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
]
)[0]
ovs1 = OriginVisitStatus(
origin=origin1.url,
visit=ov1.visit,
date=sample_data.date_visit1,
status="created",
snapshot=None,
)
date_visit_now = round_to_milliseconds(now())
visit_status1 = OriginVisitStatus(
origin=ov1.origin,
visit=ov1.visit,
date=date_visit_now,
status="full",
snapshot=snapshot.id,
)
swh_storage.origin_visit_status_add([visit_status1])
        # second call will ignore existing entries in storage (but still writes them to the journal)
swh_storage.origin_visit_status_add([visit_status1])
visit_status = swh_storage.origin_visit_status_get_latest(ov1.origin, ov1.visit)
assert visit_status == visit_status1
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_origins = [origin1]
expected_visits = [ov1]
expected_visit_statuses = [ovs1, visit_status1, visit_status1]
        # visit_status1 is written twice to the journal
expected_objects = (
[("origin", o) for o in expected_origins]
+ [("origin_visit", v) for v in expected_visits]
+ [("origin_visit_status", ovs) for ovs in expected_visit_statuses]
)
for obj in expected_objects:
assert obj in actual_objects
def test_origin_visit_find_by_date(self, swh_storage, sample_data):
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit1,
)
visit2 = OriginVisit(
origin=origin.url,
date=sample_data.date_visit3,
type=sample_data.type_visit2,
)
visit3 = OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit3,
)
ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
ovs1 = OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=sample_data.date_visit2,
status="ongoing",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=origin.url,
visit=ov2.visit,
date=sample_data.date_visit3,
status="ongoing",
snapshot=None,
)
ovs3 = OriginVisitStatus(
origin=origin.url,
visit=ov3.visit,
date=sample_data.date_visit2,
status="ongoing",
snapshot=None,
)
swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3])
# Simple case
actual_visit = swh_storage.origin_visit_find_by_date(
origin.url, sample_data.date_visit3
)
assert actual_visit == ov2
# There are two visits at the same date, the latest must be returned
actual_visit = swh_storage.origin_visit_find_by_date(
origin.url, sample_data.date_visit2
)
assert actual_visit == ov3
def test_origin_visit_find_by_date__unknown_origin(self, swh_storage, sample_data):
actual_visit = swh_storage.origin_visit_find_by_date(
"foo", sample_data.date_visit2
)
assert actual_visit is None
def test_origin_visit_get_by(self, swh_storage, sample_data):
snapshot = sample_data.snapshot
origins = sample_data.origins[:2]
swh_storage.origin_add(origins)
origin_url, origin_url2 = [o.url for o in origins]
visit = OriginVisit(
origin=origin_url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
# Add some other {origin, visit} entries
visit2 = OriginVisit(
origin=origin_url,
date=sample_data.date_visit3,
type=sample_data.type_visit3,
)
visit3 = OriginVisit(
origin=origin_url2,
date=sample_data.date_visit3,
type=sample_data.type_visit3,
)
swh_storage.origin_visit_add([visit2, visit3])
# when
visit1_metadata = {
"contents": 42,
"directories": 22,
}
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=origin_visit1.visit,
date=now(),
status="full",
snapshot=snapshot.id,
metadata=visit1_metadata,
)
]
)
actual_visit = swh_storage.origin_visit_get_by(origin_url, origin_visit1.visit)
assert actual_visit == origin_visit1
def test_origin_visit_get_by__no_result(self, swh_storage, sample_data):
actual_visit = swh_storage.origin_visit_get_by("unknown", 10) # unknown origin
assert actual_visit is None
origin = sample_data.origin
swh_storage.origin_add([origin])
actual_visit = swh_storage.origin_visit_get_by(origin.url, 999) # unknown visit
assert actual_visit is None
def test_origin_visit_get_latest_none(self, swh_storage, sample_data):
"""Origin visit get latest on unknown objects should return nothing
"""
# unknown origin so no result
assert swh_storage.origin_visit_get_latest("unknown-origin") is None
# unknown type
origin = sample_data.origin
swh_storage.origin_add([origin])
assert swh_storage.origin_visit_get_latest(origin.url, type="unknown") is None
def test_origin_visit_get_latest_filter_type(self, swh_storage, sample_data):
"""Filtering origin visit get latest with filter type should be ok
"""
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="git",
)
visit2 = OriginVisit(
origin=origin.url, date=sample_data.date_visit2, type="hg",
)
date_now = round_to_milliseconds(now())
visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",)
assert sample_data.date_visit1 < sample_data.date_visit2
assert sample_data.date_visit2 < date_now
ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
# Check type filter is ok
actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="git")
assert actual_visit == ov1
actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="hg")
assert actual_visit == ov3
actual_visit_unknown_type = swh_storage.origin_visit_get_latest(
origin.url, type="npm", # no visit matching that type
)
assert actual_visit_unknown_type is None
def test_origin_visit_get_latest(self, swh_storage, sample_data):
empty_snapshot, complete_snapshot = sample_data.snapshots[1:3]
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="git",
)
visit2 = OriginVisit(
origin=origin.url, date=sample_data.date_visit2, type="hg",
)
date_now = round_to_milliseconds(now())
visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",)
assert visit1.date < visit2.date
assert visit2.date < visit3.date
ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
# no filters, latest visit is the last one (whose date is most recent)
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# 3 visits, none has snapshot so nothing is returned
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
assert actual_visit is None
        # visits are created with the "created" status, so nothing is returned
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["partial"]
)
assert actual_visit is None
        # visits are created with the "created" status, so the most recent one is returned again
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["created"]
)
assert actual_visit == ov3
# Add snapshot to visit1; require_snapshot=True makes it return first visit
swh_storage.snapshot_add([complete_snapshot])
visit_status_with_snapshot = OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=round_to_milliseconds(now()),
status="ongoing",
snapshot=complete_snapshot.id,
)
swh_storage.origin_visit_status_add([visit_status_with_snapshot])
# only the first visit has a snapshot now
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
assert actual_visit == ov1
# only the first visit has a status ongoing now
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["ongoing"]
)
assert actual_visit == ov1
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov1.visit, require_snapshot=True
)
assert actual_visit_status == visit_status_with_snapshot
        # ... and require_snapshot=False (the default) still returns the latest visit (3rd)
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=False
)
assert actual_visit == ov3
# no specific filter, this returns as before the latest visit
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# Status filter: all three visits are status=ongoing, so no visit
# returned
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"]
)
assert actual_visit is None
visit_status1_full = OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=round_to_milliseconds(now()),
status="full",
snapshot=complete_snapshot.id,
)
# Mark the first visit as completed and check status filter again
swh_storage.origin_visit_status_add([visit_status1_full])
# only the first visit has the full status
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"]
)
assert actual_visit == ov1
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov1.visit, allowed_statuses=["full"]
)
assert actual_visit_status == visit_status1_full
# no specific filter, this returns as before the latest visit
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# Add snapshot to visit2 and check that the new snapshot is returned
swh_storage.snapshot_add([empty_snapshot])
visit_status2_full = OriginVisitStatus(
origin=origin.url,
visit=ov2.visit,
date=round_to_milliseconds(now()),
status="ongoing",
snapshot=empty_snapshot.id,
)
swh_storage.origin_visit_status_add([visit_status2_full])
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
# 2nd visit is most recent with a snapshot
assert actual_visit == ov2
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov2.visit, require_snapshot=True
)
assert actual_visit_status == visit_status2_full
        # no specific filter, so as before this returns the latest visit, the 3rd one
        actual_visit = swh_storage.origin_visit_get_latest(origin.url)
        assert actual_visit == ov3
# full status is still the first visit
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"]
)
assert actual_visit == ov1
# Add snapshot to visit3 (same date as visit2)
visit_status3_with_snapshot = OriginVisitStatus(
origin=origin.url,
visit=ov3.visit,
date=round_to_milliseconds(now()),
status="ongoing",
snapshot=complete_snapshot.id,
)
swh_storage.origin_visit_status_add([visit_status3_with_snapshot])
# full status is still the first visit
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, allowed_statuses=["full"], require_snapshot=True,
)
assert actual_visit == ov1
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url,
visit=actual_visit.visit,
allowed_statuses=["full"],
require_snapshot=True,
)
assert actual_visit_status == visit_status1_full
# most recent is still the 3rd visit
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov3
# 3rd visit has a snapshot now, so it's elected
actual_visit = swh_storage.origin_visit_get_latest(
origin.url, require_snapshot=True
)
assert actual_visit == ov3
actual_visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov3.visit, require_snapshot=True
)
assert actual_visit_status == visit_status3_with_snapshot
def test_origin_visit_get_latest__same_date(self, swh_storage, sample_data):
empty_snapshot, complete_snapshot = sample_data.snapshots[1:3]
origin = sample_data.origin
swh_storage.origin_add([origin])
visit1 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="git",
)
visit2 = OriginVisit(
origin=origin.url, date=sample_data.date_visit1, type="hg",
)
ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2])
# ties should be broken by using the visit id
actual_visit = swh_storage.origin_visit_get_latest(origin.url)
assert actual_visit == ov2
def test_origin_visit_status_get_latest(self, swh_storage, sample_data):
snapshot = sample_data.snapshots[2]
origin1 = sample_data.origin
swh_storage.origin_add([origin1])
# to have some reference visits
ov1, ov2 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
),
OriginVisit(
origin=origin1.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
),
]
)
swh_storage.snapshot_add([snapshot])
date_now = round_to_milliseconds(now())
assert sample_data.date_visit1 < sample_data.date_visit2
assert sample_data.date_visit2 < date_now
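        # ov1 gets "partial" then "ongoing" statuses (no snapshot);
        # ov2 gets "ongoing" then "full" (with a snapshot)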
ovs1 = OriginVisitStatus(
origin=origin1.url,
visit=ov1.visit,
date=sample_data.date_visit1,
status="partial",
snapshot=None,
)
ovs2 = OriginVisitStatus(
origin=origin1.url,
visit=ov1.visit,
date=sample_data.date_visit2,
status="ongoing",
snapshot=None,
)
ovs3 = OriginVisitStatus(
origin=origin1.url,
visit=ov2.visit,
date=sample_data.date_visit2
+ datetime.timedelta(minutes=1), # to not be ignored
status="ongoing",
snapshot=None,
)
ovs4 = OriginVisitStatus(
origin=origin1.url,
visit=ov2.visit,
date=date_now,
status="full",
snapshot=snapshot.id,
metadata={"something": "wicked"},
)
swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3, ovs4])
# unknown origin so no result
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
"unknown-origin", ov1.visit
)
assert actual_origin_visit is None
# unknown visit so no result
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
ov1.origin, ov1.visit + 10
)
assert actual_origin_visit is None
        # Two statuses for this visit, both without snapshot; take the most recent
actual_origin_visit2 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov1.visit
)
assert isinstance(actual_origin_visit2, OriginVisitStatus)
assert actual_origin_visit2 == ovs2
assert ovs2.origin == origin1.url
assert ovs2.visit == ov1.visit
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
origin1.url, ov1.visit, require_snapshot=True
)
# there is no visit with snapshot yet for that visit
assert actual_origin_visit is None
actual_origin_visit2 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov1.visit, allowed_statuses=["partial", "ongoing"]
)
        # both statuses match the filter; the most recent one ("ongoing") is elected
assert actual_origin_visit2 == ovs2
assert actual_origin_visit2.status == "ongoing"
actual_origin_visit4 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov2.visit, require_snapshot=True
)
assert actual_origin_visit4 == ovs4
assert actual_origin_visit4.snapshot == snapshot.id
actual_origin_visit = swh_storage.origin_visit_status_get_latest(
origin1.url, ov2.visit, require_snapshot=True, allowed_statuses=["ongoing"]
)
        # nothing matches: the only status with a snapshot has status "full", not "ongoing"
        assert actual_origin_visit is None
actual_origin_visit3 = swh_storage.origin_visit_status_get_latest(
origin1.url, ov2.visit, allowed_statuses=["ongoing"]
)
assert actual_origin_visit3 == ovs3
def test_person_fullname_unicity(self, swh_storage, sample_data):
revision, rev2 = sample_data.revisions[0:2]
        # create a revision with the same committer fullname but without name and email
revision2 = attr.evolve(
rev2,
committer=Person(
fullname=revision.committer.fullname, name=None, email=None
),
)
swh_storage.revision_add([revision, revision2])
# when getting added revisions
revisions = list(swh_storage.revision_get([revision.id, revision2.id]))
# then check committers are the same
assert revisions[0]["committer"] == revisions[1]["committer"]
def test_snapshot_add_get_empty(self, swh_storage, sample_data):
empty_snapshot = sample_data.snapshots[1]
empty_snapshot_dict = empty_snapshot.to_dict()
origin = sample_data.origin
swh_storage.origin_add([origin])
ov1 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
]
)[0]
actual_result = swh_storage.snapshot_add([empty_snapshot])
assert actual_result == {"snapshot:add": 1}
date_now = now()
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=date_now,
status="full",
snapshot=empty_snapshot.id,
)
]
)
by_id = swh_storage.snapshot_get(empty_snapshot.id)
assert by_id == {**empty_snapshot_dict, "next_branch": None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin.url, ov1.visit)
assert by_ov == {**empty_snapshot_dict, "next_branch": None}
ovs1 = OriginVisitStatus.from_dict(
{
"origin": origin.url,
"date": sample_data.date_visit1,
"visit": ov1.visit,
"status": "created",
"snapshot": None,
"metadata": None,
}
)
ovs2 = OriginVisitStatus.from_dict(
{
"origin": origin.url,
"date": date_now,
"visit": ov1.visit,
"status": "full",
"metadata": None,
"snapshot": empty_snapshot.id,
}
)
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("origin", origin),
("origin_visit", ov1),
("origin_visit_status", ovs1,),
("snapshot", empty_snapshot),
("origin_visit_status", ovs2,),
]
for obj in expected_objects:
assert obj in actual_objects
def test_snapshot_add_get_complete(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
complete_snapshot_dict = complete_snapshot.to_dict()
origin = sample_data.origin
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
visit_id = origin_visit1.visit
actual_result = swh_storage.snapshot_add([complete_snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=complete_snapshot.id,
)
]
)
assert actual_result == {"snapshot:add": 1}
by_id = swh_storage.snapshot_get(complete_snapshot.id)
assert by_id == {**complete_snapshot_dict, "next_branch": None}
by_ov = swh_storage.snapshot_get_by_origin_visit(origin.url, visit_id)
assert by_ov == {**complete_snapshot_dict, "next_branch": None}
def test_snapshot_add_many(self, swh_storage, sample_data):
snapshot, _, complete_snapshot = sample_data.snapshots[:3]
actual_result = swh_storage.snapshot_add([snapshot, complete_snapshot])
assert actual_result == {"snapshot:add": 2}
assert swh_storage.snapshot_get(complete_snapshot.id) == {
**complete_snapshot.to_dict(),
"next_branch": None,
}
assert swh_storage.snapshot_get(snapshot.id) == {
**snapshot.to_dict(),
"next_branch": None,
}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["snapshot"] == 2
def test_snapshot_add_many_from_generator(self, swh_storage, sample_data):
snapshot, _, complete_snapshot = sample_data.snapshots[:3]
def _snp_gen():
yield from [snapshot, complete_snapshot]
actual_result = swh_storage.snapshot_add(_snp_gen())
assert actual_result == {"snapshot:add": 2}
swh_storage.refresh_stat_counters()
assert swh_storage.stat_counters()["snapshot"] == 2
def test_snapshot_add_many_incremental(self, swh_storage, sample_data):
snapshot, _, complete_snapshot = sample_data.snapshots[:3]
actual_result = swh_storage.snapshot_add([complete_snapshot])
assert actual_result == {"snapshot:add": 1}
actual_result2 = swh_storage.snapshot_add([snapshot, complete_snapshot])
assert actual_result2 == {"snapshot:add": 1}
assert swh_storage.snapshot_get(complete_snapshot.id) == {
**complete_snapshot.to_dict(),
"next_branch": None,
}
assert swh_storage.snapshot_get(snapshot.id) == {
**snapshot.to_dict(),
"next_branch": None,
}
def test_snapshot_add_twice(self, swh_storage, sample_data):
snapshot, empty_snapshot = sample_data.snapshots[:2]
actual_result = swh_storage.snapshot_add([empty_snapshot])
assert actual_result == {"snapshot:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("snapshot", empty_snapshot)
]
actual_result = swh_storage.snapshot_add([snapshot])
assert actual_result == {"snapshot:add": 1}
assert list(swh_storage.journal_writer.journal.objects) == [
("snapshot", empty_snapshot),
("snapshot", snapshot),
]
def test_snapshot_add_count_branches(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
actual_result = swh_storage.snapshot_add([complete_snapshot])
assert actual_result == {"snapshot:add": 1}
snp_size = swh_storage.snapshot_count_branches(complete_snapshot.id)
expected_snp_size = {
"alias": 1,
"content": 1,
"directory": 2,
"release": 1,
"revision": 1,
"snapshot": 1,
None: 1,
}
assert snp_size == expected_snp_size
def test_snapshot_add_get_paginated(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
swh_storage.snapshot_add([complete_snapshot])
snp_id = complete_snapshot.id
branches = complete_snapshot.to_dict()["branches"]
branch_names = list(sorted(branches))
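        # branches are listed in lexicographic order of their names,
        # which drives the pagination checks below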
# Test branch_from
snapshot = swh_storage.snapshot_get_branches(snp_id, branches_from=b"release")
rel_idx = branch_names.index(b"release")
expected_snapshot = {
"id": snp_id,
"branches": {name: branches[name] for name in branch_names[rel_idx:]},
"next_branch": None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(snp_id, branches_count=1)
expected_snapshot = {
"id": snp_id,
"branches": {branch_names[0]: branches[branch_names[0]],},
"next_branch": b"content",
}
assert snapshot == expected_snapshot
# test branch_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, branches_from=b"directory", branches_count=3
)
dir_idx = branch_names.index(b"directory")
expected_snapshot = {
"id": snp_id,
"branches": {
name: branches[name] for name in branch_names[dir_idx : dir_idx + 3]
},
"next_branch": branch_names[dir_idx + 3],
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered(self, swh_storage, sample_data):
origin = sample_data.origin
complete_snapshot = sample_data.snapshots[2]
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([complete_snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=complete_snapshot.id,
)
]
)
snp_id = complete_snapshot.id
branches = complete_snapshot.to_dict()["branches"]
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["release", "revision"]
)
expected_snapshot = {
"id": snp_id,
"branches": {
name: tgt
for name, tgt in branches.items()
if tgt and tgt["target_type"] in ["release", "revision"]
},
"next_branch": None,
}
assert snapshot == expected_snapshot
snapshot = swh_storage.snapshot_get_branches(snp_id, target_types=["alias"])
expected_snapshot = {
"id": snp_id,
"branches": {
name: tgt
for name, tgt in branches.items()
if tgt and tgt["target_type"] == "alias"
},
"next_branch": None,
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_filtered_and_paginated(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
swh_storage.snapshot_add([complete_snapshot])
snp_id = complete_snapshot.id
branches = complete_snapshot.to_dict()["branches"]
branch_names = list(sorted(branches))
# Test branch_from
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["directory", "release"], branches_from=b"directory2"
)
expected_snapshot = {
"id": snp_id,
"branches": {name: branches[name] for name in (b"directory2", b"release")},
"next_branch": None,
}
assert snapshot == expected_snapshot
# Test branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["directory", "release"], branches_count=1
)
expected_snapshot = {
"id": snp_id,
"branches": {b"directory": branches[b"directory"]},
"next_branch": b"directory2",
}
assert snapshot == expected_snapshot
        # Test branches_count=2
snapshot = swh_storage.snapshot_get_branches(
snp_id, target_types=["directory", "release"], branches_count=2
)
expected_snapshot = {
"id": snp_id,
"branches": {
name: branches[name] for name in (b"directory", b"directory2")
},
"next_branch": b"release",
}
assert snapshot == expected_snapshot
# test branch_from + branches_count
snapshot = swh_storage.snapshot_get_branches(
snp_id,
target_types=["directory", "release"],
branches_from=b"directory2",
branches_count=1,
)
dir_idx = branch_names.index(b"directory2")
expected_snapshot = {
"id": snp_id,
"branches": {branch_names[dir_idx]: branches[branch_names[dir_idx]],},
"next_branch": b"release",
}
assert snapshot == expected_snapshot
def test_snapshot_add_get_branch_by_type(self, swh_storage, sample_data):
complete_snapshot = sample_data.snapshots[2]
snapshot = complete_snapshot.to_dict()
alias1 = b"alias1"
alias2 = b"alias2"
target1 = random.choice(list(snapshot["branches"].keys()))
target2 = random.choice(list(snapshot["branches"].keys()))
snapshot["branches"][alias2] = {
"target": target2,
"target_type": "alias",
}
snapshot["branches"][alias1] = {
"target": target1,
"target_type": "alias",
}
new_snapshot = Snapshot.from_dict(snapshot)
swh_storage.snapshot_add([new_snapshot])
branches = swh_storage.snapshot_get_branches(
new_snapshot.id,
target_types=["alias"],
branches_from=alias1,
branches_count=1,
)["branches"]
assert len(branches) == 1
assert alias1 in branches
def test_snapshot_add_get(self, swh_storage, sample_data):
snapshot = sample_data.snapshot
origin = sample_data.origin
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
ov1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
expected_snapshot = {**snapshot.to_dict(), "next_branch": None}
by_id = swh_storage.snapshot_get(snapshot.id)
assert by_id == expected_snapshot
by_ov = swh_storage.snapshot_get_by_origin_visit(origin.url, ov1.visit)
assert by_ov == expected_snapshot
actual_visit = swh_storage.origin_visit_get_by(origin.url, ov1.visit)
assert actual_visit == ov1
visit_status = swh_storage.origin_visit_status_get_latest(
origin.url, ov1.visit, require_snapshot=True
)
assert visit_status.snapshot == snapshot.id
def test_snapshot_add_twice__by_origin_visit(self, swh_storage, sample_data):
snapshot = sample_data.snapshot
origin = sample_data.origin
swh_storage.origin_add([origin])
ov1 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url,
date=sample_data.date_visit1,
type=sample_data.type_visit1,
)
]
)[0]
swh_storage.snapshot_add([snapshot])
date_now2 = now()
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=ov1.visit,
date=date_now2,
status="ongoing",
snapshot=snapshot.id,
)
]
)
expected_snapshot = {**snapshot.to_dict(), "next_branch": None}
by_ov1 = swh_storage.snapshot_get_by_origin_visit(origin.url, ov1.visit)
assert by_ov1 == expected_snapshot
ov2 = swh_storage.origin_visit_add(
[
OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
)
]
)[0]
date_now4 = now()
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=ov2.visit,
date=date_now4,
status="ongoing",
snapshot=snapshot.id,
)
]
)
by_ov2 = swh_storage.snapshot_get_by_origin_visit(origin.url, ov2.visit)
assert by_ov2 == expected_snapshot
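        # reconstruct the statuses expected in the journal: an initial "created"
        # status per visit, plus the "ongoing" ones added above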
ovs1 = OriginVisitStatus.from_dict(
{
"origin": origin.url,
"date": sample_data.date_visit1,
"visit": ov1.visit,
"status": "created",
"metadata": None,
"snapshot": None,
}
)
ovs2 = OriginVisitStatus.from_dict(
{
"origin": origin.url,
"date": date_now2,
"visit": ov1.visit,
"status": "ongoing",
"metadata": None,
"snapshot": snapshot.id,
}
)
ovs3 = OriginVisitStatus.from_dict(
{
"origin": origin.url,
"date": sample_data.date_visit2,
"visit": ov2.visit,
"status": "created",
"metadata": None,
"snapshot": None,
}
)
ovs4 = OriginVisitStatus.from_dict(
{
"origin": origin.url,
"date": date_now4,
"visit": ov2.visit,
"status": "ongoing",
"metadata": None,
"snapshot": snapshot.id,
}
)
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("origin", origin),
("origin_visit", ov1),
("origin_visit_status", ovs1),
("snapshot", snapshot),
("origin_visit_status", ovs2),
("origin_visit", ov2),
("origin_visit_status", ovs3),
("origin_visit_status", ovs4),
]
for obj in expected_objects:
assert obj in actual_objects
def test_snapshot_get_random(self, swh_storage, sample_data):
snapshot, empty_snapshot, complete_snapshot = sample_data.snapshots[:3]
swh_storage.snapshot_add([snapshot, empty_snapshot, complete_snapshot])
assert swh_storage.snapshot_get_random() in {
snapshot.id,
empty_snapshot.id,
complete_snapshot.id,
}
def test_snapshot_missing(self, swh_storage, sample_data):
snapshot, missing_snapshot = sample_data.snapshots[:2]
snapshots = [snapshot.id, missing_snapshot.id]
swh_storage.snapshot_add([snapshot])
missing_snapshots = swh_storage.snapshot_missing(snapshots)
assert list(missing_snapshots) == [missing_snapshot.id]
def test_stat_counters(self, swh_storage, sample_data):
origin = sample_data.origin
snapshot = sample_data.snapshot
revision = sample_data.revision
release = sample_data.release
directory = sample_data.directory
content = sample_data.content
expected_keys = ["content", "directory", "origin", "revision"]
# Initially, all counters are 0
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
assert counters[key] == 0
# Add a content. Only the content counter should increase.
swh_storage.content_add([content])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert set(expected_keys) <= set(counters)
for key in expected_keys:
if key != "content":
assert counters[key] == 0
assert counters["content"] == 1
# Add other objects. Check their counter increased as well.
swh_storage.origin_add([origin])
visit = OriginVisit(
origin=origin.url,
date=sample_data.date_visit2,
type=sample_data.type_visit2,
)
origin_visit1 = swh_storage.origin_visit_add([visit])[0]
swh_storage.snapshot_add([snapshot])
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin.url,
visit=origin_visit1.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
swh_storage.directory_add([directory])
swh_storage.revision_add([revision])
swh_storage.release_add([release])
swh_storage.refresh_stat_counters()
counters = swh_storage.stat_counters()
assert counters["content"] == 1
assert counters["directory"] == 1
assert counters["snapshot"] == 1
assert counters["origin"] == 1
assert counters["origin_visit"] == 1
assert counters["revision"] == 1
assert counters["release"] == 1
assert counters["snapshot"] == 1
if "person" in counters:
assert counters["person"] == 3
def test_content_find_ctime(self, swh_storage, sample_data):
origin_content = sample_data.content
ctime = round_to_milliseconds(now())
content = attr.evolve(origin_content, data=None, ctime=ctime)
swh_storage.content_add_metadata([content])
actually_present = swh_storage.content_find({"sha1": content.sha1})
assert actually_present[0] == content.to_dict()
def test_content_find_with_present_content(self, swh_storage, sample_data):
content = sample_data.content
expected_content = content.to_dict()
del expected_content["data"]
del expected_content["ctime"]
        # 1. find by sha1
swh_storage.content_add([content])
actually_present = swh_storage.content_find({"sha1": content.sha1})
assert 1 == len(actually_present)
actually_present[0].pop("ctime")
assert actually_present[0] == expected_content
        # 2. find by sha1_git
actually_present = swh_storage.content_find({"sha1_git": content.sha1_git})
assert 1 == len(actually_present)
actually_present[0].pop("ctime")
assert actually_present[0] == expected_content
        # 3. find by sha256
actually_present = swh_storage.content_find({"sha256": content.sha256})
assert 1 == len(actually_present)
actually_present[0].pop("ctime")
assert actually_present[0] == expected_content
        # 4. find by all hashes at once
actually_present = swh_storage.content_find(content.hashes())
assert 1 == len(actually_present)
actually_present[0].pop("ctime")
assert actually_present[0] == expected_content
def test_content_find_with_non_present_content(self, swh_storage, sample_data):
missing_content = sample_data.skipped_content
        # 1. lookup by sha1 of a missing content
actually_present = swh_storage.content_find({"sha1": missing_content.sha1})
assert actually_present == []
        # 2. lookup by sha1_git of a missing content
actually_present = swh_storage.content_find(
{"sha1_git": missing_content.sha1_git}
)
assert actually_present == []
        # 3. lookup by sha256 of a missing content
actually_present = swh_storage.content_find({"sha256": missing_content.sha256})
assert actually_present == []
def test_content_find_with_duplicate_input(self, swh_storage, sample_data):
content = sample_data.content
# Create fake data with colliding sha256 and blake2s256
sha1_array = bytearray(content.sha1)
sha1_array[0] += 1
sha1git_array = bytearray(content.sha1_git)
sha1git_array[0] += 1
duplicated_content = attr.evolve(
content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array)
)
# Inject the data
swh_storage.content_add([content, duplicated_content])
actual_result = list(
swh_storage.content_find(
{
"blake2s256": duplicated_content.blake2s256,
"sha256": duplicated_content.sha256,
}
)
)
expected_content = content.to_dict()
expected_duplicated_content = duplicated_content.to_dict()
        for key in ["data", "ctime"]:  # drop volatile fields so the dicts compare equal
for dict_ in [
expected_content,
expected_duplicated_content,
actual_result[0],
actual_result[1],
]:
dict_.pop(key, None)
expected_result = [expected_content, expected_duplicated_content]
for result in expected_result:
assert result in actual_result
def test_content_find_with_duplicate_sha256(self, swh_storage, sample_data):
content = sample_data.content
hashes = {}
# Create fake data with colliding sha256
for hashalgo in ("sha1", "sha1_git", "blake2s256"):
value = bytearray(getattr(content, hashalgo))
value[0] += 1
hashes[hashalgo] = bytes(value)
duplicated_content = attr.evolve(
content,
sha1=hashes["sha1"],
sha1_git=hashes["sha1_git"],
blake2s256=hashes["blake2s256"],
)
swh_storage.content_add([content, duplicated_content])
actual_result = list(
swh_storage.content_find({"sha256": duplicated_content.sha256})
)
assert len(actual_result) == 2
expected_content = content.to_dict()
expected_duplicated_content = duplicated_content.to_dict()
        for key in ["data", "ctime"]:  # drop volatile fields so the dicts compare equal
for dict_ in [
expected_content,
expected_duplicated_content,
actual_result[0],
actual_result[1],
]:
dict_.pop(key, None)
assert sorted(actual_result, key=lambda x: x["sha1"]) == [
expected_content,
expected_duplicated_content,
]
# Find with both sha256 and blake2s256
actual_result = list(
swh_storage.content_find(
{
"sha256": duplicated_content.sha256,
"blake2s256": duplicated_content.blake2s256,
}
)
)
assert len(actual_result) == 1
actual_result[0].pop("ctime")
assert actual_result == [expected_duplicated_content]
def test_content_find_with_duplicate_blake2s256(self, swh_storage, sample_data):
content = sample_data.content
        # Create fake data colliding only on blake2s256 (sha1, sha1_git and sha256 all differ)
sha1_array = bytearray(content.sha1)
sha1_array[0] += 1
sha1git_array = bytearray(content.sha1_git)
sha1git_array[0] += 1
sha256_array = bytearray(content.sha256)
sha256_array[0] += 1
duplicated_content = attr.evolve(
content,
sha1=bytes(sha1_array),
sha1_git=bytes(sha1git_array),
sha256=bytes(sha256_array),
)
swh_storage.content_add([content, duplicated_content])
actual_result = list(
swh_storage.content_find({"blake2s256": duplicated_content.blake2s256})
)
expected_content = content.to_dict()
expected_duplicated_content = duplicated_content.to_dict()
        for key in ["data", "ctime"]:  # drop volatile fields so the dicts compare equal
for dict_ in [
expected_content,
expected_duplicated_content,
actual_result[0],
actual_result[1],
]:
dict_.pop(key, None)
expected_result = [expected_content, expected_duplicated_content]
for result in expected_result:
assert result in actual_result
# Find with both sha256 and blake2s256
actual_result = list(
swh_storage.content_find(
{
"sha256": duplicated_content.sha256,
"blake2s256": duplicated_content.blake2s256,
}
)
)
actual_result[0].pop("ctime")
assert actual_result == [expected_duplicated_content]
def test_content_find_bad_input(self, swh_storage):
        # 1. an empty filter dict is rejected
with pytest.raises(StorageArgumentException):
swh_storage.content_find({}) # empty is bad
        # 2. an unknown hash algorithm key is rejected
with pytest.raises(StorageArgumentException):
swh_storage.content_find({"unknown-sha1": "something"}) # not the right key
def test_object_find_by_sha1_git(self, swh_storage, sample_data):
content = sample_data.content
directory = sample_data.directory
revision = sample_data.revision
release = sample_data.release
sha1_gits = [b"00000000000000000000"]
expected = {
b"00000000000000000000": [],
}
swh_storage.content_add([content])
sha1_gits.append(content.sha1_git)
expected[content.sha1_git] = [
{"sha1_git": content.sha1_git, "type": "content",}
]
swh_storage.directory_add([directory])
sha1_gits.append(directory.id)
expected[directory.id] = [{"sha1_git": directory.id, "type": "directory",}]
swh_storage.revision_add([revision])
sha1_gits.append(revision.id)
expected[revision.id] = [{"sha1_git": revision.id, "type": "revision",}]
swh_storage.release_add([release])
sha1_gits.append(release.id)
expected[release.id] = [{"sha1_git": release.id, "type": "release",}]
ret = swh_storage.object_find_by_sha1_git(sha1_gits)
assert expected == ret
def test_metadata_fetcher_add_get(self, swh_storage, sample_data):
fetcher = sample_data.metadata_fetcher
actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
assert actual_fetcher is None # does not exist
swh_storage.metadata_fetcher_add([fetcher])
res = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
assert res == fetcher
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_fetcher", fetcher),
]
for obj in expected_objects:
assert obj in actual_objects
def test_metadata_fetcher_add_zero(self, swh_storage, sample_data):
fetcher = sample_data.metadata_fetcher
actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
assert actual_fetcher is None # does not exist
swh_storage.metadata_fetcher_add([])
def test_metadata_authority_add_get(self, swh_storage, sample_data):
authority = sample_data.metadata_authority
actual_authority = swh_storage.metadata_authority_get(
authority.type, authority.url
)
assert actual_authority is None # does not exist
swh_storage.metadata_authority_add([authority])
res = swh_storage.metadata_authority_get(authority.type, authority.url)
assert res == authority
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_authority", authority),
]
for obj in expected_objects:
assert obj in actual_objects
def test_metadata_authority_add_zero(self, swh_storage, sample_data):
authority = sample_data.metadata_authority
actual_authority = swh_storage.metadata_authority_get(
authority.type, authority.url
)
assert actual_authority is None # does not exist
swh_storage.metadata_authority_add([])
def test_content_metadata_add(self, swh_storage, sample_data):
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata = sample_data.content_metadata[:2]
content_swhid = SWHID(
object_type="content", object_id=hash_to_bytes(content.sha1_git)
)
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add(content_metadata)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content_swhid, authority
)
assert result["next_page_token"] is None
assert list(sorted(result["results"], key=lambda x: x.discovery_date,)) == list(
content_metadata
)
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_authority", authority),
("metadata_fetcher", fetcher),
] + [("raw_extrinsic_metadata", item) for item in content_metadata]
for obj in expected_objects:
assert obj in actual_objects
def test_content_metadata_add_duplicate(self, swh_storage, sample_data):
"""Duplicates should be silently updated."""
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
content_swhid = SWHID(
object_type="content", object_id=hash_to_bytes(content.sha1_git)
)
new_content_metadata2 = attr.evolve(
content_metadata2, format="new-format", metadata=b"new-metadata",
)
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
swh_storage.raw_extrinsic_metadata_add([new_content_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content_swhid, authority
)
assert result["next_page_token"] is None
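        # which of the two behaviors happens is backend-specific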
expected_results1 = (content_metadata, new_content_metadata2)
expected_results2 = (content_metadata, content_metadata2)
assert tuple(sorted(result["results"], key=lambda x: x.discovery_date,)) in (
expected_results1, # cassandra
expected_results2, # postgresql
)
def test_content_metadata_get(self, swh_storage, sample_data):
content, content2 = sample_data.contents[:2]
fetcher, fetcher2 = sample_data.fetchers[:2]
authority, authority2 = sample_data.authorities[:2]
(
content1_metadata1,
content1_metadata2,
content1_metadata3,
) = sample_data.content_metadata[:3]
content1_swhid = SWHID(object_type="content", object_id=content.sha1_git)
content2_swhid = SWHID(object_type="content", object_id=content2.sha1_git)
content2_metadata = attr.evolve(content1_metadata2, id=content2_swhid)
swh_storage.metadata_authority_add([authority, authority2])
swh_storage.metadata_fetcher_add([fetcher, fetcher2])
swh_storage.raw_extrinsic_metadata_add(
[
content1_metadata1,
content1_metadata2,
content1_metadata3,
content2_metadata,
]
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content1_swhid, authority
)
assert result["next_page_token"] is None
assert [content1_metadata1, content1_metadata2] == list(
sorted(result["results"], key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content1_swhid, authority2
)
assert result["next_page_token"] is None
assert [content1_metadata3] == list(
sorted(result["results"], key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content2_swhid, authority
)
assert result["next_page_token"] is None
assert [content2_metadata] == list(result["results"],)
def test_content_metadata_get_after(self, swh_storage, sample_data):
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
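        # query with 'after' set just before, exactly at, and after the stored discovery dates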
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT,
content_swhid,
authority,
after=content_metadata.discovery_date - timedelta(seconds=1),
)
assert result["next_page_token"] is None
assert [content_metadata, content_metadata2] == list(
sorted(result["results"], key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT,
content_swhid,
authority,
after=content_metadata.discovery_date,
)
assert result["next_page_token"] is None
assert result["results"] == [content_metadata2]
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT,
content_swhid,
authority,
after=content_metadata2.discovery_date,
)
assert result["next_page_token"] is None
assert result["results"] == []
def test_content_metadata_get_paginate(self, swh_storage, sample_data):
content = sample_data.content
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
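        # fetch once without pagination (result unused), then page through with limit=1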
swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content_swhid, authority
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content_swhid, authority, limit=1
)
assert result["next_page_token"] is not None
assert result["results"] == [content_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT,
content_swhid,
authority,
limit=1,
page_token=result["next_page_token"],
)
assert result["next_page_token"] is None
assert result["results"] == [content_metadata2]
def test_content_metadata_get_paginate_same_date(self, swh_storage, sample_data):
content = sample_data.content
fetcher1, fetcher2 = sample_data.fetchers[:2]
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
swh_storage.metadata_fetcher_add([fetcher1, fetcher2])
swh_storage.metadata_authority_add([authority])
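        # the second entry only swaps the fetcher; its discovery date is left unchanged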
new_content_metadata2 = attr.evolve(
content_metadata2,
discovery_date=content_metadata2.discovery_date,
fetcher=attr.evolve(fetcher2, metadata=None),
)
swh_storage.raw_extrinsic_metadata_add(
[content_metadata, new_content_metadata2]
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, content_swhid, authority, limit=1
)
assert result["next_page_token"] is not None
assert result["results"] == [content_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT,
content_swhid,
authority,
limit=1,
page_token=result["next_page_token"],
)
assert result["next_page_token"] is None
assert result["results"] == [new_content_metadata2]
def test_content_metadata_get__invalid_id(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
content_metadata, content_metadata2 = sample_data.content_metadata[:2]
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
with pytest.raises(StorageArgumentException, match="SWHID"):
swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.CONTENT, origin.url, authority
)
def test_origin_metadata_add(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin.url, authority
)
assert result["next_page_token"] is None
assert list(sorted(result["results"], key=lambda x: x.discovery_date)) == [
origin_metadata,
origin_metadata2,
]
actual_objects = list(swh_storage.journal_writer.journal.objects)
expected_objects = [
("metadata_authority", authority),
("metadata_fetcher", fetcher),
("raw_extrinsic_metadata", origin_metadata),
("raw_extrinsic_metadata", origin_metadata2),
]
for obj in expected_objects:
assert obj in actual_objects
def test_origin_metadata_add_duplicate(self, swh_storage, sample_data):
"""Duplicates should be silently updated."""
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
new_origin_metadata2 = attr.evolve(
origin_metadata2, format="new-format", metadata=b"new-metadata",
)
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
swh_storage.raw_extrinsic_metadata_add([new_origin_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin.url, authority
)
assert result["next_page_token"] is None
        # which of the two behaviors happens is backend-specific.
expected_results1 = (origin_metadata, new_origin_metadata2)
expected_results2 = (origin_metadata, origin_metadata2)
assert tuple(sorted(result["results"], key=lambda x: x.discovery_date,)) in (
expected_results1, # cassandra
expected_results2, # postgresql
)
def test_origin_metadata_get(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
fetcher, fetcher2 = sample_data.fetchers[:2]
authority, authority2 = sample_data.authorities[:2]
(
origin1_metadata1,
origin1_metadata2,
origin1_metadata3,
) = sample_data.origin_metadata[:3]
assert swh_storage.origin_add([origin, origin2]) == {"origin:add": 2}
origin2_metadata = attr.evolve(origin1_metadata2, id=origin2.url)
swh_storage.metadata_authority_add([authority, authority2])
swh_storage.metadata_fetcher_add([fetcher, fetcher2])
swh_storage.raw_extrinsic_metadata_add(
[origin1_metadata1, origin1_metadata2, origin1_metadata3, origin2_metadata]
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin.url, authority
)
assert result["next_page_token"] is None
assert [origin1_metadata1, origin1_metadata2] == list(
sorted(result["results"], key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin.url, authority2
)
assert result["next_page_token"] is None
assert [origin1_metadata3] == list(
sorted(result["results"], key=lambda x: x.discovery_date,)
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin2.url, authority
)
assert result["next_page_token"] is None
assert [origin2_metadata] == list(result["results"],)
def test_origin_metadata_get_after(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
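        # query with 'after' set just before, exactly at, and after the stored discovery dates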
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN,
origin.url,
authority,
after=origin_metadata.discovery_date - timedelta(seconds=1),
)
assert result["next_page_token"] is None
assert list(sorted(result["results"], key=lambda x: x.discovery_date,)) == [
origin_metadata,
origin_metadata2,
]
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN,
origin.url,
authority,
after=origin_metadata.discovery_date,
)
assert result["next_page_token"] is None
assert result["results"] == [origin_metadata2]
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN,
origin.url,
authority,
after=origin_metadata2.discovery_date,
)
assert result["next_page_token"] is None
assert result["results"] == []
def test_origin_metadata_get_paginate(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
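        # fetch once without pagination (result unused), then page through with limit=1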
swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin.url, authority
)
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin.url, authority, limit=1
)
assert result["next_page_token"] is not None
assert result["results"] == [origin_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN,
origin.url,
authority,
limit=1,
page_token=result["next_page_token"],
)
assert result["next_page_token"] is None
assert result["results"] == [origin_metadata2]
def test_origin_metadata_get_paginate_same_date(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher1, fetcher2 = sample_data.fetchers[:2]
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher1, fetcher2])
swh_storage.metadata_authority_add([authority])
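        # the second entry only swaps the fetcher; its discovery date is left unchanged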
new_origin_metadata2 = attr.evolve(
origin_metadata2,
discovery_date=origin_metadata2.discovery_date,
fetcher=attr.evolve(fetcher2, metadata=None),
)
swh_storage.raw_extrinsic_metadata_add([origin_metadata, new_origin_metadata2])
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, origin.url, authority, limit=1
)
assert result["next_page_token"] is not None
assert result["results"] == [origin_metadata]
result = swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN,
origin.url,
authority,
limit=1,
page_token=result["next_page_token"],
)
assert result["next_page_token"] is None
assert result["results"] == [new_origin_metadata2]
def test_origin_metadata_add_missing_authority(self, swh_storage, sample_data):
origin = sample_data.origin
fetcher = sample_data.metadata_fetcher
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
with pytest.raises(StorageArgumentException, match="authority"):
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
def test_origin_metadata_add_missing_fetcher(self, swh_storage, sample_data):
origin = sample_data.origin
authority = sample_data.metadata_authority
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_authority_add([authority])
with pytest.raises(StorageArgumentException, match="fetcher"):
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
def test_origin_metadata_get__invalid_id_type(self, swh_storage, sample_data):
origin = sample_data.origin
authority = sample_data.metadata_authority
fetcher = sample_data.metadata_fetcher
origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
content_metadata = sample_data.content_metadata[0]
assert swh_storage.origin_add([origin]) == {"origin:add": 1}
swh_storage.metadata_fetcher_add([fetcher])
swh_storage.metadata_authority_add([authority])
swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
with pytest.raises(StorageArgumentException, match="SWHID"):
swh_storage.raw_extrinsic_metadata_get(
MetadataTargetType.ORIGIN, content_metadata.id, authority,
)


class TestStorageGeneratedData:
def test_generate_content_get(self, swh_storage, swh_contents):
contents_with_data = [c.to_dict() for c in swh_contents if c.status != "absent"]
# input the list of sha1s we want from storage
get_sha1s = [c["sha1"] for c in contents_with_data]
# retrieve contents
actual_contents = list(swh_storage.content_get(get_sha1s))
assert None not in actual_contents
assert_contents_ok(contents_with_data, actual_contents)
def test_generate_content_get_metadata(self, swh_storage, swh_contents):
# input the list of sha1s we want from storage
expected_contents = [c.to_dict() for c in swh_contents if c.status != "absent"]
get_sha1s = [c["sha1"] for c in expected_contents]
# retrieve contents
meta_contents = swh_storage.content_get_metadata(get_sha1s)
assert len(list(meta_contents)) == len(get_sha1s)
actual_contents = []
for contents in meta_contents.values():
actual_contents.extend(contents)
keys_to_check = {"length", "status", "sha1", "sha1_git", "sha256", "blake2s256"}
assert_contents_ok(
expected_contents, actual_contents, keys_to_check=keys_to_check
)
def test_generate_content_get_range(self, swh_storage, swh_contents):
"""content_get_range returns complete range"""
present_contents = [c.to_dict() for c in swh_contents if c.status != "absent"]
get_sha1s = sorted([c.sha1 for c in swh_contents if c.status != "absent"])
start = get_sha1s[2]
end = get_sha1s[-2]
actual_result = swh_storage.content_get_range(start, end)
assert actual_result["next"] is None
actual_contents = actual_result["contents"]
expected_contents = [c for c in present_contents if start <= c["sha1"] <= end]
if expected_contents:
assert_contents_ok(expected_contents, actual_contents, ["sha1"])
else:
assert actual_contents == []
def test_generate_content_get_range_full(self, swh_storage, swh_contents):
"""content_get_range for a full range returns all available contents"""
present_contents = [c.to_dict() for c in swh_contents if c.status != "absent"]
start = b"0" * 40
end = b"f" * 40
actual_result = swh_storage.content_get_range(start, end)
assert actual_result["next"] is None
actual_contents = actual_result["contents"]
expected_contents = [c for c in present_contents if start <= c["sha1"] <= end]
if expected_contents:
assert_contents_ok(expected_contents, actual_contents, ["sha1"])
else:
assert actual_contents == []
def test_generate_content_get_range_empty(self, swh_storage, swh_contents):
"""content_get_range for an empty range returns nothing"""
start = b"0" * 40
end = b"f" * 40
actual_result = swh_storage.content_get_range(end, start)
assert actual_result["next"] is None
assert len(actual_result["contents"]) == 0
def test_generate_content_get_range_limit_none(self, swh_storage):
"""content_get_range call with wrong limit input should fail"""
with pytest.raises(StorageArgumentException) as e:
swh_storage.content_get_range(start=None, end=None, limit=None)
assert e.value.args == ("limit should not be None",)
def test_generate_content_get_range_no_limit(self, swh_storage, swh_contents):
"""content_get_range returns contents within range provided"""
# input the list of sha1s we want from storage
get_sha1s = sorted([c.sha1 for c in swh_contents if c.status != "absent"])
start = get_sha1s[0]
end = get_sha1s[-1]
# retrieve contents
actual_result = swh_storage.content_get_range(start, end)
actual_contents = actual_result["contents"]
assert actual_result["next"] is None
assert len(actual_contents) == len(get_sha1s)
expected_contents = [c.to_dict() for c in swh_contents if c.status != "absent"]
assert_contents_ok(expected_contents, actual_contents, ["sha1"])
def test_generate_content_get_range_limit(self, swh_storage, swh_contents):
"""content_get_range paginates results if limit exceeded"""
contents_map = {c.sha1: c.to_dict() for c in swh_contents}
# input the list of sha1s we want from storage
get_sha1s = sorted([c.sha1 for c in swh_contents if c.status != "absent"])
start = get_sha1s[0]
end = get_sha1s[-1]
# retrieve contents limited to n-1 results
limited_results = len(get_sha1s) - 1
actual_result = swh_storage.content_get_range(start, end, limit=limited_results)
actual_contents = actual_result["contents"]
assert actual_result["next"] == get_sha1s[-1]
assert len(actual_contents) == limited_results
expected_contents = [contents_map[sha1] for sha1 in get_sha1s[:-1]]
assert_contents_ok(expected_contents, actual_contents, ["sha1"])
# retrieve next part
actual_results2 = swh_storage.content_get_range(start=end, end=end)
assert actual_results2["next"] is None
actual_contents2 = actual_results2["contents"]
assert len(actual_contents2) == 1
assert_contents_ok([contents_map[get_sha1s[-1]]], actual_contents2, ["sha1"])
def test_origin_get_range_from_zero(self, swh_storage, swh_origins):
actual_origins = list(
swh_storage.origin_get_range(origin_from=0, origin_count=0)
)
assert len(actual_origins) == 0
actual_origins = list(
swh_storage.origin_get_range(origin_from=0, origin_count=1)
)
assert len(actual_origins) == 1
assert actual_origins[0]["id"] == 1
assert actual_origins[0]["url"] == swh_origins[0].url
@pytest.mark.parametrize(
"origin_from,origin_count",
[(1, 1), (1, 10), (1, 20), (1, 101), (11, 0), (11, 10), (91, 11)],
)
def test_origin_get_range(
self, swh_storage, swh_origins, origin_from, origin_count
):
actual_origins = list(
swh_storage.origin_get_range(
origin_from=origin_from, origin_count=origin_count
)
)
origins_with_id = list(enumerate(swh_origins, start=1))
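        # origin ids start at 1, hence the off-by-one shift when slicing the zero-based list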
expected_origins = [
{"url": origin.url, "id": origin_id,}
for (origin_id, origin) in origins_with_id[
origin_from - 1 : origin_from + origin_count - 1
]
]
assert actual_origins == expected_origins
@pytest.mark.parametrize("limit", [1, 7, 10, 100, 1000])
def test_origin_list(self, swh_storage, swh_origins, limit):
returned_origins = []
page_token = None
i = 0
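        # keep requesting pages until no next_page_token is returned;
        # only the last page may hold fewer than `limit` origins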
while True:
result = swh_storage.origin_list(page_token=page_token, limit=limit)
assert len(result["origins"]) <= limit
returned_origins.extend(origin["url"] for origin in result["origins"])
i += 1
page_token = result.get("next_page_token")
if page_token is None:
assert i * limit >= len(swh_origins)
break
else:
assert len(result["origins"]) == limit
expected_origins = [origin.url for origin in swh_origins]
assert sorted(returned_origins) == sorted(expected_origins)
def test_origin_count(self, swh_storage, sample_data):
swh_storage.origin_add(sample_data.origins)
assert swh_storage.origin_count("github") == 3
assert swh_storage.origin_count("gitlab") == 2
assert swh_storage.origin_count(".*user.*", regexp=True) == 5
assert swh_storage.origin_count(".*user.*", regexp=False) == 0
assert swh_storage.origin_count(".*user1.*", regexp=True) == 2
assert swh_storage.origin_count(".*user1.*", regexp=False) == 0
def test_origin_count_with_visit_no_visits(self, swh_storage, sample_data):
swh_storage.origin_add(sample_data.origins)
# none of them have visits, so with_visit=True => 0
assert swh_storage.origin_count("github", with_visit=True) == 0
assert swh_storage.origin_count("gitlab", with_visit=True) == 0
assert swh_storage.origin_count(".*user.*", regexp=True, with_visit=True) == 0
assert swh_storage.origin_count(".*user.*", regexp=False, with_visit=True) == 0
assert swh_storage.origin_count(".*user1.*", regexp=True, with_visit=True) == 0
assert swh_storage.origin_count(".*user1.*", regexp=False, with_visit=True) == 0
def test_origin_count_with_visit_with_visits_no_snapshot(
self, swh_storage, sample_data
):
swh_storage.origin_add(sample_data.origins)
origin_url = "https://github.com/user1/repo1"
visit = OriginVisit(origin=origin_url, date=now(), type="git",)
swh_storage.origin_visit_add([visit])
assert swh_storage.origin_count("github", with_visit=False) == 3
# it has a visit, but no snapshot, so with_visit=True => 0
assert swh_storage.origin_count("github", with_visit=True) == 0
assert swh_storage.origin_count("gitlab", with_visit=False) == 2
# these gitlab origins have no visit
assert swh_storage.origin_count("gitlab", with_visit=True) == 0
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=False)
== 1
)
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 0
)
assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 0
def test_origin_count_with_visit_with_visits_and_snapshot(
self, swh_storage, sample_data
):
snapshot = sample_data.snapshot
swh_storage.origin_add(sample_data.origins)
swh_storage.snapshot_add([snapshot])
origin_url = "https://github.com/user1/repo1"
visit = OriginVisit(origin=origin_url, date=now(), type="git",)
visit = swh_storage.origin_visit_add([visit])[0]
swh_storage.origin_visit_status_add(
[
OriginVisitStatus(
origin=origin_url,
visit=visit.visit,
date=now(),
status="ongoing",
snapshot=snapshot.id,
)
]
)
assert swh_storage.origin_count("github", with_visit=False) == 3
# github/user1 has a visit and a snapshot, so with_visit=True => 1
assert swh_storage.origin_count("github", with_visit=True) == 1
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=False)
== 1
)
assert (
swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 1
)
assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 1
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(strategies.lists(objects(split_content=True), max_size=2))
def test_add_arbitrary(self, swh_storage, objects):
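        # origin_visit objects need their origin registered first and go through
        # origin_visit_add; every other object type uses the matching
        # <object_type>_add endpoint, ignoring HashCollision raised by the
        # generated test data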
for (obj_type, obj) in objects:
if obj.object_type == "origin_visit":
swh_storage.origin_add([Origin(url=obj.origin)])
visit = OriginVisit(origin=obj.origin, date=obj.date, type=obj.type,)
swh_storage.origin_visit_add([visit])
else:
method = getattr(swh_storage, obj_type + "_add")
try:
method([obj])
except HashCollision:
pass


@pytest.mark.db
class TestLocalStorage:
"""Test the local storage"""
# This test is only relevant on the local storage, with an actual
# objstorage raising an exception
def test_content_add_objstorage_exception(self, swh_storage, sample_data):
content = sample_data.content
swh_storage.objstorage.content_add = Mock(
side_effect=Exception("mocked broken objstorage")
)
with pytest.raises(Exception, match="mocked broken"):
swh_storage.content_add([content])
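        # the failed objstorage write must have kept the content out of storage too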
missing = list(swh_storage.content_missing([content.hashes()]))
assert missing == [content.sha1]


@pytest.mark.db
class TestStorageRaceConditions:
@pytest.mark.xfail
def test_content_add_race(self, swh_storage, sample_data):
content = sample_data.content
results = queue.Queue()
def thread():
try:
with db_transaction(swh_storage) as (db, cur):
ret = swh_storage.content_add([content], db=db, cur=cur)
results.put((threading.get_ident(), "data", ret))
except Exception as e:
results.put((threading.get_ident(), "exc", e))
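        # two threads add the same content concurrently; both must succeed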
t1 = threading.Thread(target=thread)
t2 = threading.Thread(target=thread)
t1.start()
        # uncommenting the sleep below avoids the race condition
# import time
# time.sleep(1)
t2.start()
t1.join()
t2.join()
r1 = results.get(block=False)
r2 = results.get(block=False)
with pytest.raises(queue.Empty):
results.get(block=False)
assert r1[0] != r2[0]
        assert r1[1] == "data", "Got exception %r in thread %s" % (r1[2], r1[0])
        assert r2[1] == "data", "Got exception %r in thread %s" % (r2[2], r2[0])


@pytest.mark.db
class TestPgStorage:
"""This class is dedicated for the rare case where the schema needs to
be altered dynamically.
Otherwise, the tests could be blocking when ran altogether.
"""
def test_content_update_with_new_cols(self, swh_storage, sample_data):
content, content2 = sample_data.contents[:2]
swh_storage.journal_writer.journal = None # TODO, not supported
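        # temporarily add two extra columns to the content table;
        # they are dropped again at the end of the test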
with db_transaction(swh_storage) as (_, cur):
cur.execute(
"""alter table content
add column test text default null,
add column test2 text default null"""
)
swh_storage.content_add([content])
cont = content.to_dict()
cont["test"] = "value-1"
cont["test2"] = "value-2"
swh_storage.content_update([cont], keys=["test", "test2"])
with db_transaction(swh_storage) as (_, cur):
cur.execute(
"""SELECT sha1, sha1_git, sha256, length, status,
test, test2
FROM content WHERE sha1 = %s""",
(cont["sha1"],),
)
datum = cur.fetchone()
assert datum == (
cont["sha1"],
cont["sha1_git"],
cont["sha256"],
cont["length"],
"visible",
cont["test"],
cont["test2"],
)
with db_transaction(swh_storage) as (_, cur):
cur.execute(
"""alter table content drop column test,
drop column test2"""
)
def test_content_add_db(self, swh_storage, sample_data):
content = sample_data.content
actual_result = swh_storage.content_add([content])
assert actual_result == {
"content:add": 1,
"content:add:bytes": content.length,
}
if hasattr(swh_storage, "objstorage"):
assert content.sha1 in swh_storage.objstorage.objstorage
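        # the row must also be present in the content table, with status "visible"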
with db_transaction(swh_storage) as (_, cur):
cur.execute(
"SELECT sha1, sha1_git, sha256, length, status"
" FROM content WHERE sha1 = %s",
(content.sha1,),
)
datum = cur.fetchone()
assert datum == (
content.sha1,
content.sha1_git,
content.sha256,
content.length,
"visible",
)
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
assert contents[0] == attr.evolve(content, data=None)
def test_content_add_metadata_db(self, swh_storage, sample_data):
content = attr.evolve(sample_data.content, data=None, ctime=now())
actual_result = swh_storage.content_add_metadata([content])
assert actual_result == {
"content:add": 1,
}
if hasattr(swh_storage, "objstorage"):
assert content.sha1 not in swh_storage.objstorage.objstorage
with db_transaction(swh_storage) as (_, cur):
cur.execute(
"SELECT sha1, sha1_git, sha256, length, status"
" FROM content WHERE sha1 = %s",
(content.sha1,),
)
datum = cur.fetchone()
assert datum == (
content.sha1,
content.sha1_git,
content.sha256,
content.length,
"visible",
)
contents = [
obj
for (obj_type, obj) in swh_storage.journal_writer.journal.objects
if obj_type == "content"
]
assert len(contents) == 1
assert contents[0] == content
def test_skipped_content_add_db(self, swh_storage, sample_data):
content, cont2 = sample_data.skipped_contents[:2]
content2 = attr.evolve(cont2, blake2s256=None)
actual_result = swh_storage.skipped_content_add([content, content, content2])
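        # `content` is passed twice on purpose: depending on whether the backend
        # deduplicates within a single call, the reported count is 2 or 3, but
        # only two distinct rows end up in the table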
assert 2 <= actual_result.pop("skipped_content:add") <= 3
assert actual_result == {}
with db_transaction(swh_storage) as (_, cur):
cur.execute(
"SELECT sha1, sha1_git, sha256, blake2s256, "
"length, status, reason "
"FROM skipped_content ORDER BY sha1_git"
)
dbdata = cur.fetchall()
assert len(dbdata) == 2
assert dbdata[0] == (
content.sha1,
content.sha1_git,
content.sha256,
content.blake2s256,
content.length,
"absent",
"Content too long",
)
assert dbdata[1] == (
content2.sha1,
content2.sha1_git,
content2.sha256,
content2.blake2s256,
content2.length,
"absent",
"Content too long",
)
def test_clear_buffers(self, swh_storage):
"""Calling clear buffers on real storage does nothing
"""
assert swh_storage.clear_buffers() is None
def test_flush(self, swh_storage):
"""Calling clear buffers on real storage does nothing
"""
assert swh_storage.flush() == {}