diff --git a/sql/upgrades/162.sql b/sql/upgrades/162.sql
new file mode 100644
index 00000000..e4a47e31
--- /dev/null
+++ b/sql/upgrades/162.sql
@@ -0,0 +1,52 @@
+-- SWH DB schema upgrade
+-- from_version: 161
+-- to_version: 162
+-- description: Make swh_directory_walk_one join skipped_content in addition to content
+
+-- latest schema version
+insert into dbversion(version, release, description)
+      values(162, now(), 'Make swh_directory_walk_one join skipped_content in addition to content');
+
+create or replace function swh_directory_walk_one(walked_dir_id sha1_git)
+    returns setof directory_entry
+    language sql
+    stable
+as $$
+    with dir as (
+        select id as dir_id, dir_entries, file_entries, rev_entries
+        from directory
+        where id = walked_dir_id),
+    ls_d as (select dir_id, unnest(dir_entries) as entry_id from dir),
+    ls_f as (select dir_id, unnest(file_entries) as entry_id from dir),
+    ls_r as (select dir_id, unnest(rev_entries) as entry_id from dir)
+    (select dir_id, 'dir'::directory_entry_type as type,
+            e.target, e.name, e.perms, NULL::content_status,
+            NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
+     from ls_d
+     left join directory_entry_dir e on ls_d.entry_id = e.id)
+    union
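+    -- 'file' entries are resolved in two passes: first against the regular
+    -- 'content' table, then against 'skipped_content' for targets that the
+    -- first pass did not find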
+    (with known_contents as
+        (select dir_id, 'file'::directory_entry_type as type,
+            e.target, e.name, e.perms, c.status,
+            c.sha1, c.sha1_git, c.sha256, c.length
+         from ls_f
+         left join directory_entry_file e on ls_f.entry_id = e.id
+         inner join content c on e.target = c.sha1_git)
+        select * from known_contents
+        union
+        (select dir_id, 'file'::directory_entry_type as type,
+            e.target, e.name, e.perms, c.status,
+            c.sha1, c.sha1_git, c.sha256, c.length
+         from ls_f
+         left join directory_entry_file e on ls_f.entry_id = e.id
+         left join skipped_content c on e.target = c.sha1_git
+         where not exists (select 1 from known_contents where known_contents.sha1_git=e.target)))
+    union
+    (select dir_id, 'rev'::directory_entry_type as type,
+            e.target, e.name, e.perms, NULL::content_status,
+            NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
+     from ls_r
+     left join directory_entry_rev e on ls_r.entry_id = e.id)
+    order by name;
+$$;
+
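Note on the upgrade above: the rewritten `swh_directory_walk_one` resolves 'file' entries in two passes, first against `content`, then against `skipped_content` for the targets the first pass missed. Below is a minimal smoke test one might run from Python after applying the upgrade; it is a hedged sketch, not part of the patch, and assumes psycopg2, a reachable database named `softwareheritage-dev`, and a placeholder directory id.

```python
# Hedged sketch: exercise the upgraded swh_directory_walk_one().
# Assumptions (not from the patch): psycopg2 is installed, the DSN below
# points at a database holding the SWH schema, and the directory id is a
# placeholder to replace with a real sha1_git.
import psycopg2

def walk_one(dsn: str, dir_id: bytes) -> list:
    """List one directory level; entries targeting skipped_content rows
    should now carry their status/hashes instead of NULLs."""
    with psycopg2.connect(dsn) as db:
        with db.cursor() as cur:
            cur.execute("SELECT * FROM swh_directory_walk_one(%s)", (dir_id,))
            return cur.fetchall()

if __name__ == "__main__":
    for entry in walk_one("dbname=softwareheritage-dev", bytes.fromhex("00" * 20)):
        print(entry)
```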
diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py
index 979119af..9ea99fe6 100644
--- a/swh/storage/cassandra/cql.py
+++ b/swh/storage/cassandra/cql.py
@@ -1,964 +1,975 @@
 # Copyright (C) 2019-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import dataclasses
 import datetime
 import functools
 import logging
 import random
 from typing import (
     Any,
     Callable,
     Dict,
     Iterable,
     Iterator,
     List,
     Optional,
     Tuple,
     Type,
     TypeVar,
     Union,
 )
 
 from cassandra import CoordinationFailure
 from cassandra.cluster import Cluster, EXEC_PROFILE_DEFAULT, ExecutionProfile, ResultSet
 from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy
 from cassandra.query import PreparedStatement, BoundStatement, dict_factory
 from tenacity import (
     retry,
     stop_after_attempt,
     wait_random_exponential,
     retry_if_exception_type,
 )
 from mypy_extensions import NamedArg
 
 from swh.model.model import (
     Content,
     SkippedContent,
     Sha1Git,
     TimestampWithTimezone,
     Timestamp,
     Person,
 )
 
 from swh.storage.interface import ListOrder
 
 from .common import TOKEN_BEGIN, TOKEN_END, hash_url, remove_keys
 from .model import (
     BaseRow,
     ContentRow,
     DirectoryEntryRow,
     DirectoryRow,
+    MAGIC_NULL_PK,
     MetadataAuthorityRow,
     MetadataFetcherRow,
     ObjectCountRow,
     OriginRow,
     OriginVisitRow,
     OriginVisitStatusRow,
     RawExtrinsicMetadataRow,
     ReleaseRow,
     RevisionParentRow,
     RevisionRow,
     SkippedContentRow,
     SnapshotBranchRow,
     SnapshotRow,
 )
 from .schema import CREATE_TABLES_QUERIES, HASH_ALGORITHMS
 
 
 logger = logging.getLogger(__name__)
 
 
 _execution_profiles = {
     EXEC_PROFILE_DEFAULT: ExecutionProfile(
         load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy()),
         row_factory=dict_factory,
     ),
 }
 # Configuration for cassandra-driver's access to servers:
 # * hit the right server directly when sending a query (TokenAwarePolicy),
 # * if there is more than one matching server, pick one at random in the
 #   same datacenter as the client (DCAwareRoundRobinPolicy)
 
 
 def create_keyspace(
     hosts: List[str], keyspace: str, port: int = 9042, *, durable_writes=True
 ):
     cluster = Cluster(hosts, port=port, execution_profiles=_execution_profiles)
     session = cluster.connect()
     extra_params = ""
     if not durable_writes:
         extra_params = "AND durable_writes = false"
     session.execute(
         """CREATE KEYSPACE IF NOT EXISTS "%s"
                        WITH REPLICATION = {
                            'class' : 'SimpleStrategy',
                            'replication_factor' : 1
                        } %s;
                     """
         % (keyspace, extra_params)
     )
     session.execute('USE "%s"' % keyspace)
     for query in CREATE_TABLES_QUERIES:
         session.execute(query)
 
 
 TRet = TypeVar("TRet")
 
 
 def _prepared_statement(
     query: str,
 ) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]:
     """Returns a decorator usable on methods of CqlRunner, to
     inject them with a 'statement' argument, that is a prepared
     statement corresponding to the query.
 
     This only works on methods of CqlRunner, as preparing a
     statement requires a connection to a Cassandra server."""
 
     def decorator(f):
         @functools.wraps(f)
         def newf(self, *args, **kwargs) -> TRet:
             if f.__name__ not in self._prepared_statements:
                 statement: PreparedStatement = self._session.prepare(query)
                 self._prepared_statements[f.__name__] = statement
             return f(
                 self, *args, **kwargs, statement=self._prepared_statements[f.__name__]
             )
 
         return newf
 
     return decorator
 
 
 TArg = TypeVar("TArg")
 TSelf = TypeVar("TSelf")
 
 
 def _prepared_insert_statement(
     row_class: Type[BaseRow],
 ) -> Callable[
     [Callable[[TSelf, TArg, NamedArg(Any, "statement")], TRet]],  # noqa
     Callable[[TSelf, TArg], TRet],
 ]:
     """Shorthand for using `_prepared_statement` for `INSERT INTO`
     statements."""
     columns = row_class.cols()
     return _prepared_statement(
         "INSERT INTO %s (%s) VALUES (%s)"
         % (row_class.TABLE, ", ".join(columns), ", ".join("?" for _ in columns),)
     )
 
 
 def _prepared_exists_statement(
     table_name: str,
 ) -> Callable[
     [Callable[[TSelf, TArg, NamedArg(Any, "statement")], TRet]],  # noqa
     Callable[[TSelf, TArg], TRet],
 ]:
     """Shorthand for using `_prepared_statement` for queries that only
     check which ids in a list exist in the table."""
     return _prepared_statement(f"SELECT id FROM {table_name} WHERE id IN ?")
 
 
 def _prepared_select_statement(
     row_class: Type[BaseRow], clauses: str = "", cols: Optional[List[str]] = None,
 ) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]:
     if cols is None:
         cols = row_class.cols()
 
     return _prepared_statement(
         f"SELECT {', '.join(cols)} FROM {row_class.TABLE} {clauses}"
     )
 
 
 def _prepared_select_statements(
     row_class: Type[BaseRow], queries: Dict[Any, str],
 ) -> Callable[[Callable[..., TRet]], Callable[..., TRet]]:
     """Like _prepared_statement, but supports multiple statements, passed a dict,
     and passes a dict of prepared statements to the decorated method"""
     cols = row_class.cols()
 
     statement_start = f"SELECT {', '.join(cols)} FROM {row_class.TABLE} "
 
     def decorator(f):
         @functools.wraps(f)
         def newf(self, *args, **kwargs) -> TRet:
             if f.__name__ not in self._prepared_statements:
                 self._prepared_statements[f.__name__] = {
                     key: self._session.prepare(statement_start + query)
                     for (key, query) in queries.items()
                 }
             return f(
                 self, *args, **kwargs, statements=self._prepared_statements[f.__name__]
             )
 
         return newf
 
     return decorator
 
 
 class CqlRunner:
     """Class managing prepared statements and building queries to be sent
     to Cassandra."""
 
     def __init__(self, hosts: List[str], keyspace: str, port: int):
         self._cluster = Cluster(
             hosts, port=port, execution_profiles=_execution_profiles
         )
         self._session = self._cluster.connect(keyspace)
         self._cluster.register_user_type(
             keyspace, "microtimestamp_with_timezone", TimestampWithTimezone
         )
         self._cluster.register_user_type(keyspace, "microtimestamp", Timestamp)
         self._cluster.register_user_type(keyspace, "person", Person)
 
         # Maps method names to a PreparedStatement for methods decorated
         # with @_prepared_statement (and its wrappers,
         # _prepared_insert_statement, _prepared_exists_statement, and
         # _prepared_select_statement); or to a dict of PreparedStatements
         # for methods decorated with @_prepared_select_statements
         self._prepared_statements: Dict[
             str, Union[PreparedStatement, Dict[Any, PreparedStatement]]
         ] = {}
 
     ##########################
     # Common utility functions
     ##########################
 
     MAX_RETRIES = 3
 
     @retry(
         wait=wait_random_exponential(multiplier=1, max=10),
         stop=stop_after_attempt(MAX_RETRIES),
         retry=retry_if_exception_type(CoordinationFailure),
     )
     def _execute_with_retries(self, statement, args) -> ResultSet:
         return self._session.execute(statement, args, timeout=1000.0)
 
     @_prepared_statement(
         "UPDATE object_count SET count = count + ? "
         "WHERE partition_key = 0 AND object_type = ?"
     )
     def _increment_counter(
         self, object_type: str, nb: int, *, statement: PreparedStatement
     ) -> None:
         self._execute_with_retries(statement, [nb, object_type])
 
     def _add_one(self, statement, obj: BaseRow) -> None:
         self._increment_counter(obj.TABLE, 1)
         self._execute_with_retries(statement, dataclasses.astuple(obj))
 
     _T = TypeVar("_T", bound=BaseRow)
 
     def _get_random_row(self, row_class: Type[_T], statement) -> Optional[_T]:  # noqa
         """Takes a prepared statement of the form
         "SELECT * FROM <table> WHERE token(<keys>) > ? LIMIT 1"
         and uses it to return a random row"""
         token = random.randint(TOKEN_BEGIN, TOKEN_END)
         rows = self._execute_with_retries(statement, [token])
         if not rows:
             # There is no row with a greater token; wrap around to get
             # the row with the smallest token
             rows = self._execute_with_retries(statement, [TOKEN_BEGIN])
         if rows:
             return row_class.from_dict(rows.one())  # type: ignore
         else:
             return None
 
     def _missing(self, statement, ids):
         rows = self._execute_with_retries(statement, [ids])
         found_ids = {row["id"] for row in rows}
         return [id_ for id_ in ids if id_ not in found_ids]
 
     ##########################
     # 'content' table
     ##########################
 
     def _content_add_finalize(self, statement: BoundStatement) -> None:
         """Returned currified by content_add_prepare, to be called when the
         content row should be added to the primary table."""
         self._execute_with_retries(statement, None)
         self._increment_counter("content", 1)
 
     @_prepared_insert_statement(ContentRow)
     def content_add_prepare(
         self, content: ContentRow, *, statement
     ) -> Tuple[int, Callable[[], None]]:
         """Prepares insertion of a Content to the main 'content' table.
         Returns a token (to be used in secondary tables), and a function to be
         called to perform the insertion in the main table."""
         statement = statement.bind(dataclasses.astuple(content))
 
         # Type used for hashing keys (usually, it will be
         # cassandra.metadata.Murmur3Token)
         token_class = self._cluster.metadata.token_map.token_class
 
         # Token of the row when it will be inserted. This is equivalent to
         # "SELECT token({', '.join(ContentRow.PARTITION_KEY)}) FROM content WHERE ..."
         # after the row is inserted; but we need the token to insert in the
         # index tables *before* inserting to the main 'content' table
         token = token_class.from_key(statement.routing_key).value
         assert TOKEN_BEGIN <= token <= TOKEN_END
 
         # Function to be called after the indexes contain their respective
         # row
         finalizer = functools.partial(self._content_add_finalize, statement)
 
         return (token, finalizer)
 
     @_prepared_select_statement(
         ContentRow, f"WHERE {' AND '.join(map('%s = ?'.__mod__, HASH_ALGORITHMS))}"
     )
     def content_get_from_pk(
         self, content_hashes: Dict[str, bytes], *, statement
     ) -> Optional[ContentRow]:
         rows = list(
             self._execute_with_retries(
                 statement, [content_hashes[algo] for algo in HASH_ALGORITHMS]
             )
         )
         assert len(rows) <= 1
         if rows:
             return ContentRow(**rows[0])
         else:
             return None
 
     @_prepared_select_statement(
         ContentRow, f"WHERE token({', '.join(ContentRow.PARTITION_KEY)}) = ?"
     )
     def content_get_from_token(self, token, *, statement) -> Iterable[ContentRow]:
         return map(ContentRow.from_dict, self._execute_with_retries(statement, [token]))
 
     @_prepared_select_statement(
         ContentRow, f"WHERE token({', '.join(ContentRow.PARTITION_KEY)}) > ? LIMIT 1"
     )
     def content_get_random(self, *, statement) -> Optional[ContentRow]:
         return self._get_random_row(ContentRow, statement)
 
     @_prepared_statement(
         (
             "SELECT token({0}) AS tok, {1} FROM content "
             "WHERE token({0}) >= ? AND token({0}) <= ? LIMIT ?"
         ).format(", ".join(ContentRow.PARTITION_KEY), ", ".join(ContentRow.cols()))
     )
     def content_get_token_range(
         self, start: int, end: int, limit: int, *, statement
     ) -> Iterable[Tuple[int, ContentRow]]:
         """Returns an iterable of (token, row)"""
         return (
             (row["tok"], ContentRow.from_dict(remove_keys(row, ("tok",))))
             for row in self._execute_with_retries(statement, [start, end, limit])
         )
 
     ##########################
     # 'content_by_*' tables
     ##########################
 
     @_prepared_statement(
         "SELECT sha1_git AS id FROM content_by_sha1_git WHERE sha1_git IN ?"
     )
     def content_missing_by_sha1_git(
         self, ids: List[bytes], *, statement
     ) -> List[bytes]:
         return self._missing(statement, ids)
 
     def content_index_add_one(self, algo: str, content: Content, token: int) -> None:
         """Adds a row mapping content[algo] to the token of the Content in
         the main 'content' table."""
         query = (
             f"INSERT INTO content_by_{algo} ({algo}, target_token) " f"VALUES (%s, %s)"
         )
         self._execute_with_retries(query, [content.get_hash(algo), token])
 
     def content_get_tokens_from_single_hash(
         self, algo: str, hash_: bytes
     ) -> Iterable[int]:
         assert algo in HASH_ALGORITHMS
         query = f"SELECT target_token FROM content_by_{algo} WHERE {algo} = %s"
         return (
             row["target_token"] for row in self._execute_with_retries(query, [hash_])
         )
 
     ##########################
     # 'skipped_content' table
     ##########################
 
-    _magic_null_pk = b"<null>"
-    """
-    NULLs (or all-empty blobs) are not allowed in primary keys; instead use a
-    special value that can't possibly be a valid hash.
-    """
-
     def _skipped_content_add_finalize(self, statement: BoundStatement) -> None:
         """Returned currified by skipped_content_add_prepare, to be called
         when the content row should be added to the primary table."""
         self._execute_with_retries(statement, None)
         self._increment_counter("skipped_content", 1)
 
     @_prepared_insert_statement(SkippedContentRow)
     def skipped_content_add_prepare(
         self, content, *, statement
     ) -> Tuple[int, Callable[[], None]]:
         """Prepares insertion of a Content to the main 'skipped_content' table.
         Returns a token (to be used in secondary tables), and a function to be
         called to perform the insertion in the main table."""
 
         # Replace NULLs (which are not allowed in the partition key) with
         # an empty byte string
         for key in SkippedContentRow.PARTITION_KEY:
             if getattr(content, key) is None:
-                setattr(content, key, self._magic_null_pk)
+                setattr(content, key, MAGIC_NULL_PK)
 
         statement = statement.bind(dataclasses.astuple(content))
 
         # Type used for hashing keys (usually, it will be
         # cassandra.metadata.Murmur3Token)
         token_class = self._cluster.metadata.token_map.token_class
 
         # Token of the row when it will be inserted. This is equivalent to
         # "SELECT token({', '.join(SkippedContentRow.PARTITION_KEY)})
         #  FROM skipped_content WHERE ..."
         # after the row is inserted; but we need the token to insert in the
         # index tables *before* inserting to the main 'skipped_content' table
         token = token_class.from_key(statement.routing_key).value
         assert TOKEN_BEGIN <= token <= TOKEN_END
 
         # Function to be called after the indexes contain their respective
         # row
         finalizer = functools.partial(self._skipped_content_add_finalize, statement)
 
         return (token, finalizer)
 
     @_prepared_select_statement(
         SkippedContentRow,
         f"WHERE {' AND '.join(map('%s = ?'.__mod__, HASH_ALGORITHMS))}",
     )
     def skipped_content_get_from_pk(
         self, content_hashes: Dict[str, bytes], *, statement
     ) -> Optional[SkippedContentRow]:
         rows = list(
             self._execute_with_retries(
                 statement,
-                [
-                    content_hashes[algo] or self._magic_null_pk
-                    for algo in HASH_ALGORITHMS
-                ],
+                [content_hashes[algo] or MAGIC_NULL_PK for algo in HASH_ALGORITHMS],
             )
         )
         assert len(rows) <= 1
         if rows:
-            # TODO: convert _magic_null_pk back to None?
             return SkippedContentRow.from_dict(rows[0])
         else:
             return None
 
+    @_prepared_select_statement(
+        SkippedContentRow,
+        f"WHERE token({', '.join(SkippedContentRow.PARTITION_KEY)}) = ?",
+    )
+    def skipped_content_get_from_token(
+        self, token, *, statement
+    ) -> Iterable[SkippedContentRow]:
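+        """Returns the rows of the 'skipped_content' table whose token
+        (non-cryptographic hash of the partition key) matches the given
+        one."""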
+        return map(
+            SkippedContentRow.from_dict, self._execute_with_retries(statement, [token])
+        )
+
     ##########################
     # 'skipped_content_by_*' tables
     ##########################
 
     def skipped_content_index_add_one(
         self, algo: str, content: SkippedContent, token: int
     ) -> None:
         """Adds a row mapping content[algo] to the token of the SkippedContent
         in the main 'skipped_content' table."""
         query = (
             f"INSERT INTO skipped_content_by_{algo} ({algo}, target_token) "
             f"VALUES (%s, %s)"
         )
         self._execute_with_retries(
-            query, [content.get_hash(algo) or self._magic_null_pk, token]
+            query, [content.get_hash(algo) or MAGIC_NULL_PK, token]
+        )
+
+    def skipped_content_get_tokens_from_single_hash(
+        self, algo: str, hash_: bytes
+    ) -> Iterable[int]:
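+        """Returns the tokens of the 'skipped_content' rows whose hash for
+        the given algorithm matches hash_, using the
+        'skipped_content_by_{algo}' index table."""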
+        assert algo in HASH_ALGORITHMS
+        query = f"SELECT target_token FROM skipped_content_by_{algo} WHERE {algo} = %s"
+        return (
+            row["target_token"] for row in self._execute_with_retries(query, [hash_])
         )
 
     ##########################
     # 'revision' table
     ##########################
 
     @_prepared_exists_statement("revision")
     def revision_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement(RevisionRow)
     def revision_add_one(self, revision: RevisionRow, *, statement) -> None:
         self._add_one(statement, revision)
 
     @_prepared_statement("SELECT id FROM revision WHERE id IN ?")
     def revision_get_ids(self, revision_ids, *, statement) -> Iterable[Sha1Git]:
         return (
             row["id"] for row in self._execute_with_retries(statement, [revision_ids])
         )
 
     @_prepared_select_statement(RevisionRow, "WHERE id IN ?")
     def revision_get(
         self, revision_ids: List[Sha1Git], *, statement
     ) -> Iterable[RevisionRow]:
         return map(
             RevisionRow.from_dict, self._execute_with_retries(statement, [revision_ids])
         )
 
     @_prepared_select_statement(RevisionRow, "WHERE token(id) > ? LIMIT 1")
     def revision_get_random(self, *, statement) -> Optional[RevisionRow]:
         return self._get_random_row(RevisionRow, statement)
 
     ##########################
     # 'revision_parent' table
     ##########################
 
     @_prepared_insert_statement(RevisionParentRow)
     def revision_parent_add_one(
         self, revision_parent: RevisionParentRow, *, statement
     ) -> None:
         self._add_one(statement, revision_parent)
 
     @_prepared_statement("SELECT parent_id FROM revision_parent WHERE id = ?")
     def revision_parent_get(
         self, revision_id: Sha1Git, *, statement
     ) -> Iterable[bytes]:
         return (
             row["parent_id"]
             for row in self._execute_with_retries(statement, [revision_id])
         )
 
     ##########################
     # 'release' table
     ##########################
 
     @_prepared_exists_statement("release")
     def release_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement(ReleaseRow)
     def release_add_one(self, release: ReleaseRow, *, statement) -> None:
         self._add_one(statement, release)
 
     @_prepared_select_statement(ReleaseRow, "WHERE id in ?")
     def release_get(self, release_ids: List[str], *, statement) -> Iterable[ReleaseRow]:
         return map(
             ReleaseRow.from_dict, self._execute_with_retries(statement, [release_ids])
         )
 
     @_prepared_select_statement(ReleaseRow, "WHERE token(id) > ? LIMIT 1")
     def release_get_random(self, *, statement) -> Optional[ReleaseRow]:
         return self._get_random_row(ReleaseRow, statement)
 
     ##########################
     # 'directory' table
     ##########################
 
     @_prepared_exists_statement("directory")
     def directory_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement(DirectoryRow)
     def directory_add_one(self, directory: DirectoryRow, *, statement) -> None:
         """Called after all calls to directory_entry_add_one, to
         commit/finalize the directory."""
         self._add_one(statement, directory)
 
     @_prepared_select_statement(DirectoryRow, "WHERE token(id) > ? LIMIT 1")
     def directory_get_random(self, *, statement) -> Optional[DirectoryRow]:
         return self._get_random_row(DirectoryRow, statement)
 
     ##########################
     # 'directory_entry' table
     ##########################
 
     @_prepared_insert_statement(DirectoryEntryRow)
     def directory_entry_add_one(self, entry: DirectoryEntryRow, *, statement) -> None:
         self._add_one(statement, entry)
 
     @_prepared_select_statement(DirectoryEntryRow, "WHERE directory_id IN ?")
     def directory_entry_get(
         self, directory_ids, *, statement
     ) -> Iterable[DirectoryEntryRow]:
         return map(
             DirectoryEntryRow.from_dict,
             self._execute_with_retries(statement, [directory_ids]),
         )
 
     ##########################
     # 'snapshot' table
     ##########################
 
     @_prepared_exists_statement("snapshot")
     def snapshot_missing(self, ids: List[bytes], *, statement) -> List[bytes]:
         return self._missing(statement, ids)
 
     @_prepared_insert_statement(SnapshotRow)
     def snapshot_add_one(self, snapshot: SnapshotRow, *, statement) -> None:
         self._add_one(statement, snapshot)
 
     @_prepared_select_statement(SnapshotRow, "WHERE token(id) > ? LIMIT 1")
     def snapshot_get_random(self, *, statement) -> Optional[SnapshotRow]:
         return self._get_random_row(SnapshotRow, statement)
 
     ##########################
     # 'snapshot_branch' table
     ##########################
 
     @_prepared_insert_statement(SnapshotBranchRow)
     def snapshot_branch_add_one(self, branch: SnapshotBranchRow, *, statement) -> None:
         self._add_one(statement, branch)
 
     @_prepared_statement(
         "SELECT ascii_bins_count(target_type) AS counts "
         "FROM snapshot_branch "
         "WHERE snapshot_id = ? "
     )
     def snapshot_count_branches(
         self, snapshot_id: Sha1Git, *, statement
     ) -> Dict[Optional[str], int]:
         """Returns a dictionary from type names to the number of branches
         of that type."""
         row = self._execute_with_retries(statement, [snapshot_id]).one()
         (nb_none, counts) = row["counts"]
         return {None: nb_none, **counts}
 
     @_prepared_select_statement(
         SnapshotBranchRow, "WHERE snapshot_id = ? AND name >= ? LIMIT ?"
     )
     def snapshot_branch_get(
         self, snapshot_id: Sha1Git, from_: bytes, limit: int, *, statement
     ) -> Iterable[SnapshotBranchRow]:
         return map(
             SnapshotBranchRow.from_dict,
             self._execute_with_retries(statement, [snapshot_id, from_, limit]),
         )
 
     ##########################
     # 'origin' table
     ##########################
 
     @_prepared_insert_statement(OriginRow)
     def origin_add_one(self, origin: OriginRow, *, statement) -> None:
         self._add_one(statement, origin)
 
     @_prepared_select_statement(OriginRow, "WHERE sha1 = ?")
     def origin_get_by_sha1(self, sha1: bytes, *, statement) -> Iterable[OriginRow]:
         return map(OriginRow.from_dict, self._execute_with_retries(statement, [sha1]))
 
     def origin_get_by_url(self, url: str) -> Iterable[OriginRow]:
         return self.origin_get_by_sha1(hash_url(url))
 
     @_prepared_statement(
         f'SELECT token(sha1) AS tok, {", ".join(OriginRow.cols())} '
         f"FROM origin WHERE token(sha1) >= ? LIMIT ?"
     )
     def origin_list(
         self, start_token: int, limit: int, *, statement
     ) -> Iterable[Tuple[int, OriginRow]]:
         """Returns an iterable of (token, origin)"""
         return (
             (row["tok"], OriginRow.from_dict(remove_keys(row, ("tok",))))
             for row in self._execute_with_retries(statement, [start_token, limit])
         )
 
     @_prepared_select_statement(OriginRow)
     def origin_iter_all(self, *, statement) -> Iterable[OriginRow]:
         return map(OriginRow.from_dict, self._execute_with_retries(statement, []))
 
     @_prepared_statement("SELECT next_visit_id FROM origin WHERE sha1 = ?")
     def _origin_get_next_visit_id(self, origin_sha1: bytes, *, statement) -> int:
         rows = list(self._execute_with_retries(statement, [origin_sha1]))
         assert len(rows) == 1  # TODO: error handling
         return rows[0]["next_visit_id"]
 
     @_prepared_statement(
         "UPDATE origin SET next_visit_id=? WHERE sha1 = ? IF next_visit_id=?"
     )
     def origin_generate_unique_visit_id(self, origin_url: str, *, statement) -> int:
         origin_sha1 = hash_url(origin_url)
         next_id = self._origin_get_next_visit_id(origin_sha1)
         while True:
             res = list(
                 self._execute_with_retries(
                     statement, [next_id + 1, origin_sha1, next_id]
                 )
             )
             assert len(res) == 1
             if res[0]["[applied]"]:
                 # No data race
                 return next_id
             else:
                 # Someone else updated it before we did, let's try again
                 next_id = res[0]["next_visit_id"]
                 # TODO: abort after too many attempts
 
         return next_id
 
     ##########################
     # 'origin_visit' table
     ##########################
 
     @_prepared_select_statements(
         OriginVisitRow,
         {
             (True, ListOrder.ASC): (
                 "WHERE origin = ? AND visit > ? ORDER BY visit ASC LIMIT ?"
             ),
             (True, ListOrder.DESC): (
                 "WHERE origin = ? AND visit < ? ORDER BY visit DESC LIMIT ?"
             ),
             (False, ListOrder.ASC): "WHERE origin = ? ORDER BY visit ASC LIMIT ?",
             (False, ListOrder.DESC): "WHERE origin = ? ORDER BY visit DESC LIMIT ?",
         },
     )
     def origin_visit_get(
         self,
         origin_url: str,
         last_visit: Optional[int],
         limit: int,
         order: ListOrder,
         *,
         statements,
     ) -> Iterable[OriginVisitRow]:
         args: List[Any] = [origin_url]
 
         if last_visit is not None:
             args.append(last_visit)
 
         args.append(limit)
 
         statement = statements[(last_visit is not None, order)]
         return map(
             OriginVisitRow.from_dict, self._execute_with_retries(statement, args)
         )
 
     @_prepared_insert_statement(OriginVisitRow)
     def origin_visit_add_one(self, visit: OriginVisitRow, *, statement) -> None:
         self._add_one(statement, visit)
 
     @_prepared_select_statement(OriginVisitRow, "WHERE origin = ? AND visit = ?")
     def origin_visit_get_one(
         self, origin_url: str, visit_id: int, *, statement
     ) -> Optional[OriginVisitRow]:
         # TODO: error handling
         rows = list(self._execute_with_retries(statement, [origin_url, visit_id]))
         if rows:
             return OriginVisitRow.from_dict(rows[0])
         else:
             return None
 
     @_prepared_select_statement(OriginVisitRow, "WHERE origin = ?")
     def origin_visit_get_all(
         self, origin_url: str, *, statement
     ) -> Iterable[OriginVisitRow]:
         return map(
             OriginVisitRow.from_dict,
             self._execute_with_retries(statement, [origin_url]),
         )
 
     @_prepared_select_statement(OriginVisitRow, "WHERE token(origin) >= ?")
     def _origin_visit_iter_from(
         self, min_token: int, *, statement
     ) -> Iterable[OriginVisitRow]:
         return map(
             OriginVisitRow.from_dict, self._execute_with_retries(statement, [min_token])
         )
 
     @_prepared_select_statement(OriginVisitRow, "WHERE token(origin) < ?")
     def _origin_visit_iter_to(
         self, max_token: int, *, statement
     ) -> Iterable[OriginVisitRow]:
         return map(
             OriginVisitRow.from_dict, self._execute_with_retries(statement, [max_token])
         )
 
     def origin_visit_iter(self, start_token: int) -> Iterator[OriginVisitRow]:
         """Returns all origin visits in order from this token,
         and wraps around the token space."""
         yield from self._origin_visit_iter_from(start_token)
         yield from self._origin_visit_iter_to(start_token)
 
     ##########################
     # 'origin_visit_status' table
     ##########################
 
     @_prepared_select_statements(
         OriginVisitStatusRow,
         {
             (True, ListOrder.ASC): (
                 "WHERE origin = ? AND visit = ? AND date >= ? "
                 "ORDER BY visit ASC LIMIT ?"
             ),
             (True, ListOrder.DESC): (
                 "WHERE origin = ? AND visit = ? AND date <= ? "
                 "ORDER BY visit DESC LIMIT ?"
             ),
             (False, ListOrder.ASC): (
                 "WHERE origin = ? AND visit = ? ORDER BY visit ASC LIMIT ?"
             ),
             (False, ListOrder.DESC): (
                 "WHERE origin = ? AND visit = ? ORDER BY visit DESC LIMIT ?"
             ),
         },
     )
     def origin_visit_status_get_range(
         self,
         origin: str,
         visit: int,
         date_from: Optional[datetime.datetime],
         limit: int,
         order: ListOrder,
         *,
         statements,
     ) -> Iterable[OriginVisitStatusRow]:
         args: List[Any] = [origin, visit]
 
         if date_from is not None:
             args.append(date_from)
 
         args.append(limit)
 
         statement = statements[(date_from is not None, order)]
 
         return map(
             OriginVisitStatusRow.from_dict, self._execute_with_retries(statement, args)
         )
 
     @_prepared_insert_statement(OriginVisitStatusRow)
     def origin_visit_status_add_one(
         self, visit_update: OriginVisitStatusRow, *, statement
     ) -> None:
         self._add_one(statement, visit_update)
 
     def origin_visit_status_get_latest(
         self, origin: str, visit: int,
     ) -> Optional[OriginVisitStatusRow]:
         """Given an origin visit id, return its latest origin_visit_status
 
          """
         return next(self.origin_visit_status_get(origin, visit), None)
 
     @_prepared_select_statement(
         OriginVisitStatusRow, "WHERE origin = ? AND visit = ? ORDER BY date DESC"
     )
     def origin_visit_status_get(
         self, origin: str, visit: int, *, statement,
     ) -> Iterator[OriginVisitStatusRow]:
         """Return all origin visit statuses for a given visit
 
         """
         return map(
             OriginVisitStatusRow.from_dict,
             self._execute_with_retries(statement, [origin, visit]),
         )
 
     ##########################
     # 'metadata_authority' table
     ##########################
 
     @_prepared_insert_statement(MetadataAuthorityRow)
     def metadata_authority_add(self, authority: MetadataAuthorityRow, *, statement):
         self._add_one(statement, authority)
 
     @_prepared_select_statement(MetadataAuthorityRow, "WHERE type = ? AND url = ?")
     def metadata_authority_get(
         self, type, url, *, statement
     ) -> Optional[MetadataAuthorityRow]:
         rows = list(self._execute_with_retries(statement, [type, url]))
         if rows:
             return MetadataAuthorityRow.from_dict(rows[0])
         else:
             return None
 
     ##########################
     # 'metadata_fetcher' table
     ##########################
 
     @_prepared_insert_statement(MetadataFetcherRow)
     def metadata_fetcher_add(self, fetcher, *, statement):
         self._add_one(statement, fetcher)
 
     @_prepared_select_statement(MetadataFetcherRow, "WHERE name = ? AND version = ?")
     def metadata_fetcher_get(
         self, name, version, *, statement
     ) -> Optional[MetadataFetcherRow]:
         rows = list(self._execute_with_retries(statement, [name, version]))
         if rows:
             return MetadataFetcherRow.from_dict(rows[0])
         else:
             return None
 
     #########################
     # 'raw_extrinsic_metadata' table
     #########################
 
     @_prepared_insert_statement(RawExtrinsicMetadataRow)
     def raw_extrinsic_metadata_add(self, raw_extrinsic_metadata, *, statement):
         self._add_one(statement, raw_extrinsic_metadata)
 
     @_prepared_select_statement(
         RawExtrinsicMetadataRow,
         "WHERE id=? AND authority_url=? AND discovery_date>? AND authority_type=?",
     )
     def raw_extrinsic_metadata_get_after_date(
         self,
         id: str,
         authority_type: str,
         authority_url: str,
         after: datetime.datetime,
         *,
         statement,
     ) -> Iterable[RawExtrinsicMetadataRow]:
         return map(
             RawExtrinsicMetadataRow.from_dict,
             self._execute_with_retries(
                 statement, [id, authority_url, after, authority_type]
             ),
         )
 
     @_prepared_select_statement(
         RawExtrinsicMetadataRow,
         "WHERE id=? AND authority_type=? AND authority_url=? "
         "AND (discovery_date, fetcher_name, fetcher_version) > (?, ?, ?)",
     )
     def raw_extrinsic_metadata_get_after_date_and_fetcher(
         self,
         id: str,
         authority_type: str,
         authority_url: str,
         after_date: datetime.datetime,
         after_fetcher_name: str,
         after_fetcher_version: str,
         *,
         statement,
     ) -> Iterable[RawExtrinsicMetadataRow]:
         return map(
             RawExtrinsicMetadataRow.from_dict,
             self._execute_with_retries(
                 statement,
                 [
                     id,
                     authority_type,
                     authority_url,
                     after_date,
                     after_fetcher_name,
                     after_fetcher_version,
                 ],
             ),
         )
 
     @_prepared_select_statement(
         RawExtrinsicMetadataRow, "WHERE id=? AND authority_url=? AND authority_type=?"
     )
     def raw_extrinsic_metadata_get(
         self, id: str, authority_type: str, authority_url: str, *, statement
     ) -> Iterable[RawExtrinsicMetadataRow]:
         return map(
             RawExtrinsicMetadataRow.from_dict,
             self._execute_with_retries(statement, [id, authority_url, authority_type]),
         )
 
     ##########################
     # Miscellaneous
     ##########################
 
     @_prepared_statement("SELECT uuid() FROM revision LIMIT 1;")
     def check_read(self, *, statement):
         self._execute_with_retries(statement, [])
 
     @_prepared_select_statement(ObjectCountRow, "WHERE partition_key=0")
     def stat_counters(self, *, statement) -> Iterable[ObjectCountRow]:
         return map(ObjectCountRow.from_dict, self._execute_with_retries(statement, []))
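The two accessors added above, `skipped_content_get_tokens_from_single_hash` and `skipped_content_get_from_token`, mirror the existing lookup pattern for `content`: an index table maps a single hash to token(s), and the main table is then queried by token. Below is a hedged sketch of how a caller can chain them, modeled on `_content_get_from_hash` in storage.py; `cql_runner` stands for a connected `CqlRunner` and the helper name is illustrative, not part of the patch.

```python
from typing import Iterator

def skipped_content_rows_from_hash(cql_runner, algo: str, hash_: bytes) -> Iterator:
    """hash -> token(s) via the skipped_content_by_{algo} index table,
    then token -> rows via the main 'skipped_content' table."""
    for token in cql_runner.skipped_content_get_tokens_from_single_hash(algo, hash_):
        for row in cql_runner.skipped_content_get_from_token(token):
            # Murmur3 tokens can collide, so re-check the hash and keep
            # only the rows that actually match.
            if getattr(row, algo) == hash_:
                yield row
```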
diff --git a/swh/storage/cassandra/model.py b/swh/storage/cassandra/model.py
index dd31a283..fddd1c2a 100644
--- a/swh/storage/cassandra/model.py
+++ b/swh/storage/cassandra/model.py
@@ -1,263 +1,278 @@
 # Copyright (C) 2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 """Classes representing tables in the Cassandra database.
 
 They are very close to classes found in swh.model.model, but most of
 them are subtly different:
 
 * Large objects are split into other classes (e.g. RevisionRow has no
   'parents' field, because parents are stored in a different table,
   represented by RevisionParentRow)
 * They have a "cols" field, which returns the list of column names
   of the table
 * They only use types that map directly to Cassandra's schema (i.e. no enums)
 
 Therefore, this model doesn't reuse swh.model.model, except for types
 that can be mapped to UDTs (Person and TimestampWithTimezone).
 """
 
 import dataclasses
 import datetime
 from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, TypeVar
 
 from swh.model.model import Person, TimestampWithTimezone
 
 
+MAGIC_NULL_PK = b"<null>"
+"""
+NULLs (or all-empty blobs) are not allowed in primary keys; instead we use a
+special value that can't possibly be a valid hash.
+"""
+
+
 T = TypeVar("T", bound="BaseRow")
 
 
 class BaseRow:
     TABLE: ClassVar[str]
     PARTITION_KEY: ClassVar[Tuple[str, ...]]
     CLUSTERING_KEY: ClassVar[Tuple[str, ...]] = ()
 
     @classmethod
     def from_dict(cls: Type[T], d: Dict[str, Any]) -> T:
         return cls(**d)  # type: ignore
 
     @classmethod
     def cols(cls) -> List[str]:
         return [field.name for field in dataclasses.fields(cls)]
 
     def to_dict(self) -> Dict[str, Any]:
         return dataclasses.asdict(self)
 
 
 @dataclasses.dataclass
 class ContentRow(BaseRow):
     TABLE = "content"
     PARTITION_KEY = ("sha1", "sha1_git", "sha256", "blake2s256")
 
     sha1: bytes
     sha1_git: bytes
     sha256: bytes
     blake2s256: bytes
     length: int
     ctime: datetime.datetime
     status: str
 
 
 @dataclasses.dataclass
 class SkippedContentRow(BaseRow):
     TABLE = "skipped_content"
     PARTITION_KEY = ("sha1", "sha1_git", "sha256", "blake2s256")
 
     sha1: Optional[bytes]
     sha1_git: Optional[bytes]
     sha256: Optional[bytes]
     blake2s256: Optional[bytes]
     length: Optional[int]
     ctime: Optional[datetime.datetime]
     status: str
     reason: str
     origin: str
 
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> "SkippedContentRow":
+        d = d.copy()
+        for k in ("sha1", "sha1_git", "sha256", "blake2s256"):
+            if d[k] == MAGIC_NULL_PK:
+                d[k] = None
+        return super().from_dict(d)
+
 
 @dataclasses.dataclass
 class DirectoryRow(BaseRow):
     TABLE = "directory"
     PARTITION_KEY = ("id",)
 
     id: bytes
 
 
 @dataclasses.dataclass
 class DirectoryEntryRow(BaseRow):
     TABLE = "directory_entry"
     PARTITION_KEY = ("directory_id",)
     CLUSTERING_KEY = ("name",)
 
     directory_id: bytes
     name: bytes
     target: bytes
     perms: int
     type: str
 
 
 @dataclasses.dataclass
 class RevisionRow(BaseRow):
     TABLE = "revision"
     PARTITION_KEY = ("id",)
 
     id: bytes
     date: Optional[TimestampWithTimezone]
     committer_date: Optional[TimestampWithTimezone]
     type: str
     directory: bytes
     message: bytes
     author: Person
     committer: Person
     synthetic: bool
     metadata: str
     extra_headers: dict
 
 
 @dataclasses.dataclass
 class RevisionParentRow(BaseRow):
     TABLE = "revision_parent"
     PARTITION_KEY = ("id",)
     CLUSTERING_KEY = ("parent_rank",)
 
     id: bytes
     parent_rank: int
     parent_id: bytes
 
 
 @dataclasses.dataclass
 class ReleaseRow(BaseRow):
     TABLE = "release"
     PARTITION_KEY = ("id",)
 
     id: bytes
     target_type: str
     target: bytes
     date: TimestampWithTimezone
     name: bytes
     message: bytes
     author: Person
     synthetic: bool
 
 
 @dataclasses.dataclass
 class SnapshotRow(BaseRow):
     TABLE = "snapshot"
     PARTITION_KEY = ("id",)
 
     id: bytes
 
 
 @dataclasses.dataclass
 class SnapshotBranchRow(BaseRow):
     TABLE = "snapshot_branch"
     PARTITION_KEY = ("snapshot_id",)
     CLUSTERING_KEY = ("name",)
 
     snapshot_id: bytes
     name: bytes
     target_type: Optional[str]
     target: Optional[bytes]
 
 
 @dataclasses.dataclass
 class OriginVisitRow(BaseRow):
     TABLE = "origin_visit"
     PARTITION_KEY = ("origin",)
     CLUSTERING_KEY = ("visit",)
 
     origin: str
     visit: int
     date: datetime.datetime
     type: str
 
 
 @dataclasses.dataclass
 class OriginVisitStatusRow(BaseRow):
     TABLE = "origin_visit_status"
     PARTITION_KEY = ("origin",)
     CLUSTERING_KEY = ("visit", "date")
 
     origin: str
     visit: int
     date: datetime.datetime
     status: str
     metadata: str
     snapshot: bytes
 
 
 @dataclasses.dataclass
 class OriginRow(BaseRow):
     TABLE = "origin"
     PARTITION_KEY = ("sha1",)
 
     sha1: bytes
     url: str
     next_visit_id: int
 
 
 @dataclasses.dataclass
 class MetadataAuthorityRow(BaseRow):
     TABLE = "metadata_authority"
     PARTITION_KEY = ("url",)
     CLUSTERING_KEY = ("type",)
 
     url: str
     type: str
     metadata: str
 
 
 @dataclasses.dataclass
 class MetadataFetcherRow(BaseRow):
     TABLE = "metadata_fetcher"
     PARTITION_KEY = ("name",)
     CLUSTERING_KEY = ("version",)
 
     name: str
     version: str
     metadata: str
 
 
 @dataclasses.dataclass
 class RawExtrinsicMetadataRow(BaseRow):
     TABLE = "raw_extrinsic_metadata"
     PARTITION_KEY = ("id",)
     CLUSTERING_KEY = (
         "authority_type",
         "authority_url",
         "discovery_date",
         "fetcher_name",
         "fetcher_version",
     )
 
     type: str
     id: str
 
     authority_type: str
     authority_url: str
     discovery_date: datetime.datetime
     fetcher_name: str
     fetcher_version: str
 
     format: str
     metadata: bytes
 
     origin: Optional[str]
     visit: Optional[int]
     snapshot: Optional[str]
     release: Optional[str]
     revision: Optional[str]
     path: Optional[bytes]
     directory: Optional[str]
 
 
 @dataclasses.dataclass
 class ObjectCountRow(BaseRow):
     TABLE = "object_count"
     PARTITION_KEY = ("partition_key",)
     CLUSTERING_KEY = ("object_type",)
 
     partition_key: int
     object_type: str
     count: int
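With `MAGIC_NULL_PK` moved into model.py, the placeholder round trip becomes symmetric: cql.py substitutes it for `None` before writing (NULLs being forbidden in partition keys), and the new `SkippedContentRow.from_dict` decodes it back to `None` on read. An illustrative sketch of the read side, with made-up hash values:

```python
from swh.storage.cassandra.model import MAGIC_NULL_PK, SkippedContentRow

row = SkippedContentRow.from_dict({
    "sha1": b"\x01" * 20,
    "sha1_git": b"\x02" * 20,
    "sha256": MAGIC_NULL_PK,     # placeholder stored in lieu of NULL
    "blake2s256": MAGIC_NULL_PK,
    "length": 42,
    "ctime": None,
    "status": "absent",
    "reason": "too large",
    "origin": "https://example.org/repo",
})
assert row.sha256 is None and row.blake2s256 is None  # decoded back
```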
diff --git a/swh/storage/cassandra/storage.py b/swh/storage/cassandra/storage.py
index f49bee51..d02d96d3 100644
--- a/swh/storage/cassandra/storage.py
+++ b/swh/storage/cassandra/storage.py
@@ -1,1300 +1,1311 @@
 # Copyright (C) 2019-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import base64
 import datetime
 import itertools
 import json
 import random
 import re
 from typing import Any, Callable, Dict, List, Iterable, Optional, Set, Tuple, Union
 
 import attr
 
 from swh.core.api.serializers import msgpack_loads, msgpack_dumps
 from swh.model.identifiers import parse_swhid, SWHID
 from swh.model.hashutil import DEFAULT_ALGORITHMS
 from swh.model.model import (
     Revision,
     Release,
     Directory,
     DirectoryEntry,
     Content,
     SkippedContent,
     OriginVisit,
     OriginVisitStatus,
     Snapshot,
     SnapshotBranch,
     TargetType,
     Origin,
     MetadataAuthority,
     MetadataAuthorityType,
     MetadataFetcher,
     MetadataTargetType,
     RawExtrinsicMetadata,
     Sha1Git,
 )
 from swh.storage.interface import (
     ListOrder,
     PagedResult,
     PartialBranches,
     Sha1,
     VISIT_STATUSES,
 )
 from swh.storage.objstorage import ObjStorage
 from swh.storage.writer import JournalWriter
 from swh.storage.utils import map_optional, now
 
 from ..exc import StorageArgumentException, HashCollision
 from .common import TOKEN_BEGIN, TOKEN_END, hash_url, remove_keys
 from . import converters
 from .cql import CqlRunner
 from .schema import HASH_ALGORITHMS
 from .model import (
     ContentRow,
     DirectoryEntryRow,
     DirectoryRow,
     MetadataAuthorityRow,
     MetadataFetcherRow,
     OriginRow,
     OriginVisitRow,
     OriginVisitStatusRow,
     RawExtrinsicMetadataRow,
     RevisionParentRow,
     SkippedContentRow,
     SnapshotBranchRow,
     SnapshotRow,
 )
 
 
 # Max block size of contents to return
 BULK_BLOCK_CONTENT_LEN_MAX = 10000
 
 
 class CassandraStorage:
     def __init__(self, hosts, keyspace, objstorage, port=9042, journal_writer=None):
         self._cql_runner: CqlRunner = CqlRunner(hosts, keyspace, port)
         self.journal_writer: JournalWriter = JournalWriter(journal_writer)
         self.objstorage: ObjStorage = ObjStorage(objstorage)
 
     def check_config(self, *, check_write: bool) -> bool:
         self._cql_runner.check_read()
 
         return True
 
     def _content_get_from_hash(self, algo, hash_) -> Iterable:
         """From the name of a hash algorithm and a value of that hash,
         looks up the "hash -> token" secondary table (content_by_{algo})
         to get tokens.
         Then, looks up the main table (content) to get all contents with
         that token, and filters out contents whose hash doesn't match."""
         found_tokens = self._cql_runner.content_get_tokens_from_single_hash(algo, hash_)
 
         for token in found_tokens:
             assert isinstance(token, int), found_tokens
             # Query the main table ('content').
             res = self._cql_runner.content_get_from_token(token)
 
             for row in res:
                 # re-check the hash (in case of a murmur3 collision)
                 if getattr(row, algo) == hash_:
                     yield row
 
     def _content_add(self, contents: List[Content], with_data: bool) -> Dict:
         # Filter-out content already in the database.
         contents = [
             c for c in contents if not self._cql_runner.content_get_from_pk(c.to_dict())
         ]
 
         self.journal_writer.content_add(contents)
 
         if with_data:
             # First insert to the objstorage, if the endpoint is
             # `content_add` (as opposed to `content_add_metadata`).
             # TODO: this should probably be done concurrently with inserting
             # in the index tables (but still before the main table, so an
             # entry is only added to the main table after everything else
             # was successfully inserted).
             summary = self.objstorage.content_add(
                 c for c in contents if c.status != "absent"
             )
             content_add_bytes = summary["content:add:bytes"]
 
         content_add = 0
         for content in contents:
             content_add += 1
 
             # Check for sha1 or sha1_git collisions. This test is not atomic
             # with the insertion, so it won't detect a collision if both
             # contents are inserted at the same time, but it's good enough.
             #
             # The proper way to do it would probably be a BATCH, but this
             # would be inefficient because of the number of partitions we
             # need to affect (len(HASH_ALGORITHMS)+1, which is currently 5)
             for algo in {"sha1", "sha1_git"}:
                 collisions = []
                 # Get tokens of 'content' rows with the same value for
                 # sha1/sha1_git
                 rows = self._content_get_from_hash(algo, content.get_hash(algo))
                 for row in rows:
                     if getattr(row, algo) != content.get_hash(algo):
                         # collision of token(partition key), ignore this
                         # row
                         continue
 
                     for other_algo in HASH_ALGORITHMS:
                         if getattr(row, other_algo) != content.get_hash(other_algo):
                             # This hash didn't match; record the row as a collision.
                             collisions.append(
                                 {k: getattr(row, k) for k in HASH_ALGORITHMS}
                             )
 
                 if collisions:
                     collisions.append(content.hashes())
                     raise HashCollision(algo, content.get_hash(algo), collisions)
 
             (token, insertion_finalizer) = self._cql_runner.content_add_prepare(
                 ContentRow(**remove_keys(content.to_dict(), ("data",)))
             )
 
             # Then add to index tables
             for algo in HASH_ALGORITHMS:
                 self._cql_runner.content_index_add_one(algo, content, token)
 
             # Then to the main table
             insertion_finalizer()
 
         summary = {
             "content:add": content_add,
         }
 
         if with_data:
             summary["content:add:bytes"] = content_add_bytes
 
         return summary
 
     def content_add(self, content: List[Content]) -> Dict:
         contents = [attr.evolve(c, ctime=now()) for c in content]
         return self._content_add(list(contents), with_data=True)
 
     def content_update(
         self, contents: List[Dict[str, Any]], keys: List[str] = []
     ) -> None:
         raise NotImplementedError(
             "content_update is not supported by the Cassandra backend"
         )
 
     def content_add_metadata(self, content: List[Content]) -> Dict:
         return self._content_add(content, with_data=False)
 
     def content_get_data(self, content: Sha1) -> Optional[bytes]:
         # FIXME: Make this method support slicing the `data`
         return self.objstorage.content_get(content)
 
     def content_get_partition(
         self,
         partition_id: int,
         nb_partitions: int,
         page_token: Optional[str] = None,
         limit: int = 1000,
     ) -> PagedResult[Content]:
         if limit is None:
             raise StorageArgumentException("limit should not be None")
 
         # Compute start and end of the range of tokens covered by the
         # requested partition
         partition_size = (TOKEN_END - TOKEN_BEGIN) // nb_partitions
         range_start = TOKEN_BEGIN + partition_id * partition_size
         range_end = TOKEN_BEGIN + (partition_id + 1) * partition_size
 
         # offset the range start according to the `page_token`.
         if page_token is not None:
             if not (range_start <= int(page_token) <= range_end):
                 raise StorageArgumentException("Invalid page_token.")
             range_start = int(page_token)
 
         next_page_token: Optional[str] = None
 
         rows = self._cql_runner.content_get_token_range(
             range_start, range_end, limit + 1
         )
         contents = []
         for counter, (tok, row) in enumerate(rows):
             if row.status == "absent":
                 continue
             row_d = row.to_dict()
             if counter >= limit:
                 next_page_token = str(tok)
                 break
             row_d.pop("ctime")
             contents.append(Content(**row_d))
 
         assert len(contents) <= limit
         return PagedResult(results=contents, next_page_token=next_page_token)
 
     def content_get(self, contents: List[Sha1]) -> List[Optional[Content]]:
         contents_by_sha1: Dict[Sha1, Optional[Content]] = {}
         for sha1 in contents:
             # Get all (sha1, sha1_git, sha256, blake2s256) whose sha1
             # matches the argument, from the index table ('content_by_sha1')
             for row in self._content_get_from_hash("sha1", sha1):
                 row_d = row.to_dict()
                 row_d.pop("ctime")
                 content = Content(**row_d)
                 contents_by_sha1[content.sha1] = content
         return [contents_by_sha1.get(sha1) for sha1 in contents]
 
     def content_find(self, content: Dict[str, Any]) -> List[Content]:
         # Find an algorithm that is common to all the requested contents.
         # It will be used to do an initial filtering efficiently.
         filter_algos = list(set(content).intersection(HASH_ALGORITHMS))
         if not filter_algos:
             raise StorageArgumentException(
                 "content keys must contain at least one "
                 f"of: {', '.join(sorted(HASH_ALGORITHMS))}"
             )
         common_algo = filter_algos[0]
 
         results = []
         rows = self._content_get_from_hash(common_algo, content[common_algo])
         for row in rows:
             # Re-check all the hashes, in case of collisions (either of the
             # hash of the partition key, or the hashes in it)
             for algo in HASH_ALGORITHMS:
                 if content.get(algo) and getattr(row, algo) != content[algo]:
                     # This hash didn't match; discard the row.
                     break
             else:
                 # All hashes match, keep this row.
                 row_d = row.to_dict()
                 row_d["ctime"] = row.ctime.replace(tzinfo=datetime.timezone.utc)
                 results.append(Content(**row_d))
         return results
 
     def content_missing(
         self, contents: List[Dict[str, Any]], key_hash: str = "sha1"
     ) -> Iterable[bytes]:
         if key_hash not in DEFAULT_ALGORITHMS:
             raise StorageArgumentException(
                 "key_hash should be one of {','.join(DEFAULT_ALGORITHMS)}"
             )
 
         for content in contents:
             res = self.content_find(content)
             if not res:
                 yield content[key_hash]
 
     def content_missing_per_sha1(self, contents: List[bytes]) -> Iterable[bytes]:
         return self.content_missing([{"sha1": c} for c in contents])
 
     def content_missing_per_sha1_git(
         self, contents: List[Sha1Git]
     ) -> Iterable[Sha1Git]:
         return self.content_missing(
             [{"sha1_git": c} for c in contents], key_hash="sha1_git"
         )
 
     def content_get_random(self) -> Sha1Git:
         content = self._cql_runner.content_get_random()
         assert content, "Could not find any content"
         return content.sha1_git
 
     def _skipped_content_add(self, contents: List[SkippedContent]) -> Dict:
         # Filter-out content already in the database.
         contents = [
             c
             for c in contents
             if not self._cql_runner.skipped_content_get_from_pk(c.to_dict())
         ]
 
         self.journal_writer.skipped_content_add(contents)
 
         for content in contents:
             # Compute token of the row in the main table
             (token, insertion_finalizer) = self._cql_runner.skipped_content_add_prepare(
                 SkippedContentRow.from_dict({"origin": None, **content.to_dict()})
             )
 
             # Then add to index tables
             for algo in HASH_ALGORITHMS:
                 self._cql_runner.skipped_content_index_add_one(algo, content, token)
 
             # Then to the main table
             insertion_finalizer()
 
         return {"skipped_content:add": len(contents)}
 
     def skipped_content_add(self, content: List[SkippedContent]) -> Dict:
         contents = [attr.evolve(c, ctime=now()) for c in content]
         return self._skipped_content_add(contents)
 
     def skipped_content_missing(
         self, contents: List[Dict[str, Any]]
     ) -> Iterable[Dict[str, Any]]:
         for content in contents:
             if not self._cql_runner.skipped_content_get_from_pk(content):
                 yield {algo: content[algo] for algo in DEFAULT_ALGORITHMS}
 
     def directory_add(self, directories: List[Directory]) -> Dict:
         # Filter out directories that are already inserted.
         missing = self.directory_missing([dir_.id for dir_ in directories])
         directories = [dir_ for dir_ in directories if dir_.id in missing]
 
         self.journal_writer.directory_add(directories)
 
         for directory in directories:
             # Add directory entries to the 'directory_entry' table
             for entry in directory.entries:
                 self._cql_runner.directory_entry_add_one(
                     DirectoryEntryRow(directory_id=directory.id, **entry.to_dict())
                 )
 
             # Add the directory *after* adding all the entries, so someone
             # calling directory_ls in the meantime won't end up
             # with half the entries.
             self._cql_runner.directory_add_one(DirectoryRow(id=directory.id))
 
         return {"directory:add": len(directories)}
 
     def directory_missing(self, directories: List[Sha1Git]) -> Iterable[Sha1Git]:
         return self._cql_runner.directory_missing(directories)
 
     def _join_dentry_to_content(self, dentry: DirectoryEntry) -> Dict[str, Any]:
+        contents: Union[List[Content], List[SkippedContentRow]]
         keys = (
             "status",
             "sha1",
             "sha1_git",
             "sha256",
             "length",
         )
         ret = dict.fromkeys(keys)
         ret.update(dentry.to_dict())
         if ret["type"] == "file":
             contents = self.content_find({"sha1_git": ret["target"]})
+            if not contents:
+                # Not a regular content: fall back to the skipped_content
+                # tables, via the sha1_git index.
+                tokens = list(
+                    self._cql_runner.skipped_content_get_tokens_from_single_hash(
+                        "sha1_git", ret["target"]
+                    )
+                )
+                if tokens:
+                    contents = list(
+                        self._cql_runner.skipped_content_get_from_token(tokens[0])
+                    )
             if contents:
                 content = contents[0]
                 for key in keys:
                     ret[key] = getattr(content, key)
         return ret
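+
+    # Resulting dict (sketch; field values below are hypothetical). For a file
+    # entry only known through skipped_content, some hashes may be missing:
+    #
+    #     {"name": b"huge.bin", "type": "file", "target": <sha1_git>,
+    #      "perms": 0o100644, "status": "absent", "sha1": None,
+    #      "sha1_git": <sha1_git>, "sha256": None, "length": 1073741824}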
 
     def _directory_ls(
         self, directory_id: Sha1Git, recursive: bool, prefix: bytes = b""
     ) -> Iterable[Dict[str, Any]]:
         if self.directory_missing([directory_id]):
             return
         rows = list(self._cql_runner.directory_entry_get([directory_id]))
 
         for row in rows:
             entry_d = row.to_dict()
             # Build and yield the directory entry dict
             del entry_d["directory_id"]
             entry = DirectoryEntry.from_dict(entry_d)
             ret = self._join_dentry_to_content(entry)
             ret["name"] = prefix + ret["name"]
             ret["dir_id"] = directory_id
             yield ret
 
             if recursive and ret["type"] == "dir":
                 yield from self._directory_ls(
                     ret["target"], True, prefix + ret["name"] + b"/"
                 )
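+
+    # Recursion sketch: for 'dir' entries, the listing descends into the
+    # sub-directory and prefixes each nested name with the parent path, so a
+    # file b"f" inside a directory b"d" is yielded with name b"d/f".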
 
     def directory_entry_get_by_path(
         self, directory: Sha1Git, paths: List[bytes]
     ) -> Optional[Dict[str, Any]]:
         return self._directory_entry_get_by_path(directory, paths, b"")
 
     def _directory_entry_get_by_path(
         self, directory: Sha1Git, paths: List[bytes], prefix: bytes
     ) -> Optional[Dict[str, Any]]:
         if not paths:
             return None
 
         contents = list(self.directory_ls(directory))
 
         if not contents:
             return None
 
         def _get_entry(entries, name):
             """Finds the entry with the requested name, prepends the
             prefix (to get its full path), and returns it.
 
             If no entry has that name, returns None."""
             for entry in entries:
                 if entry["name"] == name:
                     entry = entry.copy()
                     entry["name"] = prefix + entry["name"]
                     return entry
 
         first_item = _get_entry(contents, paths[0])
 
         if len(paths) == 1:
             return first_item
 
         if not first_item or first_item["type"] != "dir":
             return None
 
         return self._directory_entry_get_by_path(
             first_item["target"], paths[1:], prefix + paths[0] + b"/"
         )
 
     def directory_ls(
         self, directory: Sha1Git, recursive: bool = False
     ) -> Iterable[Dict[str, Any]]:
         yield from self._directory_ls(directory, recursive)
 
     def directory_get_random(self) -> Sha1Git:
         directory = self._cql_runner.directory_get_random()
         assert directory, "Could not find any directory"
         return directory.id
 
     def revision_add(self, revisions: List[Revision]) -> Dict:
         # Filter-out revisions already in the database
         missing = self.revision_missing([rev.id for rev in revisions])
         revisions = [rev for rev in revisions if rev.id in missing]
         self.journal_writer.revision_add(revisions)
 
         for revision in revisions:
             revobject = converters.revision_to_db(revision)
             if revobject:
                 # Add parents first
                 for (rank, parent) in enumerate(revision.parents):
                     self._cql_runner.revision_parent_add_one(
                         RevisionParentRow(
                             id=revobject.id, parent_rank=rank, parent_id=parent
                         )
                     )
 
                 # Then write the main revision row.
                 # Writing this after all parents were written ensures that
                 # read endpoints don't return a partial view while writing
                 # the parents
                 self._cql_runner.revision_add_one(revobject)
 
         return {"revision:add": len(revisions)}
 
     def revision_missing(self, revisions: List[Sha1Git]) -> Iterable[Sha1Git]:
         return self._cql_runner.revision_missing(revisions)
 
     def revision_get(self, revision_ids: List[Sha1Git]) -> List[Optional[Revision]]:
         rows = self._cql_runner.revision_get(revision_ids)
         revisions: Dict[Sha1Git, Revision] = {}
         for row in rows:
             # TODO: use a single query to get all parents?
             # (it might have lower latency, but requires more code and more
             # bandwidth, because revision id would be part of each returned
             # row)
             parents = tuple(self._cql_runner.revision_parent_get(row.id))
             # parent_rank is the clustering key, so results are already
             # sorted by rank.
             rev = converters.revision_from_db(row, parents=parents)
             revisions[rev.id] = rev
 
         return [revisions.get(rev_id) for rev_id in revision_ids]
 
     def _get_parent_revs(
         self,
         rev_ids: Iterable[Sha1Git],
         seen: Set[Sha1Git],
         limit: Optional[int],
         short: bool,
     ) -> Union[
         Iterable[Dict[str, Any]], Iterable[Tuple[Sha1Git, Tuple[Sha1Git, ...]]],
     ]:
         if limit and len(seen) >= limit:
             return
         rev_ids = [id_ for id_ in rev_ids if id_ not in seen]
         if not rev_ids:
             return
         seen |= set(rev_ids)
 
         # We need this query, even if short=True, to return consistent
         # results (ie. not return only a subset of a revision's parents
         # if it is being written)
         if short:
             ids = self._cql_runner.revision_get_ids(rev_ids)
             for id_ in ids:
                 # TODO: use a single query to get all parents?
                 # (it might have lower latency, but requires more code and more
                 # bandwidth, because revision id would be part of each returned
                 # row)
                 parents = tuple(self._cql_runner.revision_parent_get(id_))
 
                 # parent_rank is the clustering key, so results are already
                 # sorted by rank.
 
                 yield (id_, parents)
                 yield from self._get_parent_revs(parents, seen, limit, short)
         else:
             rows = self._cql_runner.revision_get(rev_ids)
 
             for row in rows:
                 # TODO: use a single query to get all parents?
                 # (it might have lower latency, but requires more code and more
                 # bandwidth, because revision id would be part of each returned
                 # row)
                 parents = tuple(self._cql_runner.revision_parent_get(row.id))
 
                 # parent_rank is the clustering key, so results are already
                 # sorted by rank.
 
                 rev = converters.revision_from_db(row, parents=parents)
                 yield rev.to_dict()
                 yield from self._get_parent_revs(parents, seen, limit, short)
 
     def revision_log(
         self, revisions: List[Sha1Git], limit: Optional[int] = None
     ) -> Iterable[Optional[Dict[str, Any]]]:
         seen: Set[Sha1Git] = set()
         yield from self._get_parent_revs(revisions, seen, limit, False)
 
     def revision_shortlog(
         self, revisions: List[Sha1Git], limit: Optional[int] = None
     ) -> Iterable[Optional[Tuple[Sha1Git, Tuple[Sha1Git, ...]]]]:
         seen: Set[Sha1Git] = set()
         yield from self._get_parent_revs(revisions, seen, limit, True)
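+
+    # Usage sketch (illustrative, `storage` and `head` are assumed): both
+    # endpoints walk the revision graph from the given heads, deduplicating
+    # through `seen`:
+    #
+    #     for rev_d in storage.revision_log([head], limit=100):
+    #         ...  # full revision dicts
+    #     for (rev_id, parents) in storage.revision_shortlog([head]):
+    #         ...  # (id, parent ids) tuples only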
 
     def revision_get_random(self) -> Sha1Git:
         revision = self._cql_runner.revision_get_random()
         assert revision, "Could not find any revision"
         return revision.id
 
     def release_add(self, releases: List[Release]) -> Dict:
         # Dedupe the input, preserving order
         to_add = []
         for rel in releases:
             if rel not in to_add:
                 to_add.append(rel)
         missing = set(self.release_missing([rel.id for rel in to_add]))
         to_add = [rel for rel in to_add if rel.id in missing]
 
         self.journal_writer.release_add(to_add)
 
         for release in to_add:
             if release:
                 self._cql_runner.release_add_one(converters.release_to_db(release))
 
         return {"release:add": len(to_add)}
 
     def release_missing(self, releases: List[Sha1Git]) -> Iterable[Sha1Git]:
         return self._cql_runner.release_missing(releases)
 
     def release_get(self, releases: List[Sha1Git]) -> List[Optional[Release]]:
         rows = self._cql_runner.release_get(releases)
         rels: Dict[Sha1Git, Release] = {}
         for row in rows:
             release = converters.release_from_db(row)
             rels[row.id] = release
 
         return [rels.get(rel_id) for rel_id in releases]
 
     def release_get_random(self) -> Sha1Git:
         release = self._cql_runner.release_get_random()
         assert release, "Could not find any release"
         return release.id
 
     def snapshot_add(self, snapshots: List[Snapshot]) -> Dict:
         missing = self._cql_runner.snapshot_missing([snp.id for snp in snapshots])
         snapshots = [snp for snp in snapshots if snp.id in missing]
 
         for snapshot in snapshots:
             self.journal_writer.snapshot_add([snapshot])
 
             # Add branches
             for (branch_name, branch) in snapshot.branches.items():
                 if branch is None:
                     target_type: Optional[str] = None
                     target: Optional[bytes] = None
                 else:
                     target_type = branch.target_type.value
                     target = branch.target
                 self._cql_runner.snapshot_branch_add_one(
                     SnapshotBranchRow(
                         snapshot_id=snapshot.id,
                         name=branch_name,
                         target_type=target_type,
                         target=target,
                     )
                 )
 
             # Add the snapshot *after* adding all the branches, so someone
             # calling snapshot_get_branch in the meantime won't end up
             # with half the branches.
             self._cql_runner.snapshot_add_one(SnapshotRow(id=snapshot.id))
 
         return {"snapshot:add": len(snapshots)}
 
     def snapshot_missing(self, snapshots: List[Sha1Git]) -> Iterable[Sha1Git]:
         return self._cql_runner.snapshot_missing(snapshots)
 
     def snapshot_get(self, snapshot_id: Sha1Git) -> Optional[Dict[str, Any]]:
         d = self.snapshot_get_branches(snapshot_id)
         if d is None:
             return None
         return {
             "id": d["id"],
             "branches": {
                 name: branch.to_dict() if branch else None
                 for (name, branch) in d["branches"].items()
             },
             "next_branch": d["next_branch"],
         }
 
     def snapshot_count_branches(
         self, snapshot_id: Sha1Git
     ) -> Optional[Dict[Optional[str], int]]:
         if self._cql_runner.snapshot_missing([snapshot_id]):
             # Makes sure we don't fetch branches for a snapshot that is
             # being added.
             return None
         return self._cql_runner.snapshot_count_branches(snapshot_id)
 
     def snapshot_get_branches(
         self,
         snapshot_id: Sha1Git,
         branches_from: bytes = b"",
         branches_count: int = 1000,
         target_types: Optional[List[str]] = None,
     ) -> Optional[PartialBranches]:
         if self._cql_runner.snapshot_missing([snapshot_id]):
             # Makes sure we don't fetch branches for a snapshot that is
             # being added.
             return None
 
         branches: List = []
         while len(branches) < branches_count + 1:
             new_branches = list(
                 self._cql_runner.snapshot_branch_get(
                     snapshot_id, branches_from, branches_count + 1
                 )
             )
 
             if not new_branches:
                 break
 
             branches_from = new_branches[-1].name
 
             new_branches_filtered = new_branches
 
             # Filter by target_type
             if target_types:
                 new_branches_filtered = [
                     branch
                     for branch in new_branches_filtered
                     if branch.target is not None and branch.target_type in target_types
                 ]
 
             branches.extend(new_branches_filtered)
 
             if len(new_branches) < branches_count + 1:
                 break
 
         if len(branches) > branches_count:
             last_branch = branches.pop(-1).name
         else:
             last_branch = None
 
         return PartialBranches(
             id=snapshot_id,
             branches={
                 branch.name: None
                 if branch.target is None
                 else SnapshotBranch(
                     target=branch.target, target_type=TargetType(branch.target_type)
                 )
                 for branch in branches
             },
             next_branch=last_branch,
         )
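+
+    # Pagination sketch (illustrative, `storage` and `snp_id` are assumed):
+    # `next_branch` is threaded back as `branches_from` to get the next page:
+    #
+    #     partial = storage.snapshot_get_branches(snp_id, branches_count=100)
+    #     while partial and partial["next_branch"] is not None:
+    #         partial = storage.snapshot_get_branches(
+    #             snp_id,
+    #             branches_from=partial["next_branch"],
+    #             branches_count=100,
+    #         )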
 
     def snapshot_get_random(self) -> Sha1Git:
         snapshot = self._cql_runner.snapshot_get_random()
         assert snapshot, "Could not find any snapshot"
         return snapshot.id
 
     def object_find_by_sha1_git(self, ids: List[Sha1Git]) -> Dict[Sha1Git, List[Dict]]:
         results: Dict[Sha1Git, List[Dict]] = {id_: [] for id_ in ids}
         missing_ids = set(ids)
 
         # Mind the order, revision is the most likely one for a given ID,
         # so we check revisions first.
         queries: List[Tuple[str, Callable[[List[Sha1Git]], List[Sha1Git]]]] = [
             ("revision", self._cql_runner.revision_missing),
             ("release", self._cql_runner.release_missing),
             ("content", self._cql_runner.content_missing_by_sha1_git),
             ("directory", self._cql_runner.directory_missing),
         ]
 
         for (object_type, query_fn) in queries:
             found_ids = missing_ids - set(query_fn(list(missing_ids)))
             for sha1_git in found_ids:
                 results[sha1_git].append(
                     {"sha1_git": sha1_git, "type": object_type,}
                 )
                 missing_ids.remove(sha1_git)
 
             if not missing_ids:
                 # We found everything, skipping the next queries.
                 break
 
         return results
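+
+    # Result shape (sketch, ids are hypothetical): ids that match no object
+    # map to an empty list:
+    #
+    #     {id1: [{"sha1_git": id1, "type": "revision"}],
+    #      id2: []}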
 
     def origin_get(self, origins: List[str]) -> Iterable[Optional[Origin]]:
         return [self.origin_get_one(origin) for origin in origins]
 
     def origin_get_one(self, origin_url: str) -> Optional[Origin]:
         """Given an origin url, return the origin if it exists, None otherwise
 
         """
         rows = list(self._cql_runner.origin_get_by_url(origin_url))
         if rows:
             assert len(rows) == 1
             return Origin(url=rows[0].url)
         else:
             return None
 
     def origin_get_by_sha1(self, sha1s: List[bytes]) -> List[Optional[Dict[str, Any]]]:
         results = []
         for sha1 in sha1s:
             rows = list(self._cql_runner.origin_get_by_sha1(sha1))
             origin = {"url": rows[0].url} if rows else None
             results.append(origin)
         return results
 
     def origin_list(
         self, page_token: Optional[str] = None, limit: int = 100
     ) -> PagedResult[Origin]:
         # Compute what token to begin the listing from
         start_token = TOKEN_BEGIN
         if page_token:
             start_token = int(page_token)
             if not (TOKEN_BEGIN <= start_token <= TOKEN_END):
                 raise StorageArgumentException("Invalid page_token.")
         next_page_token = None
 
         origins = []
         # Take one more origin so we can reuse it as the next page token if any
         for (tok, row) in self._cql_runner.origin_list(start_token, limit + 1):
             origins.append(Origin(url=row.url))
             # keep reference of the last id for pagination purposes
             last_id = tok
 
         if len(origins) > limit:
             # last origin id is the next page token
             next_page_token = str(last_id)
             # excluding that origin from the result to respect the limit size
             origins = origins[:limit]
 
         assert len(origins) <= limit
 
         return PagedResult(results=origins, next_page_token=next_page_token)
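+
+    # Paging sketch: as for contents, the token of the extra origin is
+    # returned as next_page_token, and callers pass it back as page_token to
+    # resume the listing from that point in token order.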
 
     def origin_search(
         self,
         url_pattern: str,
         page_token: Optional[str] = None,
         limit: int = 50,
         regexp: bool = False,
         with_visit: bool = False,
     ) -> PagedResult[Origin]:
         # TODO: remove this endpoint, swh-search should be used instead.
         next_page_token = None
         offset = int(page_token) if page_token else 0
 
         origin_rows = list(self._cql_runner.origin_iter_all())
         if regexp:
             pat = re.compile(url_pattern)
             origin_rows = [row for row in origin_rows if pat.search(row.url)]
         else:
             origin_rows = [row for row in origin_rows if url_pattern in row.url]
 
         if with_visit:
             # an origin has been visited iff its visit counter was bumped
             origin_rows = [row for row in origin_rows if row.next_visit_id > 1]
 
         origins = [Origin(url=row.url) for row in origin_rows]
 
         origins = origins[offset : offset + limit + 1]
         if len(origins) > limit:
             # next offset
             next_page_token = str(offset + limit)
             # excluding that origin from the result to respect the limit size
             origins = origins[:limit]
 
         assert len(origins) <= limit
         return PagedResult(results=origins, next_page_token=next_page_token)
 
     def origin_count(
         self, url_pattern: str, regexp: bool = False, with_visit: bool = False
     ) -> int:
         raise NotImplementedError(
             "The Cassandra backend does not implement origin_count"
         )
 
     def origin_add(self, origins: List[Origin]) -> Dict[str, int]:
         to_add = [ori for ori in origins if self.origin_get_one(ori.url) is None]
         self.journal_writer.origin_add(to_add)
         for origin in to_add:
             self._cql_runner.origin_add_one(
                 OriginRow(sha1=hash_url(origin.url), url=origin.url, next_visit_id=1)
             )
         return {"origin:add": len(to_add)}
 
     def origin_visit_add(self, visits: List[OriginVisit]) -> Iterable[OriginVisit]:
         for visit in visits:
             origin = self.origin_get_one(visit.origin)
             if not origin:  # Cannot add a visit without an origin
                 raise StorageArgumentException("Unknown origin %s", visit.origin)
 
         all_visits = []
         nb_visits = 0
         for visit in visits:
             nb_visits += 1
             if not visit.visit:
                 visit_id = self._cql_runner.origin_generate_unique_visit_id(
                     visit.origin
                 )
                 visit = attr.evolve(visit, visit=visit_id)
             self.journal_writer.origin_visit_add([visit])
             self._cql_runner.origin_visit_add_one(OriginVisitRow(**visit.to_dict()))
             assert visit.visit is not None
             all_visits.append(visit)
             self._origin_visit_status_add(
                 OriginVisitStatus(
                     origin=visit.origin,
                     visit=visit.visit,
                     date=visit.date,
                     status="created",
                     snapshot=None,
                 )
             )
 
         return all_visits
 
     def _origin_visit_status_add(self, visit_status: OriginVisitStatus) -> None:
         """Add an origin visit status"""
         self.journal_writer.origin_visit_status_add([visit_status])
         self._cql_runner.origin_visit_status_add_one(
             converters.visit_status_to_row(visit_status)
         )
 
     def origin_visit_status_add(self, visit_statuses: List[OriginVisitStatus]) -> None:
         # First round to check existence (fail early if any is ko)
         for visit_status in visit_statuses:
             origin_url = self.origin_get_one(visit_status.origin)
             if not origin_url:
                 raise StorageArgumentException(f"Unknown origin {visit_status.origin}")
 
         for visit_status in visit_statuses:
             self._origin_visit_status_add(visit_status)
 
     def _origin_visit_apply_status(
         self, visit: Dict[str, Any], visit_status: OriginVisitStatusRow
     ) -> Dict[str, Any]:
         """Retrieve the latest visit status information for the origin visit.
         Then merge it with the visit and return it.
 
         """
         return {
             # default to the values in visit
             **visit,
             # override with the last update
             **visit_status.to_dict(),
             # visit['origin'] is the URL (via a join), while
             # visit_status['origin'] is only an id.
             "origin": visit["origin"],
             # but keep the date of the creation of the origin visit
             "date": visit["date"],
         }
 
     def _origin_visit_get_latest_status(self, visit: OriginVisit) -> OriginVisitStatus:
         """Retrieve the latest visit status information for the origin visit object.
 
         """
         assert visit.visit
         row = self._cql_runner.origin_visit_status_get_latest(visit.origin, visit.visit)
         assert row is not None
         visit_status = converters.row_to_visit_status(row)
         return attr.evolve(visit_status, origin=visit.origin)
 
     @staticmethod
     def _format_origin_visit_row(visit):
         return {
             **visit.to_dict(),
             "origin": visit.origin,
             "date": visit.date.replace(tzinfo=datetime.timezone.utc),
         }
 
     def origin_visit_get(
         self,
         origin: str,
         page_token: Optional[str] = None,
         order: ListOrder = ListOrder.ASC,
         limit: int = 10,
     ) -> PagedResult[OriginVisit]:
         if not isinstance(order, ListOrder):
             raise StorageArgumentException("order must be a ListOrder value")
         if page_token and not isinstance(page_token, str):
             raise StorageArgumentException("page_token must be a string.")
 
         next_page_token = None
         visit_from = None if page_token is None else int(page_token)
         visits: List[OriginVisit] = []
         extra_limit = limit + 1
 
         rows = self._cql_runner.origin_visit_get(origin, visit_from, extra_limit, order)
         for row in rows:
             visits.append(converters.row_to_visit(row))
 
         assert len(visits) <= extra_limit
         if len(visits) == extra_limit:
             visits = visits[:limit]
             next_page_token = str(visits[-1].visit)
 
         return PagedResult(results=visits, next_page_token=next_page_token)
 
     def origin_visit_status_get(
         self,
         origin: str,
         visit: int,
         page_token: Optional[str] = None,
         order: ListOrder = ListOrder.ASC,
         limit: int = 10,
     ) -> PagedResult[OriginVisitStatus]:
         next_page_token = None
         date_from = None
         if page_token is not None:
             date_from = datetime.datetime.fromisoformat(page_token)
 
         # Take one more visit status so we can reuse it as the next page token if any
         rows = self._cql_runner.origin_visit_status_get_range(
             origin, visit, date_from, limit + 1, order
         )
         visit_statuses = [converters.row_to_visit_status(row) for row in rows]
         if len(visit_statuses) > limit:
             # last visit status date is the next page token
             next_page_token = str(visit_statuses[-1].date)
             # excluding that visit status from the result to respect the limit size
             visit_statuses = visit_statuses[:limit]
 
         return PagedResult(results=visit_statuses, next_page_token=next_page_token)
 
     def origin_visit_find_by_date(
         self, origin: str, visit_date: datetime.datetime
     ) -> Optional[OriginVisit]:
         # Iterator over all the visits of the origin
         # This should be ok for now, as there aren't too many visits
         # per origin.
         rows = list(self._cql_runner.origin_visit_get_all(origin))
 
         def key(visit):
             dt = visit.date.replace(tzinfo=datetime.timezone.utc) - visit_date
             return (abs(dt), -visit.visit)
 
         if rows:
             return converters.row_to_visit(min(rows, key=key))
         return None
 
     def origin_visit_get_by(self, origin: str, visit: int) -> Optional[OriginVisit]:
         row = self._cql_runner.origin_visit_get_one(origin, visit)
         if row:
             return converters.row_to_visit(row)
         return None
 
     def origin_visit_get_latest(
         self,
         origin: str,
         type: Optional[str] = None,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[OriginVisit]:
         if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
             raise StorageArgumentException(
                 f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
                 f"{','.join(VISIT_STATUSES)} authorized"
             )
         # TODO: Do not fetch all visits
         rows = self._cql_runner.origin_visit_get_all(origin)
         latest_visit = None
         for row in rows:
             visit = self._format_origin_visit_row(row)
             for status_row in self._cql_runner.origin_visit_status_get(
                 origin, visit["visit"]
             ):
                 updated_visit = self._origin_visit_apply_status(visit, status_row)
                 if type is not None and updated_visit["type"] != type:
                     continue
                 if allowed_statuses and updated_visit["status"] not in allowed_statuses:
                     continue
                 if require_snapshot and updated_visit["snapshot"] is None:
                     continue
 
                 # updated_visit is a candidate
                 if latest_visit is not None:
                     if updated_visit["date"] < latest_visit["date"]:
                         continue
                     if updated_visit["visit"] < latest_visit["visit"]:
                         continue
 
                 latest_visit = updated_visit
 
         if latest_visit is None:
             return None
         return OriginVisit(
             origin=latest_visit["origin"],
             visit=latest_visit["visit"],
             date=latest_visit["date"],
             type=latest_visit["type"],
         )
 
     def origin_visit_status_get_latest(
         self,
         origin_url: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
     ) -> Optional[OriginVisitStatus]:
         if allowed_statuses and not set(allowed_statuses).intersection(VISIT_STATUSES):
             raise StorageArgumentException(
                 f"Unknown allowed statuses {','.join(allowed_statuses)}, only "
                 f"{','.join(VISIT_STATUSES)} authorized"
             )
         rows = list(self._cql_runner.origin_visit_status_get(origin_url, visit))
         # filtering is done python side as we cannot do it server side
         if allowed_statuses:
             rows = [row for row in rows if row.status in allowed_statuses]
         if require_snapshot:
             rows = [row for row in rows if row.snapshot is not None]
         if not rows:
             return None
         return converters.row_to_visit_status(rows[0])
 
     def origin_visit_status_get_random(
         self, type: str
     ) -> Optional[Tuple[OriginVisit, OriginVisitStatus]]:
         back_in_the_day = now() - datetime.timedelta(weeks=12)  # 3 months back
 
         # Random position to start iteration at
         start_token = random.randint(TOKEN_BEGIN, TOKEN_END)
 
         # Iterator over all visits, ordered by token(origins) then visit_id
         rows = self._cql_runner.origin_visit_iter(start_token)
         for row in rows:
             visit = converters.row_to_visit(row)
             visit_status = self._origin_visit_get_latest_status(visit)
             if visit.date > back_in_the_day and visit_status.status == "full":
                 return visit, visit_status
         return None
 
     def stat_counters(self):
         rows = self._cql_runner.stat_counters()
         keys = (
             "content",
             "directory",
             "origin",
             "origin_visit",
             "release",
             "revision",
             "skipped_content",
             "snapshot",
         )
         stats = {key: 0 for key in keys}
         stats.update({row.object_type: row.count for row in rows})
         return stats
 
     def refresh_stat_counters(self):
         pass
 
     def raw_extrinsic_metadata_add(self, metadata: List[RawExtrinsicMetadata]) -> None:
         self.journal_writer.raw_extrinsic_metadata_add(metadata)
         for metadata_entry in metadata:
             if not self._cql_runner.metadata_authority_get(
                 metadata_entry.authority.type.value, metadata_entry.authority.url
             ):
                 raise StorageArgumentException(
                     f"Unknown authority {metadata_entry.authority}"
                 )
             if not self._cql_runner.metadata_fetcher_get(
                 metadata_entry.fetcher.name, metadata_entry.fetcher.version
             ):
                 raise StorageArgumentException(
                     f"Unknown fetcher {metadata_entry.fetcher}"
                 )
 
             try:
                 row = RawExtrinsicMetadataRow(
                     type=metadata_entry.type.value,
                     id=str(metadata_entry.id),
                     authority_type=metadata_entry.authority.type.value,
                     authority_url=metadata_entry.authority.url,
                     discovery_date=metadata_entry.discovery_date,
                     fetcher_name=metadata_entry.fetcher.name,
                     fetcher_version=metadata_entry.fetcher.version,
                     format=metadata_entry.format,
                     metadata=metadata_entry.metadata,
                     origin=metadata_entry.origin,
                     visit=metadata_entry.visit,
                     snapshot=map_optional(str, metadata_entry.snapshot),
                     release=map_optional(str, metadata_entry.release),
                     revision=map_optional(str, metadata_entry.revision),
                     path=metadata_entry.path,
                     directory=map_optional(str, metadata_entry.directory),
                 )
                 self._cql_runner.raw_extrinsic_metadata_add(row)
             except TypeError as e:
                 raise StorageArgumentException(*e.args)
 
     def raw_extrinsic_metadata_get(
         self,
         type: MetadataTargetType,
         id: Union[str, SWHID],
         authority: MetadataAuthority,
         after: Optional[datetime.datetime] = None,
         page_token: Optional[bytes] = None,
         limit: int = 1000,
     ) -> PagedResult[RawExtrinsicMetadata]:
         if type == MetadataTargetType.ORIGIN:
             if isinstance(id, SWHID):
                 raise StorageArgumentException(
                     f"raw_extrinsic_metadata_get called with type='origin', "
                     f"but provided id is an SWHID: {id!r}"
                 )
         else:
             if not isinstance(id, SWHID):
                 raise StorageArgumentException(
                     f"raw_extrinsic_metadata_get called with type!='origin', "
                     f"but provided id is not an SWHID: {id!r}"
                 )
 
         if page_token is not None:
             (after_date, after_fetcher_name, after_fetcher_version) = msgpack_loads(
                 base64.b64decode(page_token)
             )
             if after and after_date < after:
                 raise StorageArgumentException(
                     "page_token is inconsistent with the value of 'after'."
                 )
             entries = self._cql_runner.raw_extrinsic_metadata_get_after_date_and_fetcher(  # noqa
                 str(id),
                 authority.type.value,
                 authority.url,
                 after_date,
                 after_fetcher_name,
                 after_fetcher_version,
             )
         elif after is not None:
             entries = self._cql_runner.raw_extrinsic_metadata_get_after_date(
                 str(id), authority.type.value, authority.url, after
             )
         else:
             entries = self._cql_runner.raw_extrinsic_metadata_get(
                 str(id), authority.type.value, authority.url
             )
 
         if limit:
             entries = itertools.islice(entries, 0, limit + 1)
 
         results = []
         for entry in entries:
             discovery_date = entry.discovery_date.replace(tzinfo=datetime.timezone.utc)
 
             assert str(id) == entry.id
 
             result = RawExtrinsicMetadata(
                 type=MetadataTargetType(entry.type),
                 id=id,
                 authority=MetadataAuthority(
                     type=MetadataAuthorityType(entry.authority_type),
                     url=entry.authority_url,
                 ),
                 fetcher=MetadataFetcher(
                     name=entry.fetcher_name, version=entry.fetcher_version,
                 ),
                 discovery_date=discovery_date,
                 format=entry.format,
                 metadata=entry.metadata,
                 origin=entry.origin,
                 visit=entry.visit,
                 snapshot=map_optional(parse_swhid, entry.snapshot),
                 release=map_optional(parse_swhid, entry.release),
                 revision=map_optional(parse_swhid, entry.revision),
                 path=entry.path,
                 directory=map_optional(parse_swhid, entry.directory),
             )
 
             results.append(result)
 
         if len(results) > limit:
             results.pop()
             assert len(results) == limit
             last_result = results[-1]
             next_page_token: Optional[str] = base64.b64encode(
                 msgpack_dumps(
                     (
                         last_result.discovery_date,
                         last_result.fetcher.name,
                         last_result.fetcher.version,
                     )
                 )
             ).decode()
         else:
             next_page_token = None
 
         return PagedResult(next_page_token=next_page_token, results=results,)
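+
+    # Page-token format (sketch): the token decoded at the top of this method
+    # is the one built here, i.e. base64(msgpack((discovery_date,
+    # fetcher_name, fetcher_version))) of the last entry of the page.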
 
     def metadata_fetcher_add(self, fetchers: List[MetadataFetcher]) -> None:
         self.journal_writer.metadata_fetcher_add(fetchers)
         for fetcher in fetchers:
             self._cql_runner.metadata_fetcher_add(
                 MetadataFetcherRow(
                     name=fetcher.name,
                     version=fetcher.version,
                     metadata=json.dumps(map_optional(dict, fetcher.metadata)),
                 )
             )
 
     def metadata_fetcher_get(
         self, name: str, version: str
     ) -> Optional[MetadataFetcher]:
         fetcher = self._cql_runner.metadata_fetcher_get(name, version)
         if fetcher:
             return MetadataFetcher(
                 name=fetcher.name,
                 version=fetcher.version,
                 metadata=json.loads(fetcher.metadata),
             )
         else:
             return None
 
     def metadata_authority_add(self, authorities: List[MetadataAuthority]) -> None:
         self.journal_writer.metadata_authority_add(authorities)
         for authority in authorities:
             self._cql_runner.metadata_authority_add(
                 MetadataAuthorityRow(
                     url=authority.url,
                     type=authority.type.value,
                     metadata=json.dumps(map_optional(dict, authority.metadata)),
                 )
             )
 
     def metadata_authority_get(
         self, type: MetadataAuthorityType, url: str
     ) -> Optional[MetadataAuthority]:
         authority = self._cql_runner.metadata_authority_get(type.value, url)
         if authority:
             return MetadataAuthority(
                 type=MetadataAuthorityType(authority.type),
                 url=authority.url,
                 metadata=json.loads(authority.metadata),
             )
         else:
             return None
 
     def clear_buffers(self, object_types: Optional[List[str]] = None) -> None:
         """Do nothing
 
         """
         return None
 
     def flush(self, object_types: Optional[List[str]] = None) -> Dict:
         return {}
diff --git a/swh/storage/in_memory.py b/swh/storage/in_memory.py
index fc61c763..f94b11bd 100644
--- a/swh/storage/in_memory.py
+++ b/swh/storage/in_memory.py
@@ -1,625 +1,633 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import datetime
 import functools
 import random
 
 from collections import defaultdict
 from typing import (
     Any,
     Dict,
     Generic,
     Iterable,
     Iterator,
     List,
     Optional,
     Tuple,
     Type,
     TypeVar,
     Union,
 )
 
 from swh.model.model import (
     Content,
     SkippedContent,
     Sha1Git,
 )
 
 from swh.storage.cassandra import CassandraStorage
 from swh.storage.cassandra.model import (
     BaseRow,
     ContentRow,
     DirectoryRow,
     DirectoryEntryRow,
     MetadataAuthorityRow,
     MetadataFetcherRow,
     ObjectCountRow,
     OriginRow,
     OriginVisitRow,
     OriginVisitStatusRow,
     RawExtrinsicMetadataRow,
     ReleaseRow,
     RevisionRow,
     RevisionParentRow,
     SkippedContentRow,
     SnapshotRow,
     SnapshotBranchRow,
 )
 from swh.storage.interface import ListOrder
 from swh.storage.objstorage import ObjStorage
 
 from .common import origin_url_to_sha1
 from .writer import JournalWriter
 
 
 TRow = TypeVar("TRow", bound=BaseRow)
 
 
 class Table(Generic[TRow]):
     def __init__(self, row_class: Type[TRow]):
         self.row_class = row_class
         self.primary_key_cols = row_class.PARTITION_KEY + row_class.CLUSTERING_KEY
 
         # Map from tokens to clustering keys to rows
         # These are not actually partitions (or rather, there is one partition
         # for each token) and they aren't sorted.
         # But it is good enough if we don't care about performance;
         # and makes the code a lot simpler.
         self.data: Dict[int, Dict[Tuple, TRow]] = defaultdict(dict)
 
     def __repr__(self):
         return f"<__module__.Table[{self.row_class.__name__}] object>"
 
     def partition_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple:
         """Returns the partition key of a row (ie. the cells which get hashed
         into the token."""
         if isinstance(row, dict):
             row_d = row
         else:
             row_d = row.to_dict()
         return tuple(row_d[col] for col in self.row_class.PARTITION_KEY)
 
     def clustering_key(self, row: Union[TRow, Dict[str, Any]]) -> Tuple:
         """Returns the clustering key of a row (ie. the cells which are used
         for sorting rows within a partition."""
         if isinstance(row, dict):
             row_d = row
         else:
             row_d = row.to_dict()
         return tuple(row_d[col] for col in self.row_class.CLUSTERING_KEY)
 
     def primary_key(self, row):
         return self.partition_key(row) + self.clustering_key(row)
 
     def primary_key_from_dict(self, d: Dict[str, Any]) -> Tuple:
         """Returns the primary key (ie. concatenation of partition key and
         clustering key) of the given dictionary interpreted as a row."""
         return tuple(d[col] for col in self.primary_key_cols)
 
     def token(self, key: Tuple):
         """Returns the token of a row (ie. the hash of its partition key)."""
         return hash(key)
 
     def get_partition(self, token: int) -> Dict[Tuple, TRow]:
         """Returns the partition that contains this token."""
         return self.data[token]
 
     def insert(self, row: TRow):
         partition = self.data[self.token(self.partition_key(row))]
         partition[self.clustering_key(row)] = row
 
     def split_primary_key(self, key: Tuple) -> Tuple[Tuple, Tuple]:
         """Returns (partition_key, clustering_key) from a partition key"""
         assert len(key) == len(self.primary_key_cols)
 
         partition_key = key[0 : len(self.row_class.PARTITION_KEY)]
         clustering_key = key[len(self.row_class.PARTITION_KEY) :]
 
         return (partition_key, clustering_key)
 
     def get_from_partition_key(self, partition_key: Tuple) -> Iterable[TRow]:
         """Returns at most one row, from its partition key."""
         token = self.token(partition_key)
         for row in self.get_from_token(token):
             if self.partition_key(row) == partition_key:
                 yield row
 
     def get_from_primary_key(self, primary_key: Tuple) -> Optional[TRow]:
         """Returns at most one row, from its primary key."""
         (partition_key, clustering_key) = self.split_primary_key(primary_key)
 
         token = self.token(partition_key)
         partition = self.get_partition(token)
 
         return partition.get(clustering_key)
 
     def get_from_token(self, token: int) -> Iterable[TRow]:
         """Returns all rows whose token (ie. non-cryptographic hash of the
         partition key) is the one passed as argument."""
         return (v for (k, v) in sorted(self.get_partition(token).items()))
 
     def iter_all(self) -> Iterator[Tuple[Tuple, TRow]]:
         return (
             (self.primary_key(row), row)
             for (token, partition) in self.data.items()
             for (clustering_key, row) in partition.items()
         )
 
     def get_random(self) -> Optional[TRow]:
         rows = [row for (pk, row) in self.iter_all()]
         if not rows:
             return None
         return random.choice(rows)
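+
+# Usage sketch (illustrative): Table emulates Cassandra's partition/token
+# model with plain dicts; `row` is an assumed ContentRow instance:
+#
+#     t = Table(ContentRow)
+#     t.insert(row)
+#     tok = t.token(t.partition_key(row))
+#     assert row in list(t.get_from_token(tok))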
 
 
 class InMemoryCqlRunner:
     def __init__(self):
         self._contents = Table(ContentRow)
         self._content_indexes = defaultdict(lambda: defaultdict(set))
         self._skipped_contents = Table(SkippedContentRow)
         self._skipped_content_indexes = defaultdict(lambda: defaultdict(set))
         self._directories = Table(DirectoryRow)
         self._directory_entries = Table(DirectoryEntryRow)
         self._revisions = Table(RevisionRow)
         self._revision_parents = Table(RevisionParentRow)
         self._releases = Table(ReleaseRow)
         self._snapshots = Table(SnapshotRow)
         self._snapshot_branches = Table(SnapshotBranchRow)
         self._origins = Table(OriginRow)
         self._origin_visits = Table(OriginVisitRow)
         self._origin_visit_statuses = Table(OriginVisitStatusRow)
         self._metadata_authorities = Table(MetadataAuthorityRow)
         self._metadata_fetchers = Table(MetadataFetcherRow)
         self._raw_extrinsic_metadata = Table(RawExtrinsicMetadataRow)
         self._stat_counters = defaultdict(int)
 
     def increment_counter(self, object_type: str, nb: int):
         self._stat_counters[object_type] += nb
 
     def stat_counters(self) -> Iterable[ObjectCountRow]:
         for (object_type, count) in self._stat_counters.items():
             yield ObjectCountRow(partition_key=0, object_type=object_type, count=count)
 
     ##########################
     # 'content' table
     ##########################
 
     def _content_add_finalize(self, content: ContentRow) -> None:
         self._contents.insert(content)
         self.increment_counter("content", 1)
 
     def content_add_prepare(self, content: ContentRow):
         finalizer = functools.partial(self._content_add_finalize, content)
         return (self._contents.token(self._contents.partition_key(content)), finalizer)
 
     def content_get_from_pk(
         self, content_hashes: Dict[str, bytes]
     ) -> Optional[ContentRow]:
         primary_key = self._contents.primary_key_from_dict(content_hashes)
         return self._contents.get_from_primary_key(primary_key)
 
     def content_get_from_token(self, token: int) -> Iterable[ContentRow]:
         return self._contents.get_from_token(token)
 
     def content_get_random(self) -> Optional[ContentRow]:
         return self._contents.get_random()
 
     def content_get_token_range(
         self, start: int, end: int, limit: int,
     ) -> Iterable[Tuple[int, ContentRow]]:
         matches = [
             (token, row)
             for (token, partition) in self._contents.data.items()
             for (clustering_key, row) in partition.items()
             if start <= token <= end
         ]
         matches.sort()
         return matches[0:limit]
 
     ##########################
     # 'content_by_*' tables
     ##########################
 
     def content_missing_by_sha1_git(self, ids: List[bytes]) -> List[bytes]:
         missing = []
         for id_ in ids:
             if id_ not in self._content_indexes["sha1_git"]:
                 missing.append(id_)
         return missing
 
     def content_index_add_one(self, algo: str, content: Content, token: int) -> None:
         self._content_indexes[algo][content.get_hash(algo)].add(token)
 
     def content_get_tokens_from_single_hash(
         self, algo: str, hash_: bytes
     ) -> Iterable[int]:
         return self._content_indexes[algo][hash_]
 
     ##########################
     # 'skipped_content' table
     ##########################
 
     def _skipped_content_add_finalize(self, content: SkippedContentRow) -> None:
         self._skipped_contents.insert(content)
         self.increment_counter("skipped_content", 1)
 
     def skipped_content_add_prepare(self, content: SkippedContentRow):
         finalizer = functools.partial(self._skipped_content_add_finalize, content)
         return (
             self._skipped_contents.token(
                 self._skipped_contents.partition_key(content)
             ),
             finalizer,
         )
 
     def skipped_content_get_from_pk(
         self, content_hashes: Dict[str, bytes]
     ) -> Optional[SkippedContentRow]:
         primary_key = self._skipped_contents.primary_key_from_dict(content_hashes)
         return self._skipped_contents.get_from_primary_key(primary_key)
 
+    def skipped_content_get_from_token(
+        self, token: int
+    ) -> Iterable[SkippedContentRow]:
+        return self._skipped_contents.get_from_token(token)
+
     ##########################
     # 'skipped_content_by_*' tables
     ##########################
 
     def skipped_content_index_add_one(
         self, algo: str, content: SkippedContent, token: int
     ) -> None:
         self._skipped_content_indexes[algo][content.get_hash(algo)].add(token)
 
+    def skipped_content_get_tokens_from_single_hash(
+        self, algo: str, hash_: bytes
+    ) -> Iterable[int]:
+        return self._skipped_content_indexes[algo][hash_]
+
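+    # These two additions (sketch of intended use) back the new
+    # skipped_content fallback in _join_dentry_to_content(), with `runner` and
+    # `target` assumed:
+    #
+    #     tokens = runner.skipped_content_get_tokens_from_single_hash(
+    #         "sha1_git", target
+    #     )
+    #     rows = runner.skipped_content_get_from_token(next(iter(tokens)))
+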
     ##########################
     # 'directory' table
     ##########################
 
     def directory_missing(self, ids: List[bytes]) -> List[bytes]:
         missing = []
         for id_ in ids:
             if self._directories.get_from_primary_key((id_,)) is None:
                 missing.append(id_)
         return missing
 
     def directory_add_one(self, directory: DirectoryRow) -> None:
         self._directories.insert(directory)
         self.increment_counter("directory", 1)
 
     def directory_get_random(self) -> Optional[DirectoryRow]:
         return self._directories.get_random()
 
     ##########################
     # 'directory_entry' table
     ##########################
 
     def directory_entry_add_one(self, entry: DirectoryEntryRow) -> None:
         self._directory_entries.insert(entry)
 
     def directory_entry_get(
         self, directory_ids: List[Sha1Git]
     ) -> Iterable[DirectoryEntryRow]:
         for id_ in directory_ids:
             yield from self._directory_entries.get_from_partition_key((id_,))
 
     ##########################
     # 'revision' table
     ##########################
 
     def revision_missing(self, ids: List[bytes]) -> Iterable[bytes]:
         missing = []
         for id_ in ids:
             if self._revisions.get_from_primary_key((id_,)) is None:
                 missing.append(id_)
         return missing
 
     def revision_add_one(self, revision: RevisionRow) -> None:
         self._revisions.insert(revision)
         self.increment_counter("revision", 1)
 
     def revision_get_ids(self, revision_ids: List[Sha1Git]) -> Iterable[Sha1Git]:
         for id_ in revision_ids:
             if self._revisions.get_from_primary_key((id_,)) is not None:
                 yield id_
 
     def revision_get(self, revision_ids: List[Sha1Git]) -> Iterable[RevisionRow]:
         for id_ in revision_ids:
             row = self._revisions.get_from_primary_key((id_,))
             if row:
                 yield row
 
     def revision_get_random(self) -> Optional[RevisionRow]:
         return self._revisions.get_random()
 
     ##########################
     # 'revision_parent' table
     ##########################
 
     def revision_parent_add_one(self, revision_parent: RevisionParentRow) -> None:
         self._revision_parents.insert(revision_parent)
 
     def revision_parent_get(self, revision_id: Sha1Git) -> Iterable[bytes]:
         for parent in self._revision_parents.get_from_partition_key((revision_id,)):
             yield parent.parent_id
 
     ##########################
     # 'release' table
     ##########################
 
     def release_missing(self, ids: List[bytes]) -> List[bytes]:
         missing = []
         for id_ in ids:
             if self._releases.get_from_primary_key((id_,)) is None:
                 missing.append(id_)
         return missing
 
     def release_add_one(self, release: ReleaseRow) -> None:
         self._releases.insert(release)
         self.increment_counter("release", 1)
 
     def release_get(self, release_ids: List[str]) -> Iterable[ReleaseRow]:
         for id_ in release_ids:
             row = self._releases.get_from_primary_key((id_,))
             if row:
                 yield row
 
     def release_get_random(self) -> Optional[ReleaseRow]:
         return self._releases.get_random()
 
     ##########################
     # 'snapshot' table
     ##########################
 
     def snapshot_missing(self, ids: List[bytes]) -> List[bytes]:
         missing = []
         for id_ in ids:
             if self._snapshots.get_from_primary_key((id_,)) is None:
                 missing.append(id_)
         return missing
 
     def snapshot_add_one(self, snapshot: SnapshotRow) -> None:
         self._snapshots.insert(snapshot)
         self.increment_counter("snapshot", 1)
 
     def snapshot_get_random(self) -> Optional[SnapshotRow]:
         return self._snapshots.get_random()
 
     ##########################
     # 'snapshot_branch' table
     ##########################
 
     def snapshot_branch_add_one(self, branch: SnapshotBranchRow) -> None:
         self._snapshot_branches.insert(branch)
 
     def snapshot_count_branches(self, snapshot_id: Sha1Git) -> Dict[Optional[str], int]:
         """Returns a dictionary from type names to the number of branches
         of that type."""
         counts: Dict[Optional[str], int] = defaultdict(int)
         for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)):
             if branch.target_type is None:
                 target_type = None
             else:
                 target_type = branch.target_type
             counts[target_type] += 1
         return counts
 
     def snapshot_branch_get(
         self, snapshot_id: Sha1Git, from_: bytes, limit: int
     ) -> Iterable[SnapshotBranchRow]:
         count = 0
         for branch in self._snapshot_branches.get_from_partition_key((snapshot_id,)):
             if branch.name >= from_:
                 count += 1
                 yield branch
             if count >= limit:
                 break
 
     ##########################
     # 'origin' table
     ##########################
 
     def origin_add_one(self, origin: OriginRow) -> None:
         self._origins.insert(origin)
         self.increment_counter("origin", 1)
 
     def origin_get_by_sha1(self, sha1: bytes) -> Iterable[OriginRow]:
         return self._origins.get_from_partition_key((sha1,))
 
     def origin_get_by_url(self, url: str) -> Iterable[OriginRow]:
         return self.origin_get_by_sha1(origin_url_to_sha1(url))
 
     def origin_list(
         self, start_token: int, limit: int
     ) -> Iterable[Tuple[int, OriginRow]]:
         """Returns an iterable of (token, origin)"""
         matches = [
             (token, row)
             for (token, partition) in self._origins.data.items()
             for (clustering_key, row) in partition.items()
             if token >= start_token
         ]
         matches.sort()
         return matches[0:limit]
 
     def origin_iter_all(self) -> Iterable[OriginRow]:
         return (
             row
             for (token, partition) in self._origins.data.items()
             for (clustering_key, row) in partition.items()
         )
 
     def origin_generate_unique_visit_id(self, origin_url: str) -> int:
         origin = list(self.origin_get_by_url(origin_url))[0]
         visit_id = origin.next_visit_id
         origin.next_visit_id += 1
         return visit_id
 
     ##########################
     # 'origin_visit' table
     ##########################
 
     def origin_visit_get(
         self, origin_url: str, last_visit: Optional[int], limit: int, order: ListOrder,
     ) -> Iterable[OriginVisitRow]:
         visits = list(self._origin_visits.get_from_partition_key((origin_url,)))
 
         if last_visit is not None:
             if order == ListOrder.ASC:
                 visits = [v for v in visits if v.visit > last_visit]
             else:
                 visits = [v for v in visits if v.visit < last_visit]
 
         visits.sort(key=lambda v: v.visit, reverse=order == ListOrder.DESC)
 
         visits = visits[0:limit]
 
         return visits
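
     # Cursor-style paging sketch for the method above (runner and url are
     # hypothetical): ``last_visit`` is an exclusive bound, so the visit id of
     # the last row of a page yields the following page.
     #
     #     page = list(runner.origin_visit_get(url, None, 10, ListOrder.ASC))
     #     while page:
     #         page = list(
     #             runner.origin_visit_get(url, page[-1].visit, 10, ListOrder.ASC)
     #         )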
 
     def origin_visit_add_one(self, visit: OriginVisitRow) -> None:
         self._origin_visits.insert(visit)
         self.increment_counter("origin_visit", 1)
 
     def origin_visit_get_one(
         self, origin_url: str, visit_id: int
     ) -> Optional[OriginVisitRow]:
         return self._origin_visits.get_from_primary_key((origin_url, visit_id))
 
     def origin_visit_get_all(self, origin_url: str) -> Iterable[OriginVisitRow]:
         return self._origin_visits.get_from_partition_key((origin_url,))
 
     def origin_visit_iter(self, start_token: int) -> Iterator[OriginVisitRow]:
         """Returns all origin visits in order from this token,
         and wraps around the token space."""
         return (
             row
             for (token, partition) in self._origin_visits.data.items()
             for (clustering_key, row) in partition.items()
         )
 
     ##########################
     # 'origin_visit_status' table
     ##########################
 
     def origin_visit_status_get_range(
         self,
         origin: str,
         visit: int,
         date_from: Optional[datetime.datetime],
         limit: int,
         order: ListOrder,
     ) -> Iterable[OriginVisitStatusRow]:
         statuses = list(self.origin_visit_status_get(origin, visit))
 
         if date_from is not None:
             if order == ListOrder.ASC:
                 statuses = [s for s in statuses if s.date >= date_from]
             else:
                 statuses = [s for s in statuses if s.date <= date_from]
 
         statuses.sort(key=lambda s: s.date, reverse=order == ListOrder.DESC)
 
         return statuses[0:limit]
 
     def origin_visit_status_add_one(self, visit_update: OriginVisitStatusRow) -> None:
         self._origin_visit_statuses.insert(visit_update)
         self.increment_counter("origin_visit_status", 1)
 
     def origin_visit_status_get_latest(
         self, origin: str, visit: int,
     ) -> Optional[OriginVisitStatusRow]:
         """Given an origin visit id, return its latest origin_visit_status
 
          """
         return next(self.origin_visit_status_get(origin, visit), None)
 
     def origin_visit_status_get(
         self, origin: str, visit: int,
     ) -> Iterator[OriginVisitStatusRow]:
         """Return all origin visit statuses for a given visit
 
         """
         statuses = [
             s
             for s in self._origin_visit_statuses.get_from_partition_key((origin,))
             if s.visit == visit
         ]
         statuses.sort(key=lambda s: s.date, reverse=True)
         return iter(statuses)
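
     # Note: statuses are yielded most recent first, which is what lets
     # origin_visit_status_get_latest above simply take the first element
     # with next(..., None).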
 
     ##########################
     # 'metadata_authority' table
     ##########################
 
     def metadata_authority_add(self, authority: MetadataAuthorityRow):
         self._metadata_authorities.insert(authority)
         self.increment_counter("metadata_authority", 1)
 
     def metadata_authority_get(self, type, url) -> Optional[MetadataAuthorityRow]:
         return self._metadata_authorities.get_from_primary_key((url, type))
 
     ##########################
     # 'metadata_fetcher' table
     ##########################
 
     def metadata_fetcher_add(self, fetcher: MetadataFetcherRow):
         self._metadata_fetchers.insert(fetcher)
         self.increment_counter("metadata_fetcher", 1)
 
     def metadata_fetcher_get(self, name, version) -> Optional[MetadataFetcherRow]:
         return self._metadata_fetchers.get_from_primary_key((name, version))
 
     ##########################
     # 'raw_extrinsic_metadata' table
     ##########################
 
     def raw_extrinsic_metadata_add(self, raw_extrinsic_metadata):
         self._raw_extrinsic_metadata.insert(raw_extrinsic_metadata)
         self.increment_counter("raw_extrinsic_metadata", 1)
 
     def raw_extrinsic_metadata_get_after_date(
         self,
         id: str,
         authority_type: str,
         authority_url: str,
         after: datetime.datetime,
     ) -> Iterable[RawExtrinsicMetadataRow]:
         metadata = self.raw_extrinsic_metadata_get(id, authority_type, authority_url)
         return (m for m in metadata if m.discovery_date > after)
 
     def raw_extrinsic_metadata_get_after_date_and_fetcher(
         self,
         id: str,
         authority_type: str,
         authority_url: str,
         after_date: datetime.datetime,
         after_fetcher_name: str,
         after_fetcher_version: str,
     ) -> Iterable[RawExtrinsicMetadataRow]:
         metadata = self._raw_extrinsic_metadata.get_from_partition_key((id,))
         after_tuple = (after_date, after_fetcher_name, after_fetcher_version)
         return (
             m
             for m in metadata
             if m.authority_type == authority_type
             and m.authority_url == authority_url
             and (m.discovery_date, m.fetcher_name, m.fetcher_version) > after_tuple
         )
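
     # The tuple comparison above is lexicographic, which makes
     # (discovery_date, fetcher_name, fetcher_version) usable as a keyset
     # pagination cursor. A minimal illustration (dates hypothetical, with
     # date2 later than date1):
     #
     #     (date1, "loader", "1.0") > (date1, "loader", "0.9")  # True
     #     (date2, "a", "0.1") > (date1, "z", "9.9")  # True: the date wins first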
 
     def raw_extrinsic_metadata_get(
         self, id: str, authority_type: str, authority_url: str
     ) -> Iterable[RawExtrinsicMetadataRow]:
         metadata = self._raw_extrinsic_metadata.get_from_partition_key((id,))
         return (
             m
             for m in metadata
             if m.authority_type == authority_type and m.authority_url == authority_url
         )
 
 
 class InMemoryStorage(CassandraStorage):
     _cql_runner: InMemoryCqlRunner  # type: ignore
 
     def __init__(self, journal_writer=None):
         self.reset()
         self.journal_writer = JournalWriter(journal_writer)
 
     def reset(self):
         self._cql_runner = InMemoryCqlRunner()
         self.objstorage = ObjStorage({"cls": "memory", "args": {}})
 
     def check_config(self, *, check_write: bool) -> bool:
         return True
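
 # Usage sketch: InMemoryStorage implements the same interface as the
 # Cassandra-backed storage it subclasses, so it can stand in for it in tests.
 #
 #     storage = InMemoryStorage()
 #     assert storage.check_config(check_write=True)
 #     storage.reset()  # drop all stored objects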
diff --git a/swh/storage/postgresql/db.py b/swh/storage/postgresql/db.py
index 5354efdd..998a51be 100644
--- a/swh/storage/postgresql/db.py
+++ b/swh/storage/postgresql/db.py
@@ -1,1342 +1,1342 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 import datetime
 import random
 import select
 from typing import Any, Dict, Iterable, List, Optional, Tuple
 
 from swh.core.db import BaseDb
 from swh.core.db.db_utils import stored_procedure, jsonize as _jsonize
 from swh.core.db.db_utils import execute_values_generator
 from swh.model.model import OriginVisit, OriginVisitStatus, SHA1_SIZE
 from swh.storage.interface import ListOrder
 
 
 def jsonize(d):
     return _jsonize(dict(d) if d is not None else None)
 
 
 class Db(BaseDb):
     """Proxy to the SWH DB, with wrappers around stored procedures
 
     """
 
-    current_version = 161
+    current_version = 162
 
     def mktemp_dir_entry(self, entry_type, cur=None):
         self._cursor(cur).execute(
             "SELECT swh_mktemp_dir_entry(%s)", (("directory_entry_%s" % entry_type),)
         )
 
     @stored_procedure("swh_mktemp_revision")
     def mktemp_revision(self, cur=None):
         pass
 
     @stored_procedure("swh_mktemp_release")
     def mktemp_release(self, cur=None):
         pass
 
     @stored_procedure("swh_mktemp_snapshot_branch")
     def mktemp_snapshot_branch(self, cur=None):
         pass
 
     def register_listener(self, notify_queue, cur=None):
         """Register a listener for NOTIFY queue `notify_queue`"""
         self._cursor(cur).execute("LISTEN %s" % notify_queue)
 
     def listen_notifies(self, timeout):
         """Listen to notifications for `timeout` seconds"""
         if select.select([self.conn], [], [], timeout) == ([], [], []):
             return
         else:
             self.conn.poll()
             while self.conn.notifies:
                 yield self.conn.notifies.pop(0)
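
     # Sketch combining the two methods above (channel name hypothetical):
     # register for a NOTIFY channel, then poll until ``timeout`` seconds
     # elapse with no activity.
     #
     #     db.register_listener("new_origin")
     #     for notify in db.listen_notifies(timeout=10):
     #         print(notify.channel, notify.payload)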
 
     @stored_procedure("swh_content_add")
     def content_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_directory_add")
     def directory_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_skipped_content_add")
     def skipped_content_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_revision_add")
     def revision_add_from_temp(self, cur=None):
         pass
 
     @stored_procedure("swh_release_add")
     def release_add_from_temp(self, cur=None):
         pass
 
     def content_update_from_temp(self, keys_to_update, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             """select swh_content_update(ARRAY[%s] :: text[])""" % keys_to_update
         )
 
     content_get_metadata_keys = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "status",
     ]
 
     content_add_keys = content_get_metadata_keys + ["ctime"]
 
     skipped_content_keys = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "reason",
         "status",
         "origin",
     ]
 
     def content_get_metadata_from_sha1s(self, sha1s, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             select t.sha1, %s from (values %%s) as t (sha1)
             inner join content using (sha1)
             """
             % ", ".join(self.content_get_metadata_keys[1:]),
             ((sha1,) for sha1 in sha1s),
         )
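
     # Note on the escaping above: the query first goes through Python
     # %-formatting to splice in the column names, which turns ``%%s`` into
     # the single ``%s`` placeholder that execute_values_generator expands:
     #
     #     "select t.sha1, %s from (values %%s) as t (sha1)" % "length"
     #     # -> 'select t.sha1, length from (values %s) as t (sha1)'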
 
     def content_get_range(self, start, end, limit=None, cur=None):
         """Retrieve contents within range [start, end].
 
         """
         cur = self._cursor(cur)
         query = """select %s from content
                    where %%s <= sha1 and sha1 <= %%s
                    order by sha1
                    limit %%s""" % ", ".join(
             self.content_get_metadata_keys
         )
         cur.execute(query, (start, end, limit))
         yield from cur
 
     content_hash_keys = ["sha1", "sha1_git", "sha256", "blake2s256"]
 
     def content_missing_from_list(self, contents, cur=None):
         cur = self._cursor(cur)
 
         keys = ", ".join(self.content_hash_keys)
         equality = " AND ".join(
             ("t.%s = c.%s" % (key, key)) for key in self.content_hash_keys
         )
 
         yield from execute_values_generator(
             cur,
             """
             SELECT %s
             FROM (VALUES %%s) as t(%s)
             WHERE NOT EXISTS (
                 SELECT 1 FROM content c
                 WHERE %s
             )
             """
             % (keys, keys, equality),
             (tuple(c[key] for key in self.content_hash_keys) for c in contents),
         )
 
     def content_missing_per_sha1(self, sha1s, cur=None):
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
         SELECT t.sha1 FROM (VALUES %s) AS t(sha1)
         WHERE NOT EXISTS (
             SELECT 1 FROM content c WHERE c.sha1 = t.sha1
         )""",
             ((sha1,) for sha1 in sha1s),
         )
 
     def content_missing_per_sha1_git(self, contents, cur=None):
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
         SELECT t.sha1_git FROM (VALUES %s) AS t(sha1_git)
         WHERE NOT EXISTS (
             SELECT 1 FROM content c WHERE c.sha1_git = t.sha1_git
         )""",
             ((sha1,) for sha1 in contents),
         )
 
     def skipped_content_missing(self, contents, cur=None):
         if not contents:
             return
         cur = self._cursor(cur)
 
         query = """SELECT * FROM (VALUES %s) AS t (%s)
                    WHERE not exists
                    (SELECT 1 FROM skipped_content s WHERE
                        s.sha1 is not distinct from t.sha1::sha1 and
                        s.sha1_git is not distinct from t.sha1_git::sha1 and
                        s.sha256 is not distinct from t.sha256::bytea);""" % (
             (", ".join("%s" for _ in contents)),
             ", ".join(self.content_hash_keys),
         )
         cur.execute(
             query,
             [tuple(cont[key] for key in self.content_hash_keys) for cont in contents],
         )
 
         yield from cur
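
     # ``is not distinct from`` above is a NULL-safe equality test: unlike
     # ``=``, it treats two NULLs as equal, which matters because skipped
     # contents may lack some checksums. In plain SQL:
     #
     #     select null = null;                     -- null (unknown)
     #     select null is not distinct from null;  -- true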
 
     def snapshot_exists(self, snapshot_id, cur=None):
         """Check whether a snapshot with the given id exists"""
         cur = self._cursor(cur)
 
         cur.execute("""SELECT 1 FROM snapshot where id=%s""", (snapshot_id,))
 
         return bool(cur.fetchone())
 
     def snapshot_missing_from_list(self, snapshots, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM snapshot d WHERE d.id = t.id
             )
                 """,
             ((id,) for id in snapshots),
         )
 
     def snapshot_add(self, snapshot_id, cur=None):
         """Add a snapshot from the temporary table"""
         cur = self._cursor(cur)
 
         cur.execute("""SELECT swh_snapshot_add(%s)""", (snapshot_id,))
 
     snapshot_count_cols = ["target_type", "count"]
 
     def snapshot_count_branches(self, snapshot_id, cur=None):
         cur = self._cursor(cur)
         query = """\
            SELECT %s FROM swh_snapshot_count_branches(%%s)
         """ % ", ".join(
             self.snapshot_count_cols
         )
 
         cur.execute(query, (snapshot_id,))
 
         yield from cur
 
     snapshot_get_cols = ["snapshot_id", "name", "target", "target_type"]
 
     def snapshot_get_by_id(
         self,
         snapshot_id,
         branches_from=b"",
         branches_count=None,
         target_types=None,
         cur=None,
     ):
         cur = self._cursor(cur)
         query = """\
            SELECT %s
            FROM swh_snapshot_get_by_id(%%s, %%s, %%s, %%s :: snapshot_target[])
         """ % ", ".join(
             self.snapshot_get_cols
         )
 
         cur.execute(query, (snapshot_id, branches_from, branches_count, target_types))
 
         yield from cur
 
     def snapshot_get_random(self, cur=None):
         return self._get_random_row_from_table("snapshot", ["id"], "id", cur)
 
     content_find_cols = [
         "sha1",
         "sha1_git",
         "sha256",
         "blake2s256",
         "length",
         "ctime",
         "status",
     ]
 
     def content_find(
         self,
         sha1: Optional[bytes] = None,
         sha1_git: Optional[bytes] = None,
         sha256: Optional[bytes] = None,
         blake2s256: Optional[bytes] = None,
         cur=None,
     ):
         """Find the content optionally on a combination of the following
         checksums sha1, sha1_git, sha256 or blake2s256.
 
         Args:
             sha1: sha1 content
             git_sha1: the sha1 computed `a la git` sha1 of the content
             sha256: sha256 content
             blake2s256: blake2s256 content
 
         Returns:
             The tuple (sha1, sha1_git, sha256, blake2s256) if found or None.
 
         """
         cur = self._cursor(cur)
 
         checksum_dict = {
             "sha1": sha1,
             "sha1_git": sha1_git,
             "sha256": sha256,
             "blake2s256": blake2s256,
         }
 
         query_parts = [f"SELECT {','.join(self.content_find_cols)} FROM content WHERE "]
         query_params = []
         where_parts = []
         # Keep only the checksum algorithms for which a value was provided
         for algorithm in checksum_dict:
             if checksum_dict[algorithm] is not None:
                 where_parts.append(f"{algorithm} = %s")
                 query_params.append(checksum_dict[algorithm])
 
         query_parts.append(" AND ".join(where_parts))
         query = "\n".join(query_parts)
         cur.execute(query, query_params)
         content = cur.fetchall()
         return content
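
     # Usage sketch (hash hypothetical): any combination of checksums can be
     # given; they are ANDed together. At least one must be provided, or the
     # WHERE clause above would be empty.
     #
     #     rows = db.content_find(
     #         sha1=bytes.fromhex("34973274ccef6ab4dfaaf86599792fa9c3fe4689")
     #     )
     #     for sha1, sha1_git, sha256, blake2s256, length, ctime, status in rows:
     #         ...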
 
     def content_get_random(self, cur=None):
         return self._get_random_row_from_table("content", ["sha1_git"], "sha1_git", cur)
 
     def directory_missing_from_list(self, directories, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM directory d WHERE d.id = t.id
             )
             """,
             ((id,) for id in directories),
         )
 
     directory_ls_cols = [
         "dir_id",
         "type",
         "target",
         "name",
         "perms",
         "status",
         "sha1",
         "sha1_git",
         "sha256",
         "length",
     ]
 
     def directory_walk_one(self, directory, cur=None):
         cur = self._cursor(cur)
         cols = ", ".join(self.directory_ls_cols)
         query = "SELECT %s FROM swh_directory_walk_one(%%s)" % cols
         cur.execute(query, (directory,))
         yield from cur
 
     def directory_walk(self, directory, cur=None):
         cur = self._cursor(cur)
         cols = ", ".join(self.directory_ls_cols)
         query = "SELECT %s FROM swh_directory_walk(%%s)" % cols
         cur.execute(query, (directory,))
         yield from cur
 
     def directory_entry_get_by_path(self, directory, paths, cur=None):
         """Retrieve a directory entry by path.
 
         """
         cur = self._cursor(cur)
 
         cols = ", ".join(self.directory_ls_cols)
         query = "SELECT %s FROM swh_find_directory_entry_by_path(%%s, %%s)" % cols
         cur.execute(query, (directory, paths))
 
         data = cur.fetchone()
         if set(data) == {None}:
             return None
         return data
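
     # Usage sketch (directory id hypothetical): ``paths`` is the list of
     # successive path components, as bytes, leading to the entry.
     #
     #     entry = db.directory_entry_get_by_path(dir_id, [b"src", b"main.c"])
     #     if entry is not None:
     #         dir_id, entry_type, target, name, perms, *checksums = entry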
 
     def directory_get_random(self, cur=None):
         return self._get_random_row_from_table("directory", ["id"], "id", cur)
 
     def revision_missing_from_list(self, revisions, cur=None):
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM revision r WHERE r.id = t.id
             )
             """,
             ((id,) for id in revisions),
         )
 
     revision_add_cols = [
         "id",
         "date",
         "date_offset",
         "date_neg_utc_offset",
         "committer_date",
         "committer_date_offset",
         "committer_date_neg_utc_offset",
         "type",
         "directory",
         "message",
         "author_fullname",
         "author_name",
         "author_email",
         "committer_fullname",
         "committer_name",
         "committer_email",
         "metadata",
         "synthetic",
         "extra_headers",
     ]
 
     revision_get_cols = revision_add_cols + ["parents"]
 
     def origin_visit_add(self, origin, ts, type, cur=None):
         """Add a new origin_visit for origin origin at timestamp ts.
 
         Args:
             origin: origin concerned by the visit
             ts: the date of the visit
             type: type of loader for the visit
 
         Returns:
             The new visit index step for that origin
 
         """
         cur = self._cursor(cur)
         self._cursor(cur).execute(
             "SELECT swh_origin_visit_add(%s, %s, %s)", (origin, ts, type)
         )
         return cur.fetchone()[0]
 
     origin_visit_status_cols = [
         "origin",
         "visit",
         "date",
         "status",
         "snapshot",
         "metadata",
     ]
 
     def origin_visit_status_add(
         self, visit_status: OriginVisitStatus, cur=None
     ) -> None:
         """Add new origin visit status
 
         """
         assert self.origin_visit_status_cols[0] == "origin"
         assert self.origin_visit_status_cols[-1] == "metadata"
         cols = self.origin_visit_status_cols[1:-1]
         cur = self._cursor(cur)
         cur.execute(
             f"WITH origin_id as (select id from origin where url=%s) "
             f"INSERT INTO origin_visit_status "
             f"(origin, {', '.join(cols)}, metadata) "
             f"VALUES ((select id from origin_id), "
             f"{', '.join(['%s']*len(cols))}, %s) "
             f"ON CONFLICT (origin, visit, date) do nothing",
             [visit_status.origin]
             + [getattr(visit_status, key) for key in cols]
             + [jsonize(visit_status.metadata)],
         )
 
     origin_visit_cols = ["origin", "visit", "date", "type"]
 
     def origin_visit_add_with_id(self, origin_visit: OriginVisit, cur=None) -> None:
         """Insert origin visit when id are already set
 
         """
         ov = origin_visit
         assert ov.visit is not None
         cur = self._cursor(cur)
         query = """INSERT INTO origin_visit ({cols})
                    VALUES ((select id from origin where url=%s), {values})
                    ON CONFLICT (origin, visit) DO NOTHING""".format(
             cols=", ".join(self.origin_visit_cols),
             values=", ".join("%s" for col in self.origin_visit_cols[1:]),
         )
         cur.execute(query, (ov.origin, ov.visit, ov.date, ov.type))
 
     origin_visit_get_cols = [
         "origin",
         "visit",
         "date",
         "type",
         "status",
         "metadata",
         "snapshot",
     ]
     origin_visit_select_cols = [
         "o.url AS origin",
         "ov.visit",
         "ov.date",
         "ov.type AS type",
         "ovs.status",
         "ovs.metadata",
         "ovs.snapshot",
     ]
 
     origin_visit_status_select_cols = [
         "o.url AS origin",
         "ovs.visit",
         "ovs.date",
         "ovs.status",
         "ovs.snapshot",
         "ovs.metadata",
     ]
 
     def _make_origin_visit_status(
         self, row: Optional[Tuple[Any, ...]]
     ) -> Optional[Dict[str, Any]]:
         """Make an origin_visit_status dict out of a row
 
         """
         if not row:
             return None
         return dict(zip(self.origin_visit_status_cols, row))
 
     def origin_visit_status_get_latest(
         self,
         origin_url: str,
         visit: int,
         allowed_statuses: Optional[List[str]] = None,
         require_snapshot: bool = False,
         cur=None,
     ) -> Optional[Dict[str, Any]]:
         """Given an origin visit id, return its latest origin_visit_status
 
         """
         cur = self._cursor(cur)
 
         query_parts = [
             "SELECT %s" % ", ".join(self.origin_visit_status_select_cols),
             "FROM origin_visit_status ovs ",
             "INNER JOIN origin o ON o.id = ovs.origin",
         ]
         query_parts.append("WHERE o.url = %s")
         query_params: List[Any] = [origin_url]
         query_parts.append("AND ovs.visit = %s")
         query_params.append(visit)
 
         if require_snapshot:
             query_parts.append("AND ovs.snapshot is not null")
 
         if allowed_statuses:
             query_parts.append("AND ovs.status IN %s")
             query_params.append(tuple(allowed_statuses))
 
         query_parts.append("ORDER BY ovs.date DESC LIMIT 1")
         query = "\n".join(query_parts)
 
         cur.execute(query, tuple(query_params))
         row = cur.fetchone()
         return self._make_origin_visit_status(row)
 
     def origin_visit_status_get_range(
         self,
         origin: str,
         visit: int,
         date_from: Optional[datetime.datetime],
         order: ListOrder,
         limit: int,
         cur=None,
     ):
         """Retrieve visit_status rows for visit (origin, visit) in a paginated way.
 
         """
         cur = self._cursor(cur)
 
         query_parts = [
             f"SELECT {', '.join(self.origin_visit_status_select_cols)} "
             "FROM origin_visit_status ovs ",
             "INNER JOIN origin o ON o.id = ovs.origin ",
         ]
         query_parts.append("WHERE o.url = %s AND ovs.visit = %s ")
         query_params: List[Any] = [origin, visit]
 
         if date_from is not None:
             op_comparison = ">=" if order == ListOrder.ASC else "<="
             query_parts.append(f"and ovs.date {op_comparison} %s ")
             query_params.append(date_from)
 
         if order == ListOrder.ASC:
             query_parts.append("ORDER BY ovs.date ASC ")
         elif order == ListOrder.DESC:
             query_parts.append("ORDER BY ovs.date DESC ")
         else:
             assert False
 
         query_parts.append("LIMIT %s")
         query_params.append(limit)
 
         query = "\n".join(query_parts)
         cur.execute(query, tuple(query_params))
         yield from cur
 
     def origin_visit_get_range(
         self, origin: str, visit_from: int, order: ListOrder, limit: int, cur=None,
     ):
         cur = self._cursor(cur)
 
         origin_visit_cols = ["o.url as origin", "ov.visit", "ov.date", "ov.type"]
         query_parts = [
             f"SELECT {', '.join(origin_visit_cols)} FROM origin_visit ov ",
             "INNER JOIN origin o ON o.id = ov.origin ",
         ]
         query_parts.append("WHERE o.url = %s")
         query_params: List[Any] = [origin]
 
         if visit_from > 0:
             op_comparison = ">" if order == ListOrder.ASC else "<"
             query_parts.append(f"and ov.visit {op_comparison} %s")
             query_params.append(visit_from)
 
         if order == ListOrder.ASC:
             query_parts.append("ORDER BY ov.visit ASC")
         elif order == ListOrder.DESC:
             query_parts.append("ORDER BY ov.visit DESC")
 
         query_parts.append("LIMIT %s")
         query_params.append(limit)
 
         query = "\n".join(query_parts)
         cur.execute(query, tuple(query_params))
         yield from cur
 
     def origin_visit_get(self, origin_id, visit_id, cur=None):
         """Retrieve information on visit visit_id of origin origin_id.
 
         Args:
             origin_id: the origin concerned
             visit_id: The visit step for that origin
 
         Returns:
             The origin_visit information
 
         """
         cur = self._cursor(cur)
 
         query = """\
             SELECT %s
             FROM origin_visit ov
             INNER JOIN origin o ON o.id = ov.origin
             INNER JOIN origin_visit_status ovs
             ON ov.origin = ovs.origin AND ov.visit = ovs.visit
             WHERE o.url = %%s AND ov.visit = %%s
             ORDER BY ovs.date DESC
             LIMIT 1
             """ % (
             ", ".join(self.origin_visit_select_cols)
         )
 
         cur.execute(query, (origin_id, visit_id))
         r = cur.fetchall()
         if not r:
             return None
         return r[0]
 
     def origin_visit_find_by_date(self, origin, visit_date, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             "SELECT * FROM swh_visit_find_by_date(%s, %s)", (origin, visit_date)
         )
         rows = cur.fetchall()
         if rows:
             visit = dict(zip(self.origin_visit_get_cols, rows[0]))
             visit["origin"] = origin
             return visit
 
     def origin_visit_exists(self, origin_id, visit_id, cur=None):
         """Check whether an origin visit with the given ids exists"""
         cur = self._cursor(cur)
 
         query = "SELECT 1 FROM origin_visit where origin = %s AND visit = %s"
 
         cur.execute(query, (origin_id, visit_id))
 
         return bool(cur.fetchone())
 
     def origin_visit_get_latest(
         self,
         origin_id: str,
         type: Optional[str],
         allowed_statuses: Optional[Iterable[str]],
         require_snapshot: bool,
         cur=None,
     ):
         """Retrieve the most recent origin_visit of the given origin,
         with optional filters.
 
         Args:
             origin_id: the origin concerned
             type: Optional visit type to filter on
             allowed_statuses: the visit statuses allowed for the returned visit
             require_snapshot (bool): If True, only a visit with a known
                 snapshot will be returned.
 
         Returns:
             The origin_visit information, or None if no visit matches.
         """
         cur = self._cursor(cur)
 
         query_parts = [
             "SELECT %s" % ", ".join(self.origin_visit_select_cols),
             "FROM origin_visit ov ",
             "INNER JOIN origin o ON o.id = ov.origin",
             "INNER JOIN origin_visit_status ovs ",
             "ON o.id = ovs.origin AND ov.visit = ovs.visit ",
         ]
         query_parts.append("WHERE o.url = %s")
         query_params: List[Any] = [origin_id]
 
         if type is not None:
             query_parts.append("AND ov.type = %s")
             query_params.append(type)
 
         if require_snapshot:
             query_parts.append("AND ovs.snapshot is not null")
 
         if allowed_statuses:
             query_parts.append("AND ovs.status IN %s")
             query_params.append(tuple(allowed_statuses))
 
         query_parts.append(
             "ORDER BY ov.date DESC, ov.visit DESC, ovs.date DESC LIMIT 1"
         )
 
         query = "\n".join(query_parts)
 
         cur.execute(query, tuple(query_params))
         r = cur.fetchone()
         if not r:
             return None
         return r
 
     def origin_visit_get_random(self, type, cur=None):
         """Randomly select one origin visit that was full and in the last 3
            months
 
         """
         cur = self._cursor(cur)
         columns = ",".join(self.origin_visit_select_cols)
         query = f"""select {columns}
                     from origin_visit ov
                     inner join origin o on ov.origin=o.id
                     inner join origin_visit_status ovs
                       on ov.origin = ovs.origin and ov.visit = ovs.visit
                     where ovs.status='full'
                       and ov.type=%s
                       and ov.date > now() - '3 months'::interval
                       and random() < 0.1
                     limit 1
                  """
         cur.execute(query, (type,))
         return cur.fetchone()
 
     @staticmethod
     def mangle_query_key(key, main_table):
         if key == "id":
             return "t.id"
         if key == "parents":
             return """
             ARRAY(
             SELECT rh.parent_id::bytea
             FROM revision_history rh
             WHERE rh.id = t.id
             ORDER BY rh.parent_rank
             )"""
         if "_" not in key:
             return "%s.%s" % (main_table, key)
 
         head, tail = key.split("_", 1)
         if head in ("author", "committer") and tail in (
             "name",
             "email",
             "id",
             "fullname",
         ):
             return "%s.%s" % (head, tail)
 
         return "%s.%s" % (main_table, key)
 
     def revision_get_from_list(self, revisions, cur=None):
         cur = self._cursor(cur)
 
         query_keys = ", ".join(
             self.mangle_query_key(k, "revision") for k in self.revision_get_cols
         )
 
         yield from execute_values_generator(
             cur,
             """
             SELECT %s FROM (VALUES %%s) as t(sortkey, id)
             LEFT JOIN revision ON t.id = revision.id
             LEFT JOIN person author ON revision.author = author.id
             LEFT JOIN person committer ON revision.committer = committer.id
             ORDER BY sortkey
             """
             % query_keys,
             ((sortkey, id) for sortkey, id in enumerate(revisions)),
         )
 
     def revision_log(self, root_revisions, limit=None, cur=None):
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM swh_revision_log(%%s, %%s)
                 """ % ", ".join(
             self.revision_get_cols
         )
 
         cur.execute(query, (root_revisions, limit))
         yield from cur
 
     revision_shortlog_cols = ["id", "parents"]
 
     def revision_shortlog(self, root_revisions, limit=None, cur=None):
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM swh_revision_list(%%s, %%s)
                 """ % ", ".join(
             self.revision_shortlog_cols
         )
 
         cur.execute(query, (root_revisions, limit))
         yield from cur
 
     def revision_get_random(self, cur=None):
         return self._get_random_row_from_table("revision", ["id"], "id", cur)
 
     def release_missing_from_list(self, releases, cur=None):
         cur = self._cursor(cur)
         yield from execute_values_generator(
             cur,
             """
             SELECT id FROM (VALUES %s) as t(id)
             WHERE NOT EXISTS (
                 SELECT 1 FROM release r WHERE r.id = t.id
             )
             """,
             ((id,) for id in releases),
         )
 
     object_find_by_sha1_git_cols = ["sha1_git", "type"]
 
     def object_find_by_sha1_git(self, ids, cur=None):
         cur = self._cursor(cur)
 
         yield from execute_values_generator(
             cur,
             """
             WITH t (sha1_git) AS (VALUES %s),
             known_objects as ((
                 select
                   id as sha1_git,
                   'release'::object_type as type,
                   object_id
                 from release r
                 where exists (select 1 from t where t.sha1_git = r.id)
             ) union all (
                 select
                   id as sha1_git,
                   'revision'::object_type as type,
                   object_id
                 from revision r
                 where exists (select 1 from t where t.sha1_git = r.id)
             ) union all (
                 select
                   id as sha1_git,
                   'directory'::object_type as type,
                   object_id
                 from directory d
                 where exists (select 1 from t where t.sha1_git = d.id)
             ) union all (
                 select
                   sha1_git as sha1_git,
                   'content'::object_type as type,
                   object_id
                 from content c
                 where exists (select 1 from t where t.sha1_git = c.sha1_git)
             ))
             select t.sha1_git as sha1_git, k.type
             from t
             left join known_objects k on t.sha1_git = k.sha1_git
             """,
             ((id,) for id in ids),
         )
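
     # Usage sketch (ids hypothetical): each input id comes back, with a NULL
     # (None) type when the id is unknown to the archive.
     #
     #     for sha1_git, object_type in db.object_find_by_sha1_git(ids):
     #         print(sha1_git.hex(), object_type or "missing")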
 
     def stat_counters(self, cur=None):
         cur = self._cursor(cur)
         cur.execute("SELECT * FROM swh_stat_counters()")
         yield from cur
 
     def origin_add(self, url, cur=None):
         """Insert a new origin and return its url."""
         cur = self._cursor(cur)
         insert = """INSERT INTO origin (url) values (%s)
                     RETURNING url"""

         cur.execute(insert, (url,))
         return cur.fetchone()[0]
 
     origin_cols = ["url"]
 
     def origin_get_by_url(self, origins, cur=None):
         """Retrieve origin `(type, url)` from urls if found."""
         cur = self._cursor(cur)
 
         query = """SELECT %s FROM (VALUES %%s) as t(url)
                    LEFT JOIN origin ON t.url = origin.url
                 """ % ",".join(
             "origin." + col for col in self.origin_cols
         )
 
         yield from execute_values_generator(cur, query, ((url,) for url in origins))
 
     def origin_get_by_sha1(self, sha1s, cur=None):
         """Retrieve origin urls from sha1s if found."""
         cur = self._cursor(cur)
 
         query = """SELECT %s FROM (VALUES %%s) as t(sha1)
                    LEFT JOIN origin ON t.sha1 = digest(origin.url, 'sha1')
                 """ % ",".join(
             "origin." + col for col in self.origin_cols
         )
 
         yield from execute_values_generator(cur, query, ((sha1,) for sha1 in sha1s))
 
     def origin_id_get_by_url(self, origins, cur=None):
         """Retrieve origin `(type, url)` from urls if found."""
         cur = self._cursor(cur)
 
         query = """SELECT id FROM (VALUES %s) as t(url)
                    LEFT JOIN origin ON t.url = origin.url
                 """
 
         for row in execute_values_generator(cur, query, ((url,) for url in origins)):
             yield row[0]
 
     origin_get_range_cols = ["id", "url"]
 
     def origin_get_range(self, origin_from: int = 1, origin_count: int = 100, cur=None):
         """Retrieve ``origin_count`` origins whose ids are greater
         or equal than ``origin_from``.
 
         Origins are sorted by id before retrieving them.
 
         Args:
             origin_from: the minimum id of origins to retrieve
             origin_count: the maximum number of origins to retrieve
 
         """
         cur = self._cursor(cur)
 
         query = """SELECT %s
                    FROM origin WHERE id >= %%s
                    ORDER BY id LIMIT %%s
                 """ % ",".join(
             self.origin_get_range_cols
         )
 
         cur.execute(query, (origin_from, origin_count))
         yield from cur
 
     def _origin_query(
         self,
         url_pattern,
         count=False,
         offset=0,
         limit=50,
         regexp=False,
         with_visit=False,
         cur=None,
     ):
         """
         Method factorizing query creation for searching and counting origins.
         """
         cur = self._cursor(cur)
 
         if count:
             origin_cols = "COUNT(*)"
             order_clause = ""
         else:
             origin_cols = ",".join(self.origin_cols)
             order_clause = "ORDER BY id"
 
         if not regexp:
             operator = "ILIKE"
             query_params = [f"%{url_pattern}%"]
         else:
             operator = "~*"
             query_params = [url_pattern]
 
         query = f"""
             WITH filtered_origins AS (
                 SELECT *
                 FROM origin
                 WHERE url {operator} %s
                 {order_clause}
             )
             SELECT {origin_cols}
             FROM filtered_origins AS o
             """
 
         if with_visit:
             query += """
                    WHERE EXISTS (
                      SELECT 1
                      FROM origin_visit ov
                      INNER JOIN origin_visit_status ovs
                        ON ov.origin = ovs.origin AND ov.visit = ovs.visit
                      INNER JOIN snapshot ON ovs.snapshot=snapshot.id
                      WHERE ov.origin=o.id
                      )
             """
 
         if not count:
             query += "OFFSET %s LIMIT %s"
             query_params.extend([offset, limit])
 
         cur.execute(query, query_params)
         return cur
 
     def origin_search(
         self,
         url_pattern: str,
         offset: int = 0,
         limit: int = 50,
         regexp: bool = False,
         with_visit: bool = False,
         cur=None,
     ):
         """Search for origins whose urls contain a provided string pattern
         or match a provided regular expression.
         The search is performed in a case insensitive way.
 
         Args:
             url_pattern: the string pattern to search for in origin urls
             offset: number of found origins to skip before returning
                 results
             limit: the maximum number of found origins to return
             regexp: if True, consider the provided pattern as a regular
                 expression and return origins whose urls match it
             with_visit: if True, filter out origins with no visit
 
         """
         cur = self._origin_query(
             url_pattern,
             offset=offset,
             limit=limit,
             regexp=regexp,
             with_visit=with_visit,
             cur=cur,
         )
         yield from cur
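
     # Usage sketch (pattern hypothetical): rows contain the origin url only;
     # pass regexp=True to match a regular expression instead of a substring.
     #
     #     for (url,) in db.origin_search("github.com/", limit=10):
     #         print(url)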
 
     def origin_count(self, url_pattern, regexp=False, with_visit=False, cur=None):
         """Count origins whose urls contain a provided string pattern
         or match a provided regular expression.
         The pattern search in origin urls is performed in a case insensitive
         way.
 
         Args:
             url_pattern (str): the string pattern to search for in origin urls
             regexp (bool): if True, consider the provided pattern as a regular
                 expression and return origins whose urls match it
             with_visit (bool): if True, filter out origins with no visit
         """
         cur = self._origin_query(
             url_pattern, count=True, regexp=regexp, with_visit=with_visit, cur=cur
         )
         return cur.fetchone()[0]
 
     release_add_cols = [
         "id",
         "target",
         "target_type",
         "date",
         "date_offset",
         "date_neg_utc_offset",
         "name",
         "comment",
         "synthetic",
         "author_fullname",
         "author_name",
         "author_email",
     ]
     release_get_cols = release_add_cols
 
     def release_get_from_list(self, releases, cur=None):
         cur = self._cursor(cur)
         query_keys = ", ".join(
             self.mangle_query_key(k, "release") for k in self.release_get_cols
         )
 
         yield from execute_values_generator(
             cur,
             """
             SELECT %s FROM (VALUES %%s) as t(sortkey, id)
             LEFT JOIN release ON t.id = release.id
             LEFT JOIN person author ON release.author = author.id
             ORDER BY sortkey
             """
             % query_keys,
             ((sortkey, id) for sortkey, id in enumerate(releases)),
         )
 
     def release_get_random(self, cur=None):
         return self._get_random_row_from_table("release", ["id"], "id", cur)
 
     _raw_extrinsic_metadata_context_cols = [
         "origin",
         "visit",
         "snapshot",
         "release",
         "revision",
         "path",
         "directory",
     ]
     """The list of context columns for all artifact types."""
 
     _raw_extrinsic_metadata_insert_cols = [
         "type",
         "id",
         "authority_id",
         "fetcher_id",
         "discovery_date",
         "format",
         "metadata",
         *_raw_extrinsic_metadata_context_cols,
     ]
     """List of columns of the raw_extrinsic_metadata table, used when writing
     metadata."""
 
     _raw_extrinsic_metadata_insert_query = f"""
         INSERT INTO raw_extrinsic_metadata
             ({', '.join(_raw_extrinsic_metadata_insert_cols)})
         VALUES ({', '.join('%s' for _ in _raw_extrinsic_metadata_insert_cols)})
         ON CONFLICT (id, authority_id, discovery_date, fetcher_id)
         DO NOTHING
     """
 
     raw_extrinsic_metadata_get_cols = [
         "raw_extrinsic_metadata.id",
         "raw_extrinsic_metadata.type",
         "discovery_date",
         "metadata_authority.type",
         "metadata_authority.url",
         "metadata_fetcher.id",
         "metadata_fetcher.name",
         "metadata_fetcher.version",
         *_raw_extrinsic_metadata_context_cols,
         "format",
         "raw_extrinsic_metadata.metadata",
     ]
     """List of columns of the raw_extrinsic_metadata, metadata_authority,
     and metadata_fetcher tables, used when reading object metadata."""
 
     _raw_extrinsic_metadata_select_query = f"""
         SELECT
             {', '.join(raw_extrinsic_metadata_get_cols)}
         FROM raw_extrinsic_metadata
         INNER JOIN metadata_authority
             ON (metadata_authority.id=authority_id)
         INNER JOIN metadata_fetcher ON (metadata_fetcher.id=fetcher_id)
         WHERE raw_extrinsic_metadata.id=%s AND authority_id=%s
     """
 
     def raw_extrinsic_metadata_add(
         self,
         type: str,
         id: str,
         discovery_date: datetime.datetime,
         authority_id: int,
         fetcher_id: int,
         format: str,
         metadata: bytes,
         origin: Optional[str],
         visit: Optional[int],
         snapshot: Optional[str],
         release: Optional[str],
         revision: Optional[str],
         path: Optional[bytes],
         directory: Optional[str],
         cur,
     ):
         query = self._raw_extrinsic_metadata_insert_query
         args: Dict[str, Any] = dict(
             type=type,
             id=id,
             authority_id=authority_id,
             fetcher_id=fetcher_id,
             discovery_date=discovery_date,
             format=format,
             metadata=metadata,
             origin=origin,
             visit=visit,
             snapshot=snapshot,
             release=release,
             revision=revision,
             path=path,
             directory=directory,
         )
 
         params = [args[col] for col in self._raw_extrinsic_metadata_insert_cols]
 
         cur.execute(query, params)
 
     def raw_extrinsic_metadata_get(
         self,
         type: str,
         id: str,
         authority_id: int,
         after_time: Optional[datetime.datetime],
         after_fetcher: Optional[int],
         limit: int,
         cur,
     ):
         query_parts = [self._raw_extrinsic_metadata_select_query]
         args = [id, authority_id]
 
         if after_fetcher is not None:
             assert after_time
             query_parts.append("AND (discovery_date, fetcher_id) > (%s, %s)")
             args.extend([after_time, after_fetcher])
         elif after_time is not None:
             query_parts.append("AND discovery_date > %s")
             args.append(after_time)
 
         query_parts.append("ORDER BY discovery_date, fetcher_id")
 
         if limit:
             query_parts.append("LIMIT %s")
             args.append(limit)
 
         cur.execute(" ".join(query_parts), args)
         yield from cur
 
     metadata_fetcher_cols = ["name", "version", "metadata"]
 
     def metadata_fetcher_add(
         self, name: str, version: str, metadata: bytes, cur=None
     ) -> None:
         cur = self._cursor(cur)
         cur.execute(
             "INSERT INTO metadata_fetcher (name, version, metadata) "
             "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING",
             (name, version, jsonize(metadata)),
         )
 
     def metadata_fetcher_get(self, name: str, version: str, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             f"SELECT {', '.join(self.metadata_fetcher_cols)} "
             f"FROM metadata_fetcher "
             f"WHERE name=%s AND version=%s",
             (name, version),
         )
         return cur.fetchone()
 
     def metadata_fetcher_get_id(
         self, name: str, version: str, cur=None
     ) -> Optional[int]:
         cur = self._cursor(cur)
         cur.execute(
             "SELECT id FROM metadata_fetcher WHERE name=%s AND version=%s",
             (name, version),
         )
         row = cur.fetchone()
         if row:
             return row[0]
         else:
             return None
 
     metadata_authority_cols = ["type", "url", "metadata"]
 
     def metadata_authority_add(
         self, type: str, url: str, metadata: bytes, cur=None
     ) -> None:
         cur = self._cursor(cur)
         cur.execute(
             "INSERT INTO metadata_authority (type, url, metadata) "
             "VALUES (%s, %s, %s) ON CONFLICT DO NOTHING",
             (type, url, jsonize(metadata)),
         )
 
     def metadata_authority_get(self, type: str, url: str, cur=None):
         cur = self._cursor(cur)
         cur.execute(
             f"SELECT {', '.join(self.metadata_authority_cols)} "
             f"FROM metadata_authority "
             f"WHERE type=%s AND url=%s",
             (type, url),
         )
         return cur.fetchone()
 
     def metadata_authority_get_id(self, type: str, url: str, cur=None) -> Optional[int]:
         cur = self._cursor(cur)
         cur.execute(
             "SELECT id FROM metadata_authority WHERE type=%s AND url=%s", (type, url)
         )
         row = cur.fetchone()
         if row:
             return row[0]
         else:
             return None
 
     def _get_random_row_from_table(self, table_name, cols, id_col, cur=None):
         random_sha1 = bytes(random.randint(0, 255) for _ in range(SHA1_SIZE))
         cur = self._cursor(cur)
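         # Strategy: pick the first row at or after a uniformly random
         # sha1-sized key; the second branch of the UNION wraps around to the
         # greatest key below it, so a random key falling past the last row
         # still yields a result.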
         query = """
             (SELECT {cols} FROM {table} WHERE {id_col} >= %s
              ORDER BY {id_col} LIMIT 1)
             UNION
             (SELECT {cols} FROM {table} WHERE {id_col} < %s
              ORDER BY {id_col} DESC LIMIT 1)
             LIMIT 1
             """.format(
             cols=", ".join(cols), table=table_name, id_col=id_col
         )
         cur.execute(query, (random_sha1, random_sha1))
         row = cur.fetchone()
         if row:
             return row[0]
 
     dbversion_cols = ["version", "release", "description"]
 
     def dbversion(self):
         with self.transaction() as cur:
             cur.execute(f"SELECT {', '.join(self.dbversion_cols)} FROM dbversion")
             return dict(zip(self.dbversion_cols, cur.fetchone()))
 
     def check_dbversion(self):
         return self.dbversion()["version"] == self.current_version
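
 # Usage sketch (assuming the BaseDb.connect constructor from swh.core.db): on
 # startup one can verify that the connected database matches the schema
 # version this module expects.
 #
 #     db = Db.connect("dbname=softwareheritage")
 #     assert db.check_dbversion(), "database schema version mismatch"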
diff --git a/swh/storage/sql/30-swh-schema.sql b/swh/storage/sql/30-swh-schema.sql
index cdf8a44a..68b67d6c 100644
--- a/swh/storage/sql/30-swh-schema.sql
+++ b/swh/storage/sql/30-swh-schema.sql
@@ -1,499 +1,499 @@
 ---
 --- SQL implementation of the Software Heritage data model
 ---
 
 -- schema versions
 create table dbversion
 (
   version     int primary key,
   release     timestamptz,
   description text
 );
 
 comment on table dbversion is 'Details of current db version';
 comment on column dbversion.version is 'SQL schema version';
 comment on column dbversion.release is 'Version deployment timestamp';
 comment on column dbversion.description is 'Release description';
 
 -- latest schema version
 insert into dbversion(version, release, description)
-      values(161, now(), 'Work In Progress');
+      values(162, now(), 'Work In Progress');
 
 -- a SHA1 checksum
 create domain sha1 as bytea check (length(value) = 20);
 
 -- a Git object ID, i.e., a Git-style salted SHA1 checksum
 create domain sha1_git as bytea check (length(value) = 20);
 
 -- a SHA256 checksum
 create domain sha256 as bytea check (length(value) = 32);
 
 -- a blake2 checksum
 create domain blake2s256 as bytea check (length(value) = 32);
 
 -- UNIX path (absolute, relative, individual path component, etc.)
 create domain unix_path as bytea;
 
 -- a set of UNIX-like access permissions, as manipulated by, e.g., chmod
 create domain file_perms as int;
 
 -- an SWHID
 create domain swhid as text check (value ~ '^swh:[0-9]+:.*');
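 -- (the check above only validates the 'swh:<version>:' prefix; a full SWHID
 -- looks like, e.g., 'swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2')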
 
 
 -- Checksums about actual file content. Note that the content itself is not
 -- stored in the DB, but on external (key-value) storage. A single checksum is
 -- used as key there, but the others can be used to verify that we do not
 -- unknowingly inject content collisions.
 create table content
 (
   sha1       sha1 not null,
   sha1_git   sha1_git not null,
   sha256     sha256 not null,
   blake2s256 blake2s256 not null,
   length     bigint not null,
   ctime      timestamptz not null default now(),
              -- creation time, i.e. time of (first) injection into the storage
   status     content_status not null default 'visible',
   object_id  bigserial
 );
 
 comment on table content is 'Checksums of file content which is actually stored externally';
 comment on column content.sha1 is 'Content sha1 hash';
 comment on column content.sha1_git is 'Git object sha1 hash';
 comment on column content.sha256 is 'Content Sha256 hash';
 comment on column content.blake2s256 is 'Content blake2s hash';
 comment on column content.length is 'Content length';
 comment on column content.ctime is 'First seen time';
 comment on column content.status is 'Content status (absent, visible, hidden)';
 comment on column content.object_id is 'Content identifier';
 
 
 -- An origin is a place, identified by a URL, where software source code
 -- artifacts can be found. We support different kinds of origins, e.g., git and
 -- other VCS repositories, web pages that list tarball URLs (e.g.,
 -- http://www.kernel.org), indirect tarball URLs (e.g.,
 -- http://www.example.org/latest.tar.gz), etc. The key feature of an origin is
 -- that it can be *fetched* from (wget, git clone, svn checkout, etc.) to
 -- retrieve all the contained software.
 create table origin
 (
   id       bigserial not null,
   url      text not null
 );
 
 comment on column origin.id is 'Artifact origin id';
 comment on column origin.url is 'URL of origin';
 
 
 -- Content blobs observed somewhere, but not ingested into the archive for
 -- whatever reason. This table is separate from the content table as we might
 -- not have the sha1 checksum of skipped contents (for instance when we inject
 -- git repositories, objects that are too big will be skipped here, and we will
 -- only know their sha1_git). 'reason' contains the reason the content was
 -- skipped. 'origin' is a nullable column making it possible to find out
 -- which origin contains that skipped content.
 create table skipped_content
 (
   sha1       sha1,
   sha1_git   sha1_git,
   sha256     sha256,
   blake2s256 blake2s256,
   length     bigint not null,
   ctime      timestamptz not null default now(),
   status     content_status not null default 'absent',
   reason     text not null,
   origin     bigint,
   object_id  bigserial
 );
 
 comment on table skipped_content is 'Content blobs observed, but not ingested in the archive';
 comment on column skipped_content.sha1 is 'Skipped content sha1 hash';
 comment on column skipped_content.sha1_git is 'Git object sha1 hash';
 comment on column skipped_content.sha256 is 'Skipped content sha256 hash';
 comment on column skipped_content.blake2s256 is 'Skipped content blake2s hash';
 comment on column skipped_content.length is 'Skipped content length';
 comment on column skipped_content.ctime is 'First seen time';
 comment on column skipped_content.status is 'Skipped content status (absent, visible, hidden)';
 comment on column skipped_content.reason is 'Reason for skipping';
 comment on column skipped_content.origin is 'Origin table identifier';
 comment on column skipped_content.object_id is 'Skipped content identifier';
 
 
 -- A file-system directory.  A directory is a list of directory entries (see
 -- tables: directory_entry_{dir,file}).
 --
 -- To list the contents of a directory:
 -- 1. list the contained directory_entry_dir using array dir_entries
 -- 2. list the contained directory_entry_file using array file_entries
 -- 3. list the contained directory_entry_rev using array rev_entries
 -- 4. UNION
 --
 -- Synonyms/mappings:
 -- * git: tree
 create table directory
 (
   id            sha1_git not null,
   dir_entries   bigint[],  -- sub-directories, reference directory_entry_dir
   file_entries  bigint[],  -- contained files, reference directory_entry_file
   rev_entries   bigint[],  -- mounted revisions, reference directory_entry_rev
   object_id     bigserial  -- short object identifier
 );
 
 comment on table directory is 'Contents of a directory, synonymous to tree (git)';
 comment on column directory.id is 'Git object sha1 hash';
 comment on column directory.dir_entries is 'Sub-directories, reference directory_entry_dir';
 comment on column directory.file_entries is 'Contained files, reference directory_entry_file';
 comment on column directory.rev_entries is 'Mounted revisions, reference directory_entry_rev';
 comment on column directory.object_id is 'Short object identifier';
 
 
 -- A directory entry pointing to a (sub-)directory.
 create table directory_entry_dir
 (
   id      bigserial,
   target  sha1_git not null,   -- id of target directory
   name    unix_path not null,  -- path name, relative to containing dir
   perms   file_perms not null  -- unix-like permissions
 );
 
 comment on table directory_entry_dir is 'Directory entry for directory';
 comment on column directory_entry_dir.id is 'Directory identifier';
 comment on column directory_entry_dir.target is 'Target directory identifier';
 comment on column directory_entry_dir.name is 'Path name, relative to containing directory';
 comment on column directory_entry_dir.perms is 'Unix-like permissions';
 
 
 -- A directory entry pointing to a file content.
 create table directory_entry_file
 (
   id      bigserial,
   target  sha1_git not null,   -- id of target file
   name    unix_path not null,  -- path name, relative to containing dir
   perms   file_perms not null  -- unix-like permissions
 );
 
 comment on table directory_entry_file is 'Directory entry for file';
 comment on column directory_entry_file.id is 'File identifier';
 comment on column directory_entry_file.target is 'Target file identifier';
 comment on column directory_entry_file.name is 'Path name, relative to containing directory';
 comment on column directory_entry_file.perms is 'Unix-like permissions';
 
 
 -- A directory entry pointing to a revision.
 create table directory_entry_rev
 (
   id      bigserial,
   target  sha1_git not null,   -- id of target revision
   name    unix_path not null,  -- path name, relative to containing dir
   perms   file_perms not null  -- unix-like permissions
 );
 
 comment on table directory_entry_rev is 'Directory entry for revision';
 comment on column directory_entry_rev.id is 'Revision identifier';
 comment on column directory_entry_rev.target is 'Target revision identifier';
 comment on column directory_entry_rev.name is 'Path name, relative to containing directory';
 comment on column directory_entry_rev.perms is 'Unix-like permissions';
 
 
 -- A person referenced by some source code artifacts, e.g., a VCS revision or
 -- release metadata.
 create table person
 (
   id        bigserial,
   name      bytea,          -- advisory: not null if we managed to parse a name
   email     bytea,          -- advisory: not null if we managed to parse an email
   fullname  bytea not null  -- freeform specification; what is actually used in the checksums
                             --     will usually be of the form 'name <email>'
 );
 
 comment on table person is 'Person referenced in code artifact release metadata';
 comment on column person.id is 'Person identifier';
 comment on column person.name is 'Name';
 comment on column person.email is 'Email';
 comment on column person.fullname is 'Full name (raw name)';
 
 
 -- The state of a source code tree at a specific point in time.
 --
 -- Synonyms/mappings:
 -- * git / subversion / etc: commit
 -- * tarball: a specific tarball
 --
 -- Revisions are organized as DAGs. Each revision points to 0, 1, or more (in
 -- case of merges) parent revisions. Each revision points to a directory, i.e.,
 -- a file-system tree containing files and directories.
 create table revision
 (
   id                    sha1_git not null,
   date                  timestamptz,
   date_offset           smallint,
   committer_date        timestamptz,
   committer_date_offset smallint,
   type                  revision_type not null,
   directory             sha1_git,  -- source code 'root' directory
   message               bytea,
   author                bigint,
   committer             bigint,
   synthetic             boolean not null default false,  -- true iff revision has been created by Software Heritage
   metadata              jsonb,  -- extra metadata (tarball checksums, extra commit information, etc...)
   object_id             bigserial,
   date_neg_utc_offset   boolean,
   committer_date_neg_utc_offset boolean,
   extra_headers         bytea[][] not null  -- extra headers (used in hash computation)
 );
 
 comment on table revision is 'A revision represents the state of a source code tree at a specific point in time';
 comment on column revision.id is 'Git-style SHA1 commit identifier';
 comment on column revision.date is 'Author timestamp as UNIX epoch';
 comment on column revision.date_offset is 'Author timestamp timezone, as minute offsets from UTC';
 comment on column revision.date_neg_utc_offset is 'True indicates a -0 UTC offset on author timestamp';
 comment on column revision.committer_date is 'Committer timestamp as UNIX epoch';
 comment on column revision.committer_date_offset is 'Committer timestamp timezone, as minute offsets from UTC';
 comment on column revision.committer_date_neg_utc_offset is 'True indicates a -0 UTC offset on committer timestamp';
 comment on column revision.type is 'Type of revision';
 comment on column revision.directory is 'Directory identifier';
 comment on column revision.message is 'Commit message';
 comment on column revision.author is 'Author identity';
 comment on column revision.committer is 'Committer identity';
 comment on column revision.synthetic is 'True iff revision has been synthesized by Software Heritage';
 comment on column revision.metadata is 'Extra revision metadata';
 comment on column revision.object_id is 'Non-intrinsic, sequential object identifier';
 comment on column revision.extra_headers is 'Extra revision headers; used in revision hash computation';
 
 
 -- Revision history: the parents of each revision. (Parents could instead have
 -- been stored as a sha1_git[] column on the revision table; this separate
 -- table is the representation actually used.)
 create table revision_history
 (
   id           sha1_git not null,
   parent_id    sha1_git not null,
   parent_rank  int not null default 0
     -- parent position in merge commits, 0-based
 );
 
 comment on table revision_history is 'Sequence of revision history with parent and position in history';
 comment on column revision_history.id is 'Revision history git object sha1 checksum';
 comment on column revision_history.parent_id is 'Parent revision git object identifier';
 comment on column revision_history.parent_rank is 'Parent position in merge commits, 0-based';
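 -- Illustrative query (a sketch; the id is a made-up hash): retrieve the
 -- parents of a revision, in merge order.
 --   select parent_id
 --   from revision_history
 --   where id = '\x0000000000000000000000000000000000000000'
 --   order by parent_rank;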
 
 
 -- Crawling history of software origins visited by Software Heritage. Each
 -- visit is a 3-way mapping between a software origin, a timestamp, and a
 -- snapshot object capturing the full-state of the origin at visit time.
 create table origin_visit
 (
   origin       bigint not null,
   visit        bigint not null,
   date         timestamptz not null,
   type         text not null
 );
 
 comment on column origin_visit.origin is 'Visited origin';
 comment on column origin_visit.visit is 'Sequential visit number for the origin';
 comment on column origin_visit.date is 'Visit timestamp';
 comment on column origin_visit.type is 'Type of loader that did the visit (hg, git, ...)';
 
 
 -- Crawling history of software origin visits by Software Heritage. Each
 -- visit sees its history change through new origin visit status updates.
 create table origin_visit_status
 (
   origin   bigint not null,
   visit    bigint not null,
   date     timestamptz not null,
   status   origin_visit_state not null,
   metadata jsonb,
   snapshot sha1_git
 );
 
 comment on column origin_visit_status.origin is 'Origin concerned by the visit update';
 comment on column origin_visit_status.visit is 'Visit concerned by the visit update';
 comment on column origin_visit_status.date is 'Visit update timestamp';
 comment on column origin_visit_status.status is 'Visit status (ongoing, failed, full)';
 comment on column origin_visit_status.metadata is 'Optional origin visit metadata';
 comment on column origin_visit_status.snapshot is 'Optional, possibly partial, snapshot of the origin visit';
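 -- Illustrative query (a sketch, assuming an existing origin id): fetch the
 -- most recent status update of the latest visit of an origin.
 --   select v.visit, s.date, s.status, s.snapshot
 --   from origin_visit v
 --   join origin_visit_status s using (origin, visit)
 --   where v.origin = 42
 --   order by v.visit desc, s.date desc
 --   limit 1;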
 
 
 -- A snapshot represents the entire state of a software origin as crawled by
 -- Software Heritage. This table is a simple mapping between (public) intrinsic
 -- snapshot identifiers and (private) numeric sequential identifiers.
 create table snapshot
 (
   object_id  bigserial not null,  -- PK internal object identifier
   id         sha1_git not null    -- snapshot intrinsic identifier
 );
 
 comment on table snapshot is 'State of a software origin as crawled by Software Heritage';
 comment on column snapshot.object_id is 'Internal object identifier';
 comment on column snapshot.id is 'Intrinsic snapshot identifier';
 
 
 -- Each snapshot associates "branch" names to other objects in the Software
 -- Heritage Merkle DAG. This table describes branches as mappings between
 -- names and typed target objects.
 create table snapshot_branch
 (
   object_id    bigserial not null,  -- PK internal object identifier
   name         bytea not null,      -- branch name, e.g., "master" or "feature/drag-n-drop"
   target       bytea,               -- target object identifier, e.g., a revision identifier
   target_type  snapshot_target      -- target object type, e.g., "revision"
 );
 
 comment on table snapshot_branch is 'Associates branches with objects in Heritage Merkle DAG';
 comment on column snapshot_branch.object_id is 'Internal object identifier';
 comment on column snapshot_branch.name is 'Branch name';
 comment on column snapshot_branch.target is 'Target object identifier';
 comment on column snapshot_branch.target_type is 'Target object type';
 
 
 -- Mapping between snapshots and their branches.
 create table snapshot_branches
 (
   snapshot_id  bigint not null,  -- snapshot identifier, ref. snapshot.object_id
   branch_id    bigint not null   -- branch identifier, ref. snapshot_branch.object_id
 );
 
 comment on table snapshot_branches is 'Mapping between snapshots and their branches';
 comment on column snapshot_branches.snapshot_id is 'Snapshot identifier';
 comment on column snapshot_branches.branch_id is 'Branch identifier';
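 -- Illustrative query (a sketch; the id is a made-up hash): resolve the
 -- branches of a snapshot through this mapping.
 --   select b.name, b.target, b.target_type
 --   from snapshot s
 --   join snapshot_branches sb on sb.snapshot_id = s.object_id
 --   join snapshot_branch b on b.object_id = sb.branch_id
 --   where s.id = '\x0000000000000000000000000000000000000000';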
 
 
 -- A "memorable" point in time in the development history of a software
 -- project.
 --
 -- Synonyms/mappings:
 -- * git: tag (of the annotated kind, otherwise they are just references)
 -- * tarball: the release version number
 create table release
 (
   id          sha1_git not null,
   target      sha1_git,
   date        timestamptz,
   date_offset smallint,
   name        bytea,
   comment     bytea,
   author      bigint,
   synthetic   boolean not null default false,  -- true iff release has been created by Software Heritage
   object_id   bigserial,
   target_type object_type not null,
   date_neg_utc_offset  boolean
 );
 
 comment on table release is 'Details of a software release, synonymous with
  a tag (git) or version number (tarball)';
 comment on column release.id is 'Release git identifier';
 comment on column release.target is 'Target git identifier';
 comment on column release.date is 'Release timestamp';
 comment on column release.date_offset is 'Timestamp offset from UTC';
 comment on column release.name is 'Name';
 comment on column release.comment is 'Comment';
 comment on column release.author is 'Author';
 comment on column release.synthetic is 'Indicates if created by Software Heritage';
 comment on column release.object_id is 'Object identifier';
 comment on column release.target_type is 'Object type (''content'', ''directory'', ''revision'',
  ''release'', ''snapshot'')';
 comment on column release.date_neg_utc_offset is 'True indicates -0 UTC offset for release timestamp';
 
 -- Tools
 create table metadata_fetcher
 (
   id            serial  not null,
   name          text    not null,
   version       text    not null,
   metadata      jsonb   not null
 );
 
 comment on table metadata_fetcher is 'Tools used to retrieve metadata';
 comment on column metadata_fetcher.id is 'Internal identifier of the fetcher';
 comment on column metadata_fetcher.name is 'Fetcher name';
 comment on column metadata_fetcher.version is 'Fetcher version';
 comment on column metadata_fetcher.metadata is 'Extra information about the fetcher';
 
 
 create table metadata_authority
 (
   id            serial  not null,
   type          text    not null,
   url           text    not null,
   metadata      jsonb   not null
 );
 
 comment on table metadata_authority is 'Metadata authority information';
 comment on column metadata_authority.id is 'Internal identifier of the authority';
 comment on column metadata_authority.type is 'Type of authority (deposit_client/forge/registry)';
 comment on column metadata_authority.url is 'Authority''s uri';
 comment on column metadata_authority.metadata is 'Other metadata about authority';
 
 
 -- Extrinsic metadata on DAG objects and origins.
 create table raw_extrinsic_metadata
 (
   type           text          not null,
   id             text          not null,
 
   -- metadata source
   authority_id   bigint        not null,
   fetcher_id     bigint        not null,
   discovery_date timestamptz   not null,
 
   -- metadata itself
   format         text          not null,
   metadata       bytea         not null,
 
   -- context
   origin         text,
   visit          bigint,
   snapshot       swhid,
   release        swhid,
   revision       swhid,
   path           bytea,
   directory      swhid
 );
 
 comment on table raw_extrinsic_metadata is 'keeps all metadata found concerning an object';
 comment on column raw_extrinsic_metadata.type is 'the type of object (content/directory/revision/release/snapshot/origin) the metadata is on';
 comment on column raw_extrinsic_metadata.id is 'the SWHID or origin URL for which the metadata was found';
 comment on column raw_extrinsic_metadata.discovery_date is 'the date of retrieval';
 comment on column raw_extrinsic_metadata.authority_id is 'the metadata provider: github, openhub, deposit, etc.';
 comment on column raw_extrinsic_metadata.fetcher_id is 'the tool used for extracting metadata: loaders, crawlers, etc.';
 comment on column raw_extrinsic_metadata.format is 'name of the format of metadata, used by readers to interpret it.';
 comment on column raw_extrinsic_metadata.metadata is 'original metadata in opaque format';
 
 
 -- Keep a cache of object counts
 create table object_counts
 (
   object_type text,             -- table for which we're counting objects (PK)
   value bigint,                 -- count of objects in the table
   last_update timestamptz,      -- last update for the object count in this table
   single_update boolean         -- whether we update this table standalone (true) or through bucketed counts (false)
 );
 
 comment on table object_counts is 'Cache of object counts';
 comment on column object_counts.object_type is 'Object type (''content'', ''directory'', ''revision'',
  ''release'', ''snapshot'')';
 comment on column object_counts.value is 'Count of objects in the table';
 comment on column object_counts.last_update is 'Last update for object count';
 comment on column object_counts.single_update is 'standalone (true) or bucketed counts (false)';
 
 
 create table object_counts_bucketed
 (
     line serial not null,       -- PK
     object_type text not null,  -- table for which we're counting objects
     identifier text not null,   -- identifier across which we're bucketing objects
     bucket_start bytea,         -- lower bound (inclusive) for the bucket
     bucket_end bytea,           -- upper bound (exclusive) for the bucket
     value bigint,               -- count of objects in the bucket
     last_update timestamptz     -- last update for the object count in this bucket
 );
 
 comment on table object_counts_bucketed is 'Bucketed count for objects ordered by type';
 comment on column object_counts_bucketed.line is 'Auto-incremented identifier value';
 comment on column object_counts_bucketed.object_type is 'Object type (''content'', ''directory'', ''revision'',
  ''release'', ''snapshot'')';
 comment on column object_counts_bucketed.identifier is 'Common identifier for bucketed objects';
 comment on column object_counts_bucketed.bucket_start is 'Lower bound (inclusive) for the bucket';
 comment on column object_counts_bucketed.bucket_end is 'Upper bound (exclusive) for the bucket';
 comment on column object_counts_bucketed.value is 'Count of objects in the bucket';
 comment on column object_counts_bucketed.last_update is 'Last update for the object count in this bucket';
diff --git a/swh/storage/sql/40-swh-func.sql b/swh/storage/sql/40-swh-func.sql
index fa177309..da50c252 100644
--- a/swh/storage/sql/40-swh-func.sql
+++ b/swh/storage/sql/40-swh-func.sql
@@ -1,950 +1,960 @@
 create or replace function hash_sha1(text)
        returns text
 as $$
    select encode(digest($1, 'sha1'), 'hex')
 $$ language sql strict immutable;
 
 comment on function hash_sha1(text) is 'Compute SHA1 hash as text';
 
 -- create a temporary table called tmp_TBLNAME, mimicking existing table
 -- TBLNAME
 --
 -- Args:
 --     tblname: name of the table to mimic
 create or replace function swh_mktemp(tblname regclass)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
 	create temporary table if not exists tmp_%1$I
 	    (like %1$I including defaults)
 	    on commit delete rows;
       alter table tmp_%1$I drop column if exists object_id;
 	', tblname);
     return;
 end
 $$;
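 -- Illustrative usage (a sketch): mirror the "skipped_content" table into a
 -- temporary table ready for a bulk COPY; object_id is dropped, as it is
 -- assigned on insertion into the final table.
 --   select swh_mktemp('skipped_content');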
 
 -- create a temporary table for directory entries called tmp_TBLNAME,
 -- mimicking existing table TBLNAME with an extra dir_id (sha1_git)
 -- column, and dropping the id column.
 --
 -- This is used to create the tmp_directory_entry_<foo> tables.
 --
 -- Args:
 --     tblname: name of the table to mimic
 create or replace function swh_mktemp_dir_entry(tblname regclass)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
 	create temporary table if not exists tmp_%1$I
 	    (like %1$I including defaults, dir_id sha1_git)
 	    on commit delete rows;
         alter table tmp_%1$I drop column if exists id;
 	', tblname);
     return;
 end
 $$;
 
 -- create a temporary table for revisions called tmp_revisions,
 -- mimicking existing table revision, replacing the foreign keys to
 -- people with an email and name field
 --
 create or replace function swh_mktemp_revision()
     returns void
     language sql
 as $$
     create temporary table if not exists tmp_revision (
         like revision including defaults,
         author_fullname bytea,
         author_name bytea,
         author_email bytea,
         committer_fullname bytea,
         committer_name bytea,
         committer_email bytea
     ) on commit delete rows;
     alter table tmp_revision drop column if exists author;
     alter table tmp_revision drop column if exists committer;
     alter table tmp_revision drop column if exists object_id;
 $$;
 
 -- create a temporary table for releases called tmp_release,
 -- mimicking existing table release, replacing the foreign keys to
 -- people with an email and name field
 --
 create or replace function swh_mktemp_release()
     returns void
     language sql
 as $$
     create temporary table if not exists tmp_release (
         like release including defaults,
         author_fullname bytea,
         author_name bytea,
         author_email bytea
     ) on commit delete rows;
     alter table tmp_release drop column if exists author;
     alter table tmp_release drop column if exists object_id;
 $$;
 
 -- create a temporary table for the branches of a snapshot
 create or replace function swh_mktemp_snapshot_branch()
     returns void
     language sql
 as $$
   create temporary table if not exists tmp_snapshot_branch (
       name bytea not null,
       target bytea,
       target_type snapshot_target
   ) on commit delete rows;
 $$;
 
 -- a content signature is a set of cryptographic checksums that we use to
 -- uniquely identify content, for the purpose of verifying if we already have
 -- some content or not during content injection
 create type content_signature as (
     sha1       sha1,
     sha1_git   sha1_git,
     sha256     sha256,
     blake2s256 blake2s256
 );
 
 
 -- check which entries of tmp_skipped_content are missing from skipped_content
 --
 -- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content,
 -- 2. call this function
 create or replace function swh_skipped_content_missing()
     returns setof content_signature
     language plpgsql
 as $$
 begin
     return query
 	select sha1, sha1_git, sha256, blake2s256 from tmp_skipped_content t
 	where not exists
 	(select 1 from skipped_content s where
 	    s.sha1 is not distinct from t.sha1 and
 	    s.sha1_git is not distinct from t.sha1_git and
 	    s.sha256 is not distinct from t.sha256);
     return;
 end
 $$;
 
 
 -- add tmp_content entries to content, skipping duplicates
 --
 -- operates in bulk: 0. swh_mktemp(content), 1. COPY to tmp_content,
 -- 2. call this function
 create or replace function swh_content_add()
     returns void
     language plpgsql
 as $$
 begin
     insert into content (sha1, sha1_git, sha256, blake2s256, length, status, ctime)
         select distinct sha1, sha1_git, sha256, blake2s256, length, status, ctime from tmp_content;
     return;
 end
 $$;
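 -- Illustrative usage (a sketch) of the bulk flow described above:
 --   select swh_mktemp('content');
 --   copy tmp_content (sha1, sha1_git, sha256, blake2s256, length, status) from stdin;
 --   select swh_content_add();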
 
 
 -- add tmp_skipped_content entries to skipped_content, skipping duplicates
 --
 -- operates in bulk: 0. swh_mktemp(skipped_content), 1. COPY to tmp_skipped_content,
 -- 2. call this function
 create or replace function swh_skipped_content_add()
     returns void
     language plpgsql
 as $$
 begin
     insert into skipped_content (sha1, sha1_git, sha256, blake2s256, length, status, reason, origin)
         select distinct sha1, sha1_git, sha256, blake2s256, length, status, reason, origin
 	from tmp_skipped_content
 	where (coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')) in (
             select coalesce(sha1, ''), coalesce(sha1_git, ''), coalesce(sha256, '')
             from swh_skipped_content_missing()
         );
         -- TODO XXX use postgres 9.5 "UPSERT" support here, when available.
         -- Specifically, using "INSERT .. ON CONFLICT IGNORE" we can avoid
         -- the extra swh_skipped_content_missing() query here.
     return;
 end
 $$;
 
 -- Update content entries from temporary table.
 -- (columns_update lists the columns to overwrite, e.g., columns newly added
 -- to the schema; the list cannot be empty)
 --
 create or replace function swh_content_update(columns_update text[])
     returns void
     language plpgsql
 as $$
 declare
    query text;
    tmp_array text[];
 begin
     if array_length(columns_update, 1) = 0 then
         raise exception 'Please provide the list of column names to update.';
     end if;
 
     tmp_array := array(select format('%1$s=t.%1$s', unnest) from unnest(columns_update));
 
     query = format('update content set %s
                     from tmp_content t where t.sha1 = content.sha1',
                     array_to_string(tmp_array, ', '));
 
     execute query;
 
     return;
 end
 $$;
 
 comment on function swh_content_update(text[]) IS 'Update existing content''s columns';
 
 
 create type directory_entry_type as enum('file', 'dir', 'rev');
 
 
 -- Add tmp_directory_entry_* entries to directory_entry_* and directory,
 -- skipping duplicates in directory_entry_*.  This is a generic function that
 -- works on all kind of directory entries.
 --
 -- operates in bulk: 0. swh_mktemp_dir_entry('directory_entry_*'), 1 COPY to
 -- tmp_directory_entry_*, 2. call this function
 --
 -- Assumption: this function is used in the same transaction that inserts the
 -- context directory in table "directory".
 create or replace function swh_directory_entry_add(typ directory_entry_type)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
     insert into directory_entry_%1$s (target, name, perms)
     select distinct t.target, t.name, t.perms
     from tmp_directory_entry_%1$s t
     where not exists (
     select 1
     from directory_entry_%1$s i
     where t.target = i.target and t.name = i.name and t.perms = i.perms)
    ', typ);
 
     execute format('
     with new_entries as (
 	select t.dir_id, array_agg(i.id) as entries
 	from tmp_directory_entry_%1$s t
 	inner join directory_entry_%1$s i
 	using (target, name, perms)
 	group by t.dir_id
     )
     update tmp_directory as d
     set %1$s_entries = new_entries.entries
     from new_entries
     where d.id = new_entries.dir_id
     ', typ);
 
     return;
 end
 $$;
 
 -- Insert the data from tmp_directory, tmp_directory_entry_file,
 -- tmp_directory_entry_dir, tmp_directory_entry_rev into their final
 -- tables.
 --
 -- Prerequisites:
 --  directory ids in tmp_directory
 --  entries in tmp_directory_entry_{file,dir,rev}
 --
 create or replace function swh_directory_add()
     returns void
     language plpgsql
 as $$
 begin
     perform swh_directory_entry_add('file');
     perform swh_directory_entry_add('dir');
     perform swh_directory_entry_add('rev');
 
     insert into directory
     select * from tmp_directory t
     where not exists (
         select 1 from directory d
 	where d.id = t.id);
 
     return;
 end
 $$;
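 -- Illustrative usage (a sketch) of the directory bulk-load flow described
 -- above:
 --   select swh_mktemp('directory');
 --   select swh_mktemp_dir_entry('directory_entry_file');
 --   -- copy rows into tmp_directory and tmp_directory_entry_file, then:
 --   select swh_directory_add();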
 
 -- a directory listing entry with all the metadata
 --
 -- can be used to list a directory, and retrieve all the data in one go.
 create type directory_entry as
 (
   dir_id   sha1_git,     -- id of the parent directory
   type     directory_entry_type,  -- type of entry
   target   sha1_git,     -- id of target
   name     unix_path,    -- path name, relative to containing dir
   perms    file_perms,   -- unix-like permissions
   status   content_status,  -- visible or absent
   sha1     sha1,            -- content's sha1 if type is not dir
   sha1_git sha1_git,        -- content's sha1 git if type is not dir
   sha256   sha256,          -- content's sha256 if type is not dir
   length   bigint           -- content length if type is not dir
 );
 
 
 -- List a single level of directory walked_dir_id
 -- FIXME: ordering by name is not correct. For git, we need lexicographic
 -- order, computed as if a trailing / were present in each directory
 -- name
 create or replace function swh_directory_walk_one(walked_dir_id sha1_git)
     returns setof directory_entry
     language sql
     stable
 as $$
     with dir as (
 	select id as dir_id, dir_entries, file_entries, rev_entries
 	from directory
 	where id = walked_dir_id),
     ls_d as (select dir_id, unnest(dir_entries) as entry_id from dir),
     ls_f as (select dir_id, unnest(file_entries) as entry_id from dir),
     ls_r as (select dir_id, unnest(rev_entries) as entry_id from dir)
     (select dir_id, 'dir'::directory_entry_type as type,
             e.target, e.name, e.perms, NULL::content_status,
             NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
      from ls_d
      left join directory_entry_dir e on ls_d.entry_id = e.id)
     union
-    (select dir_id, 'file'::directory_entry_type as type,
+    (with known_contents as
+	(select dir_id, 'file'::directory_entry_type as type,
             e.target, e.name, e.perms, c.status,
             c.sha1, c.sha1_git, c.sha256, c.length
-     from ls_f
-     left join directory_entry_file e on ls_f.entry_id = e.id
-     left join content c on e.target = c.sha1_git)
+         from ls_f
+         left join directory_entry_file e on ls_f.entry_id = e.id
+         inner join content c on e.target = c.sha1_git)
+        select * from known_contents
+	union
+	(select dir_id, 'file'::directory_entry_type as type,
+            e.target, e.name, e.perms, c.status,
+            c.sha1, c.sha1_git, c.sha256, c.length
+         from ls_f
+         left join directory_entry_file e on ls_f.entry_id = e.id
+         left join skipped_content c on e.target = c.sha1_git
+         where not exists (select 1 from known_contents where known_contents.sha1_git=e.target)))
     union
     (select dir_id, 'rev'::directory_entry_type as type,
             e.target, e.name, e.perms, NULL::content_status,
             NULL::sha1, NULL::sha1_git, NULL::sha256, NULL::bigint
      from ls_r
      left join directory_entry_rev e on ls_r.entry_id = e.id)
     order by name;
 $$;
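 -- Illustrative usage (a sketch; the id is a made-up hash). With the change
 -- above, file entries whose blobs were skipped at ingestion now surface with
 -- status 'absent' and whatever checksums are known, instead of all-null
 -- content columns.
 --   select name, target, status, length
 --   from swh_directory_walk_one('\x0000000000000000000000000000000000000000');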
 
 -- List recursively the revision directory arborescence
 create or replace function swh_directory_walk(walked_dir_id sha1_git)
     returns setof directory_entry
     language sql
     stable
 as $$
     with recursive entries as (
         select dir_id, type, target, name, perms, status, sha1, sha1_git,
                sha256, length
         from swh_directory_walk_one(walked_dir_id)
         union all
         select dir_id, type, target, (dirname || '/' || name)::unix_path as name,
                perms, status, sha1, sha1_git, sha256, length
         from (select (swh_directory_walk_one(dirs.target)).*, dirs.name as dirname
               from (select target, name from entries where type = 'dir') as dirs) as with_parent
     )
     select dir_id, type, target, name, perms, status, sha1, sha1_git, sha256, length
     from entries
 $$;
 
 -- Find a directory entry by its path
 create or replace function swh_find_directory_entry_by_path(
     walked_dir_id sha1_git,
     dir_or_content_path bytea[])
     returns directory_entry
     language plpgsql
 as $$
 declare
     end_index integer;
     paths bytea default '';
     path bytea;
     res bytea[];
     r record;
 begin
     end_index := array_upper(dir_or_content_path, 1);
     res[1] := walked_dir_id;
 
     for i in 1..end_index
     loop
         path := dir_or_content_path[i];
         -- concatenate path for patching the name in the result record (if we found it)
         if i = 1 then
             paths = path;
         else
             paths := paths || '/' || path;  -- concatenate paths
         end if;
 
         if i <> end_index then
             select *
             from swh_directory_walk_one(res[i] :: sha1_git)
             where name=path
             and type = 'dir'
             limit 1 into r;
         else
             select *
             from swh_directory_walk_one(res[i] :: sha1_git)
             where name=path
             limit 1 into r;
         end if;
 
         -- find the path
         if r is null then
            return null;
         else
             -- store the next dir to lookup the next local path from
             res[i+1] := r.target;
         end if;
     end loop;
 
     -- at this moment, r is the result. Patch its 'name' with the full path before returning it.
     r.name := paths;
     return r;
 end
 $$;
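 -- Illustrative usage (a sketch; the id is a made-up hash): look up
 -- 'src/main.c' under a root directory.
 --   select * from swh_find_directory_entry_by_path(
 --       '\x0000000000000000000000000000000000000000',
 --       array['src'::bytea, 'main.c'::bytea]);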
 
 -- List all revision IDs starting from a given revision, going back in time
 --
 -- TODO ordering: should be breadth-first right now (what do we want?)
 -- TODO ordering: ORDER BY parent_rank somewhere?
 create or replace function swh_revision_list(root_revisions bytea[], num_revs bigint default NULL)
     returns table (id sha1_git, parents bytea[])
     language sql
     stable
 as $$
     with recursive full_rev_list(id) as (
         (select id from revision where id = ANY(root_revisions))
         union
         (select h.parent_id
          from revision_history as h
          join full_rev_list on h.id = full_rev_list.id)
     ),
     rev_list as (select id from full_rev_list limit num_revs)
     select rev_list.id as id,
            array(select rh.parent_id::bytea
                  from revision_history rh
                  where rh.id = rev_list.id
                  order by rh.parent_rank
                 ) as parent
     from rev_list;
 $$;
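 -- Illustrative usage (a sketch; the id is a made-up hash): list at most ten
 -- revision ids reachable from a given root.
 --   select * from swh_revision_list(
 --       array['\x0000000000000000000000000000000000000000'::bytea], 10);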
 
 
 -- Detailed entry for a revision
 create type revision_entry as
 (
   id                             sha1_git,
   date                           timestamptz,
   date_offset                    smallint,
   date_neg_utc_offset            boolean,
   committer_date                 timestamptz,
   committer_date_offset          smallint,
   committer_date_neg_utc_offset  boolean,
   type                           revision_type,
   directory                      sha1_git,
   message                        bytea,
   author_id                      bigint,
   author_fullname                bytea,
   author_name                    bytea,
   author_email                   bytea,
   committer_id                   bigint,
   committer_fullname             bytea,
   committer_name                 bytea,
   committer_email                bytea,
   metadata                       jsonb,
   synthetic                      boolean,
   parents                        bytea[],
   object_id                      bigint,
   extra_headers                  bytea[][]
 );
 
 
 -- "git style" revision log. Similar to swh_revision_list(), but returning all
 -- information associated to each revision, and expanding authors/committers
 create or replace function swh_revision_log(root_revisions bytea[], num_revs bigint default NULL)
     returns setof revision_entry
     language sql
     stable
 as $$
     select t.id, r.date, r.date_offset, r.date_neg_utc_offset,
            r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
            r.type, r.directory, r.message,
            a.id, a.fullname, a.name, a.email,
            c.id, c.fullname, c.name, c.email,
            r.metadata, r.synthetic, t.parents, r.object_id, r.extra_headers
     from swh_revision_list(root_revisions, num_revs) as t
     left join revision r on t.id = r.id
     left join person a on a.id = r.author
     left join person c on c.id = r.committer;
 $$;
 
 
 -- Detailed entry for a release
 create type release_entry as
 (
   id                   sha1_git,
   target               sha1_git,
   target_type          object_type,
   date                 timestamptz,
   date_offset          smallint,
   date_neg_utc_offset  boolean,
   name                 bytea,
   comment              bytea,
   synthetic            boolean,
   author_id            bigint,
   author_fullname      bytea,
   author_name          bytea,
   author_email         bytea,
   object_id            bigint
 );
 
 -- Create entries in person from tmp_revision
 create or replace function swh_person_add_from_revision()
     returns void
     language plpgsql
 as $$
 begin
     with t as (
         select author_fullname as fullname, author_name as name, author_email as email from tmp_revision
     union
         select committer_fullname as fullname, committer_name as name, committer_email as email from tmp_revision
     ) insert into person (fullname, name, email)
     select distinct on (fullname) fullname, name, email from t
     where not exists (
         select 1
         from person p
         where t.fullname = p.fullname
     );
     return;
 end
 $$;
 
 
 -- Create entries in revision from tmp_revision
 create or replace function swh_revision_add()
     returns void
     language plpgsql
 as $$
 begin
     perform swh_person_add_from_revision();
 
     insert into revision (id, date, date_offset, date_neg_utc_offset, committer_date, committer_date_offset, committer_date_neg_utc_offset, type, directory, message, author, committer, metadata, synthetic, extra_headers)
     select t.id, t.date, t.date_offset, t.date_neg_utc_offset, t.committer_date, t.committer_date_offset, t.committer_date_neg_utc_offset, t.type, t.directory, t.message, a.id, c.id, t.metadata, t.synthetic, t.extra_headers
     from tmp_revision t
     left join person a on a.fullname = t.author_fullname
     left join person c on c.fullname = t.committer_fullname;
     return;
 end
 $$;
 
 
 -- Create entries in person from tmp_release
 create or replace function swh_person_add_from_release()
     returns void
     language plpgsql
 as $$
 begin
     with t as (
         select distinct author_fullname as fullname, author_name as name, author_email as email from tmp_release
         where author_fullname is not null
     ) insert into person (fullname, name, email)
     select distinct on (fullname) fullname, name, email from t
     where not exists (
         select 1
         from person p
         where t.fullname = p.fullname
     );
     return;
 end
 $$;
 
 
 -- Create entries in release from tmp_release
 create or replace function swh_release_add()
     returns void
     language plpgsql
 as $$
 begin
     perform swh_person_add_from_release();
 
     insert into release (id, target, target_type, date, date_offset, date_neg_utc_offset, name, comment, author, synthetic)
       select distinct t.id, t.target, t.target_type, t.date, t.date_offset, t.date_neg_utc_offset, t.name, t.comment, a.id, t.synthetic
         from tmp_release t
         left join person a on a.fullname = t.author_fullname
         where not exists (select 1 from release where t.id = release.id);
     return;
 end
 $$;
 
 
 -- add a new origin_visit for the origin identified by origin_url, at date.
 --
 -- Returns the new visit id.
 create or replace function swh_origin_visit_add(origin_url text, date timestamptz, type text)
     returns bigint
     language sql
 as $$
   with origin_id as (
     select id
     from origin
     where url = origin_url
   ), last_known_visit as (
     select coalesce(max(visit), 0) as visit
     from origin_visit
     where origin = (select id from origin_id)
   )
   insert into origin_visit (origin, date, type, visit)
   values ((select id from origin_id), date, type,
           (select visit from last_known_visit) + 1)
   returning visit;
 $$;
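 -- Illustrative usage (a sketch, assuming the origin has already been
 -- registered):
 --   select swh_origin_visit_add('https://example.org/repo.git', now(), 'git');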
 
 create or replace function swh_snapshot_add(snapshot_id sha1_git)
   returns void
   language plpgsql
 as $$
 declare
   snapshot_object_id snapshot.object_id%type;
 begin
   select object_id from snapshot where id = snapshot_id into snapshot_object_id;
   if snapshot_object_id is null then
      insert into snapshot (id) values (snapshot_id) returning object_id into snapshot_object_id;
      insert into snapshot_branch (name, target_type, target)
        select name, target_type, target from tmp_snapshot_branch tmp
        where not exists (
          select 1
          from snapshot_branch sb
          where sb.name = tmp.name
            and sb.target = tmp.target
            and sb.target_type = tmp.target_type
        )
        on conflict do nothing;
      insert into snapshot_branches (snapshot_id, branch_id)
      select snapshot_object_id, sb.object_id as branch_id
        from tmp_snapshot_branch tmp
        join snapshot_branch sb
        using (name, target, target_type)
        where tmp.target is not null and tmp.target_type is not null
      union
      select snapshot_object_id, sb.object_id as branch_id
        from tmp_snapshot_branch tmp
        join snapshot_branch sb
        using (name)
        where tmp.target is null and tmp.target_type is null
          and sb.target is null and sb.target_type is null;
   end if;
   truncate table tmp_snapshot_branch;
 end;
 $$;
 
 create type snapshot_result as (
   snapshot_id  sha1_git,
   name         bytea,
   target       bytea,
   target_type  snapshot_target
 );
 
 create or replace function swh_snapshot_get_by_id(id sha1_git,
     branches_from bytea default '', branches_count bigint default null,
     target_types snapshot_target[] default NULL)
   returns setof snapshot_result
   language sql
   stable
 as $$
   -- with small limits, the "naive" version of this query can degenerate into
   -- using the deduplication index on snapshot_branch (name, target,
   -- target_type); the planner happily scans several hundred million rows.
 
   -- Do the query in two steps: first pull the relevant branches for the given
   -- snapshot (filtering them by type), then do the limiting. This two-step
   -- process guides the planner into using the proper index.
   with filtered_snapshot_branches as (
     select swh_snapshot_get_by_id.id as snapshot_id, name, target, target_type
       from snapshot_branches
       inner join snapshot_branch on snapshot_branches.branch_id = snapshot_branch.object_id
       where snapshot_id = (select object_id from snapshot where snapshot.id = swh_snapshot_get_by_id.id)
         and (target_types is null or target_type = any(target_types))
       order by name
   )
   select snapshot_id, name, target, target_type
     from filtered_snapshot_branches
     where name >= branches_from
     order by name limit branches_count;
 $$;
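 -- Illustrative usage (a sketch; the id is a made-up hash): page through a
 -- snapshot's branches 100 at a time, resuming from the last name seen.
 --   select * from swh_snapshot_get_by_id(
 --       '\x0000000000000000000000000000000000000000',
 --       branches_from := 'refs/heads/m', branches_count := 100);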
 
 create type snapshot_size as (
   target_type snapshot_target,
   count bigint
 );
 
 create or replace function swh_snapshot_count_branches(id sha1_git)
   returns setof snapshot_size
   language sql
   stable
 as $$
   SELECT target_type, count(name)
   from swh_snapshot_get_by_id(swh_snapshot_count_branches.id)
   group by target_type;
 $$;
 
 -- Absolute path: directory reference + complete path relative to it
 create type content_dir as (
     directory  sha1_git,
     path       unix_path
 );
 
 
 -- Find the containing directory of a given content, specified by sha1
 -- (note: *not* sha1_git).
 --
 -- Return a pair (dir_id, path) where path is a UNIX path that, from the
 -- directory root, reaches down to a file with the desired content. Return
 -- NULL if no match is found.
 --
 -- In case of multiple paths (i.e., pretty much always), an arbitrary one is
 -- chosen.
 create or replace function swh_content_find_directory(content_id sha1)
     returns content_dir
     language sql
     stable
 as $$
     with recursive path as (
 	-- Recursively build a path from the requested content to a root
 	-- directory. Each iteration returns a pair (dir_id, filename) where
 	-- filename is relative to dir_id. Stops when no parent directory can
 	-- be found.
 	(select dir.id as dir_id, dir_entry_f.name as name, 0 as depth
 	 from directory_entry_file as dir_entry_f
 	 join content on content.sha1_git = dir_entry_f.target
 	 join directory as dir on dir.file_entries @> array[dir_entry_f.id]
 	 where content.sha1 = content_id
 	 limit 1)
 	union all
 	(select dir.id as dir_id,
 		(dir_entry_d.name || '/' || path.name)::unix_path as name,
 		path.depth + 1
 	 from path
 	 join directory_entry_dir as dir_entry_d on dir_entry_d.target = path.dir_id
 	 join directory as dir on dir.dir_entries @> array[dir_entry_d.id]
 	 limit 1)
     )
     select dir_id, name from path order by depth desc limit 1;
 $$;
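 -- Illustrative usage (a sketch; the sha1 is a made-up hash):
 --   select * from swh_content_find_directory(
 --       '\x0000000000000000000000000000000000000000');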
 
 -- Find the visit of origin closest to date visit_date
 -- Breaks ties by selecting the largest visit id
 create or replace function swh_visit_find_by_date(origin_url text, visit_date timestamptz default NOW())
     returns setof origin_visit
     language plpgsql
     stable
 as $$
 declare
   origin_id bigint;
 begin
   select id into origin_id from origin where url=origin_url;
   return query
   with closest_two_visits as ((
     select ov, (date - visit_date) as interval, visit
     from origin_visit ov
     where ov.origin = origin_id
           and ov.date >= visit_date
     order by ov.date asc, ov.visit desc
     limit 1
   ) union (
     select ov, (visit_date - date) as interval, visit
     from origin_visit ov
     where ov.origin = origin_id
           and ov.date < visit_date
     order by ov.date desc, ov.visit desc
     limit 1
   )) select (ov).* from closest_two_visits order by interval, visit limit 1;
 end
 $$;
 
 -- Object listing by object_id
 
 create or replace function swh_content_list_by_object_id(
     min_excl bigint,
     max_incl bigint
 )
     returns setof content
     language sql
     stable
 as $$
     select * from content
     where object_id > min_excl and object_id <= max_incl
     order by object_id;
 $$;
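 -- Illustrative usage (a sketch): fetch contents whose object_id lies in the
 -- half-open range (1000, 2000].
 --   select * from swh_content_list_by_object_id(1000, 2000);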
 
 create or replace function swh_revision_list_by_object_id(
     min_excl bigint,
     max_incl bigint
 )
     returns setof revision_entry
     language sql
     stable
 as $$
     with revs as (
         select * from revision
         where object_id > min_excl and object_id <= max_incl
     )
     select r.id, r.date, r.date_offset, r.date_neg_utc_offset,
            r.committer_date, r.committer_date_offset, r.committer_date_neg_utc_offset,
            r.type, r.directory, r.message,
            a.id, a.fullname, a.name, a.email, c.id, c.fullname, c.name, c.email, r.metadata, r.synthetic,
            array(select rh.parent_id::bytea from revision_history rh where rh.id = r.id order by rh.parent_rank)
                as parents, r.object_id, r.extra_headers
     from revs r
     left join person a on a.id = r.author
     left join person c on c.id = r.committer
     order by r.object_id;
 $$;
 
 create or replace function swh_release_list_by_object_id(
     min_excl bigint,
     max_incl bigint
 )
     returns setof release_entry
     language sql
     stable
 as $$
     with rels as (
         select * from release
         where object_id > min_excl and object_id <= max_incl
     )
     select r.id, r.target, r.target_type, r.date, r.date_offset, r.date_neg_utc_offset, r.name, r.comment,
            r.synthetic, p.id as author_id, p.fullname as author_fullname, p.name as author_name, p.email as author_email, r.object_id
     from rels r
     left join person p on p.id = r.author
     order by r.object_id;
 $$;
 
 
 -- simple counter mapping a textual label to an integer value
 create type counter as (
     label  text,
     value  bigint
 );
 
 -- return statistics about the number of tuples in various SWH tables
 --
 -- Note: the returned values come from the object_counts cache table, which is
 -- refreshed asynchronously, so they may lag behind the actual table contents
 create or replace function swh_stat_counters()
     returns setof counter
     language sql
     stable
 as $$
     select object_type as label, value as value
     from object_counts
     where object_type in (
         'content',
         'directory',
         'directory_entry_dir',
         'directory_entry_file',
         'directory_entry_rev',
         'origin',
         'origin_visit',
         'person',
         'release',
         'revision',
         'revision_history',
         'skipped_content',
         'snapshot'
     );
 $$;
 
 create or replace function swh_update_counter(object_type text)
     returns void
     language plpgsql
 as $$
 begin
     execute format('
 	insert into object_counts
     (value, last_update, object_type)
   values
     ((select count(*) from %1$I), NOW(), %1$L)
   on conflict (object_type) do update set
     value = excluded.value,
     last_update = excluded.last_update',
   object_type);
     return;
 end;
 $$;
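 -- Illustrative usage (a sketch): refresh the cached count for a single table.
 --   select swh_update_counter('content');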
 
 create or replace function swh_update_counter_bucketed()
     returns void
     language plpgsql
 as $$
 declare
   query text;
   line_to_update int;
   new_value bigint;
 begin
   select
     object_counts_bucketed.line,
     format(
       'select count(%I) from %I where %s',
       coalesce(identifier, '*'),
       object_type,
       coalesce(
         concat_ws(
           ' and ',
           case when bucket_start is not null then
             format('%I >= %L', identifier, bucket_start) -- lower bound condition, inclusive
           end,
           case when bucket_end is not null then
             format('%I < %L', identifier, bucket_end) -- upper bound condition, exclusive
           end
         ),
         'true'
       )
     )
     from object_counts_bucketed
     order by coalesce(last_update, now() - '1 month'::interval) asc
     limit 1
     into line_to_update, query;
 
   execute query into new_value;
 
   update object_counts_bucketed
     set value = new_value,
         last_update = now()
     where object_counts_bucketed.line = line_to_update;
 
 end
 $$;
 
 create or replace function swh_update_counters_from_buckets()
   returns trigger
   language plpgsql
 as $$
 begin
 with to_update as (
   select object_type, sum(value) as value, max(last_update) as last_update
   from object_counts_bucketed ob1
   where not exists (
     select 1 from object_counts_bucketed ob2
     where ob1.object_type = ob2.object_type
     and value is null
     )
   group by object_type
 ) update object_counts
   set
     value = to_update.value,
     last_update = to_update.last_update
   from to_update
   where
     object_counts.object_type = to_update.object_type
     and object_counts.value != to_update.value;
 return null;
 end
 $$;
 
 create trigger update_counts_from_bucketed
   after insert or update
   on object_counts_bucketed
   for each row
   when (NEW.line % 256 = 0)
   execute procedure swh_update_counters_from_buckets();
diff --git a/swh/storage/tests/storage_tests.py b/swh/storage/tests/storage_tests.py
index c80c0a9f..53dc2421 100644
--- a/swh/storage/tests/storage_tests.py
+++ b/swh/storage/tests/storage_tests.py
@@ -1,3884 +1,3914 @@
 # Copyright (C) 2015-2020  The Software Heritage developers
 # See the AUTHORS file at the top-level directory of this distribution
 # License: GNU General Public License version 3, or any later version
 # See top-level LICENSE file for more information
 
 from collections import defaultdict
 import datetime
 from datetime import timedelta
 import inspect
 import itertools
 import math
 import random
 
 import attr
 import pytest
 
 from hypothesis import given, strategies, settings, HealthCheck
 
 from typing import Any, ClassVar, Dict, Iterator, Optional
 
 from swh.model import from_disk
 from swh.model.hashutil import hash_to_bytes
 from swh.model.identifiers import SWHID
 from swh.model.model import (
     Content,
     Directory,
     MetadataTargetType,
     Origin,
     OriginVisit,
     OriginVisitStatus,
     Person,
     Revision,
+    SkippedContent,
     Snapshot,
     TargetType,
 )
 from swh.model.hypothesis_strategies import objects
 from swh.storage import get_storage
 from swh.storage.common import origin_url_to_sha1 as sha1
 from swh.storage.exc import HashCollision, StorageArgumentException
 from swh.storage.interface import ListOrder, PagedResult, StorageInterface
 from swh.storage.utils import content_hex_hashes, now, round_to_milliseconds
 
 
 def transform_entries(
     storage: StorageInterface, dir_: Directory, *, prefix: bytes = b""
 ) -> Iterator[Dict[str, Any]]:
     """Iterate through a directory's entries, and yields the items 'directory_ls' is
        expected to return; including content metadata for file entries."""
 
     for ent in dir_.entries:
         if ent.type == "dir":
             yield {
                 "dir_id": dir_.id,
                 "type": ent.type,
                 "target": ent.target,
                 "name": prefix + ent.name,
                 "perms": ent.perms,
                 "status": None,
                 "sha1": None,
                 "sha1_git": None,
                 "sha256": None,
                 "length": None,
             }
         elif ent.type == "file":
             contents = storage.content_find({"sha1_git": ent.target})
             assert contents
             ent_dict = contents[0].to_dict()
             for key in ["ctime", "blake2s256"]:
                 ent_dict.pop(key, None)
             ent_dict.update(
                 {
                     "dir_id": dir_.id,
                     "type": ent.type,
                     "target": ent.target,
                     "name": prefix + ent.name,
                     "perms": ent.perms,
                 }
             )
             yield ent_dict
 
 
 def assert_contents_ok(
     expected_contents, actual_contents, keys_to_check={"sha1", "data"}
 ):
     """Assert that a given list of contents matches on a given set of keys.
 
     """
     for k in keys_to_check:
         expected_list = set([c.get(k) for c in expected_contents])
         actual_list = set([c.get(k) for c in actual_contents])
         assert actual_list == expected_list, k
 
 
 class LazyContent(Content):
     def with_data(self):
         return Content.from_dict({**self.to_dict(), "data": b"42\n"})
 
 
 class TestStorage:
     """Main class for Storage testing.
 
     This class is used as-is to test local storage (see TestLocalStorage
     below) and remote storage (see TestRemoteStorage in
     test_remote_storage.py).
 
     We need to have the two classes inherit from this base class
     separately to avoid nosetests running the tests from the base
     class twice.
     """
 
     maxDiff = None  # type: ClassVar[Optional[int]]
 
     def test_types(self, swh_storage_backend_config):
         """Checks all methods of StorageInterface are implemented by this
         backend, and that they have the same signature."""
         # Create an instance of the protocol (which cannot be instantiated
         # directly, so this creates a subclass, then instantiates it)
         interface = type("_", (StorageInterface,), {})()
         storage = get_storage(**swh_storage_backend_config)
 
         assert "content_add" in dir(interface)
 
         missing_methods = []
 
         for meth_name in dir(interface):
             if meth_name.startswith("_"):
                 continue
             interface_meth = getattr(interface, meth_name)
             try:
                 concrete_meth = getattr(storage, meth_name)
             except AttributeError:
                 if not getattr(interface_meth, "deprecated_endpoint", False):
                     # The backend is missing a (non-deprecated) endpoint
                     missing_methods.append(meth_name)
                 continue
 
             expected_signature = inspect.signature(interface_meth)
             actual_signature = inspect.signature(concrete_meth)
 
             assert expected_signature == actual_signature, meth_name
 
         assert missing_methods == []
 
         # If all the assertions above succeed, then this one should too.
         # But there's no harm in double-checking.
         # And we could replace the assertions above by this one, but unlike
         # the assertions above, it doesn't explain what is missing.
         assert isinstance(storage, StorageInterface)
 
     def test_check_config(self, swh_storage):
         assert swh_storage.check_config(check_write=True)
         assert swh_storage.check_config(check_write=False)
 
     def test_content_add(self, swh_storage, sample_data):
         cont = sample_data.content
 
         insertion_start_time = now()
         actual_result = swh_storage.content_add([cont])
         insertion_end_time = now()
 
         assert actual_result == {
             "content:add": 1,
             "content:add:bytes": cont.length,
         }
 
         assert swh_storage.content_get_data(cont.sha1) == cont.data
 
         expected_cont = attr.evolve(cont, data=None)
 
         contents = [
             obj
             for (obj_type, obj) in swh_storage.journal_writer.journal.objects
             if obj_type == "content"
         ]
         assert len(contents) == 1
         for obj in contents:
             assert insertion_start_time <= obj.ctime
             assert obj.ctime <= insertion_end_time
             assert obj == expected_cont
 
         swh_storage.refresh_stat_counters()
         assert swh_storage.stat_counters()["content"] == 1
 
     def test_content_add_from_lazy_content(self, swh_storage, sample_data):
         cont = sample_data.content
         lazy_content = LazyContent.from_dict(cont.to_dict())
 
         insertion_start_time = now()
 
         actual_result = swh_storage.content_add([lazy_content])
 
         insertion_end_time = now()
 
         assert actual_result == {
             "content:add": 1,
             "content:add:bytes": cont.length,
         }
 
         # the fact that we retrieve the content object from the storage with
         # the correct 'data' field ensures it has been 'called'
         assert swh_storage.content_get_data(cont.sha1) == cont.data
 
         expected_cont = attr.evolve(lazy_content, data=None, ctime=None)
         contents = [
             obj
             for (obj_type, obj) in swh_storage.journal_writer.journal.objects
             if obj_type == "content"
         ]
         assert len(contents) == 1
         for obj in contents:
             assert insertion_start_time <= obj.ctime
             assert obj.ctime <= insertion_end_time
             assert attr.evolve(obj, ctime=None).to_dict() == expected_cont.to_dict()
 
         swh_storage.refresh_stat_counters()
         assert swh_storage.stat_counters()["content"] == 1
 
     def test_content_get_data_missing(self, swh_storage, sample_data):
         cont, cont2 = sample_data.contents[:2]
 
         swh_storage.content_add([cont])
 
         # Query a single missing content
         actual_content_data = swh_storage.content_get_data(cont2.sha1)
         assert actual_content_data is None
 
         # Check content_get does not abort after finding a missing content
         actual_content_data = swh_storage.content_get_data(cont.sha1)
         assert actual_content_data == cont.data
         actual_content_data = swh_storage.content_get_data(cont2.sha1)
         assert actual_content_data is None
 
     def test_content_add_different_input(self, swh_storage, sample_data):
         cont, cont2 = sample_data.contents[:2]
 
         actual_result = swh_storage.content_add([cont, cont2])
         assert actual_result == {
             "content:add": 2,
             "content:add:bytes": cont.length + cont2.length,
         }
 
     def test_content_add_twice(self, swh_storage, sample_data):
         cont, cont2 = sample_data.contents[:2]
 
         actual_result = swh_storage.content_add([cont])
         assert actual_result == {
             "content:add": 1,
             "content:add:bytes": cont.length,
         }
         assert len(swh_storage.journal_writer.journal.objects) == 1
 
         actual_result = swh_storage.content_add([cont, cont2])
         assert actual_result == {
             "content:add": 1,
             "content:add:bytes": cont2.length,
         }
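         # the journal may or may not record the re-sent cont a second time,
         # hence 2 or 3 entries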
         assert 2 <= len(swh_storage.journal_writer.journal.objects) <= 3
 
         assert len(swh_storage.content_find(cont.to_dict())) == 1
         assert len(swh_storage.content_find(cont2.to_dict())) == 1
 
     def test_content_add_collision(self, swh_storage, sample_data):
         cont1 = sample_data.content
 
         # create (corrupted) content with same sha1{,_git} but != sha256
         sha256_array = bytearray(cont1.sha256)
         sha256_array[0] += 1
         cont1b = attr.evolve(cont1, sha256=bytes(sha256_array))
 
         with pytest.raises(HashCollision) as cm:
             swh_storage.content_add([cont1, cont1b])
 
         exc = cm.value
         actual_algo = exc.algo
         assert actual_algo in ["sha1", "sha1_git"]
         actual_id = exc.hash_id
         assert actual_id == getattr(cont1, actual_algo).hex()
         collisions = exc.args[2]
         assert len(collisions) == 2
         assert collisions == [
             content_hex_hashes(cont1.hashes()),
             content_hex_hashes(cont1b.hashes()),
         ]
         assert exc.colliding_content_hashes() == [
             cont1.hashes(),
             cont1b.hashes(),
         ]
 
     def test_content_add_duplicate(self, swh_storage, sample_data):
         cont = sample_data.content
         swh_storage.content_add([cont, cont])
 
         assert swh_storage.content_get_data(cont.sha1) == cont.data
 
     def test_content_update(self, swh_storage, sample_data):
         cont1 = sample_data.content
 
         if hasattr(swh_storage, "journal_writer"):
             swh_storage.journal_writer.journal = None  # TODO, not supported
 
         swh_storage.content_add([cont1])
 
         # alter the sha1_git for example
         cont1b = attr.evolve(
             cont1, sha1_git=hash_to_bytes("3a60a5275d0333bf13468e8b3dcab90f4046e654")
         )
 
         swh_storage.content_update([cont1b.to_dict()], keys=["sha1_git"])
 
         actual_contents = swh_storage.content_get([cont1.sha1])
         expected_content = attr.evolve(cont1b, data=None)
         assert actual_contents == [expected_content]
 
     def test_content_add_metadata(self, swh_storage, sample_data):
         cont = attr.evolve(sample_data.content, data=None, ctime=now())
 
         actual_result = swh_storage.content_add_metadata([cont])
         assert actual_result == {
             "content:add": 1,
         }
 
         expected_cont = cont
         assert swh_storage.content_get([cont.sha1]) == [expected_cont]
         contents = [
             obj
             for (obj_type, obj) in swh_storage.journal_writer.journal.objects
             if obj_type == "content"
         ]
         assert len(contents) == 1
         for obj in contents:
             obj = attr.evolve(obj, ctime=None)
             assert obj == cont
 
     def test_content_add_metadata_different_input(self, swh_storage, sample_data):
         contents = sample_data.contents[:2]
         cont = attr.evolve(contents[0], data=None, ctime=now())
         cont2 = attr.evolve(contents[1], data=None, ctime=now())
 
         actual_result = swh_storage.content_add_metadata([cont, cont2])
         assert actual_result == {
             "content:add": 2,
         }
 
     def test_content_add_metadata_collision(self, swh_storage, sample_data):
         cont1 = attr.evolve(sample_data.content, data=None, ctime=now())
 
         # create (corrupted) content with same sha1{,_git} but != sha256
         sha256_array = bytearray(cont1.sha256)
         sha256_array[0] += 1
         cont1b = attr.evolve(cont1, sha256=bytes(sha256_array))
 
         with pytest.raises(HashCollision) as cm:
             swh_storage.content_add_metadata([cont1, cont1b])
 
         exc = cm.value
         actual_algo = exc.algo
         assert actual_algo in ["sha1", "sha1_git", "blake2s256"]
         actual_id = exc.hash_id
         assert actual_id == getattr(cont1, actual_algo).hex()
         collisions = exc.args[2]
         assert len(collisions) == 2
         assert collisions == [
             content_hex_hashes(cont1.hashes()),
             content_hex_hashes(cont1b.hashes()),
         ]
         assert exc.colliding_content_hashes() == [
             cont1.hashes(),
             cont1b.hashes(),
         ]
 
     def test_skipped_content_add(self, swh_storage, sample_data):
         contents = sample_data.skipped_contents[:2]
         cont = contents[0]
         cont2 = attr.evolve(contents[1], blake2s256=None)
 
         contents_dict = [c.to_dict() for c in [cont, cont2]]
 
         missing = list(swh_storage.skipped_content_missing(contents_dict))
 
         assert missing == [cont.hashes(), cont2.hashes()]
 
         actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
 
         assert 2 <= actual_result.pop("skipped_content:add") <= 3
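         # the duplicated cont may or may not be counted twice depending on
         # the backend, hence the 2..3 range above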
         assert actual_result == {}
 
         missing = list(swh_storage.skipped_content_missing(contents_dict))
         assert missing == []
 
     def test_skipped_content_add_missing_hashes(self, swh_storage, sample_data):
         cont, cont2 = [
             attr.evolve(c, sha1_git=None) for c in sample_data.skipped_contents[:2]
         ]
         contents_dict = [c.to_dict() for c in [cont, cont2]]
 
         missing = list(swh_storage.skipped_content_missing(contents_dict))
         assert len(missing) == 2
 
         actual_result = swh_storage.skipped_content_add([cont, cont, cont2])
 
         assert 2 <= actual_result.pop("skipped_content:add") <= 3
         assert actual_result == {}
 
         missing = list(swh_storage.skipped_content_missing(contents_dict))
         assert missing == []
 
     def test_skipped_content_missing_partial_hash(self, swh_storage, sample_data):
         cont = sample_data.skipped_content
         cont2 = attr.evolve(cont, sha1_git=None)
         contents_dict = [c.to_dict() for c in [cont, cont2]]
 
         missing = list(swh_storage.skipped_content_missing(contents_dict))
         assert len(missing) == 2
 
         actual_result = swh_storage.skipped_content_add([cont])
 
         assert actual_result.pop("skipped_content:add") == 1
         assert actual_result == {}
 
         missing = list(swh_storage.skipped_content_missing(contents_dict))
         assert missing == [cont2.hashes()]
 
     @pytest.mark.property_based
     @settings(deadline=None)  # this test is very slow
     @given(
         strategies.sets(
             elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]),
             min_size=0,
         )
     )
     def test_content_missing(self, swh_storage, sample_data, algos):
         algos |= {"sha1"}
         content, missing_content = [sample_data.content2, sample_data.skipped_content]
         swh_storage.content_add([content])
 
         test_contents = [content.to_dict()]
         missing_per_hash = defaultdict(list)
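         # forge 256 variants of the missing content by overwriting the first
         # byte of each selected hash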
         for i in range(256):
             test_content = missing_content.to_dict()
             for hash in algos:
                 test_content[hash] = bytes([i]) + test_content[hash][1:]
                 missing_per_hash[hash].append(test_content[hash])
             test_contents.append(test_content)
 
         assert set(swh_storage.content_missing(test_contents)) == set(
             missing_per_hash["sha1"]
         )
 
         for hash in algos:
             assert set(
                 swh_storage.content_missing(test_contents, key_hash=hash)
             ) == set(missing_per_hash[hash])
 
     @pytest.mark.property_based
     @given(
         strategies.sets(
             elements=strategies.sampled_from(["sha256", "sha1_git", "blake2s256"]),
             min_size=0,
         )
     )
     def test_content_missing_unknown_algo(self, swh_storage, sample_data, algos):
         algos |= {"sha1"}
         content, missing_content = [sample_data.content2, sample_data.skipped_content]
         swh_storage.content_add([content])
 
         test_contents = [content.to_dict()]
         missing_per_hash = defaultdict(list)
         for i in range(16):
             test_content = missing_content.to_dict()
             for hash in algos:
                 test_content[hash] = bytes([i]) + test_content[hash][1:]
                 missing_per_hash[hash].append(test_content[hash])
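             # an extra, unknown hash algorithm in the dict should be ignored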
             test_content["nonexisting_algo"] = b"\x00"
             test_contents.append(test_content)
 
         assert set(swh_storage.content_missing(test_contents)) == set(
             missing_per_hash["sha1"]
         )
 
         for hash in algos:
             assert set(
                 swh_storage.content_missing(test_contents, key_hash=hash)
             ) == set(missing_per_hash[hash])
 
     def test_content_missing_per_sha1(self, swh_storage, sample_data):
         # given
         cont = sample_data.content
         cont2 = sample_data.content2
         missing_cont = sample_data.skipped_content
         missing_cont2 = sample_data.skipped_content2
         swh_storage.content_add([cont, cont2])
 
         # when
         gen = swh_storage.content_missing_per_sha1(
             [cont.sha1, missing_cont.sha1, cont2.sha1, missing_cont2.sha1]
         )
         # then
         assert list(gen) == [missing_cont.sha1, missing_cont2.sha1]
 
     def test_content_missing_per_sha1_git(self, swh_storage, sample_data):
         cont, cont2 = sample_data.contents[:2]
         missing_cont = sample_data.skipped_content
 
         swh_storage.content_add([cont, cont2])
 
         contents = [cont.sha1_git, cont2.sha1_git, missing_cont.sha1_git]
 
         missing_contents = swh_storage.content_missing_per_sha1_git(contents)
         assert list(missing_contents) == [missing_cont.sha1_git]
 
     def test_content_get_partition(self, swh_storage, swh_contents):
         """content_get_partition paginates results if limit exceeded"""
         expected_contents = [
             attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
         ]
 
         actual_contents = []
         for i in range(16):
             actual_result = swh_storage.content_get_partition(i, 16)
             assert actual_result.next_page_token is None
             actual_contents.extend(actual_result.results)
 
         assert len(actual_contents) == len(expected_contents)
         for content in actual_contents:
             assert content in expected_contents
             assert content.ctime is None
 
     def test_content_get_partition_full(self, swh_storage, swh_contents):
         """content_get_partition for a single partition returns all available contents
 
         """
         expected_contents = [
             attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
         ]
 
         actual_result = swh_storage.content_get_partition(0, 1)
         assert actual_result.next_page_token is None
 
         actual_contents = actual_result.results
         assert len(actual_contents) == len(expected_contents)
         for content in actual_contents:
             assert content in expected_contents
 
     def test_content_get_partition_empty(self, swh_storage, swh_contents):
         """content_get_partition when at least one of the partitions is empty"""
         expected_contents = {
             cont.sha1 for cont in swh_contents if cont.status != "absent"
         }
         # nb_partitions = smallest power of 2 strictly greater than the
         # number of contents, so that at least one partition is empty
         nb_partitions = 1 << math.floor(math.log2(len(swh_contents)) + 1)
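         # e.g. for 10 contents: floor(log2(10) + 1) == 4, hence 16 partitions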
 
         seen_sha1s = []
 
         for i in range(nb_partitions):
             actual_result = swh_storage.content_get_partition(
                 i, nb_partitions, limit=len(swh_contents) + 1
             )
 
             for content in actual_result.results:
                 seen_sha1s.append(content.sha1)
 
             # Limit is higher than the max number of results
             assert actual_result.next_page_token is None
 
         assert set(seen_sha1s) == expected_contents
 
     def test_content_get_partition_limit_none(self, swh_storage):
         """content_get_partition call with wrong limit input should fail"""
         with pytest.raises(StorageArgumentException, match="limit should not be None"):
             swh_storage.content_get_partition(1, 16, limit=None)
 
     def test_content_get_partition_pagination_generate(self, swh_storage, swh_contents):
         """content_get_partition returns contents within range provided"""
         expected_contents = [
             attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
         ]
 
         # retrieve contents
         actual_contents = []
         for i in range(4):
             page_token = None
             while True:
                 actual_result = swh_storage.content_get_partition(
                     i, 4, limit=3, page_token=page_token
                 )
                 actual_contents.extend(actual_result.results)
                 page_token = actual_result.next_page_token
 
                 if page_token is None:
                     break
 
         assert len(actual_contents) == len(expected_contents)
         for content in actual_contents:
             assert content in expected_contents
 
     def test_content_get(self, swh_storage, sample_data):
         cont1, cont2 = sample_data.contents[:2]
 
         swh_storage.content_add([cont1, cont2])
 
         actual_contents = swh_storage.content_get([cont1.sha1, cont2.sha1])
 
         # only the metadata is retrieved, so neither data nor ctime are set
         expected_contents = [attr.evolve(c, data=None) for c in [cont1, cont2]]
 
         assert actual_contents == expected_contents
         for content in actual_contents:
             assert content.ctime is None
 
     def test_content_get_missing_sha1(self, swh_storage, sample_data):
         cont1, cont2 = sample_data.contents[:2]
         assert cont1.sha1 != cont2.sha1
         missing_cont = sample_data.skipped_content
 
         swh_storage.content_add([cont1, cont2])
 
         actual_contents = swh_storage.content_get(
             [cont1.sha1, cont2.sha1, missing_cont.sha1]
         )
 
         expected_contents = [
             attr.evolve(c, data=None) if c else None for c in [cont1, cont2, None]
         ]
         assert actual_contents == expected_contents
 
     def test_content_get_random(self, swh_storage, sample_data):
         cont, cont2, cont3 = sample_data.contents[:3]
         swh_storage.content_add([cont, cont2, cont3])
 
         assert swh_storage.content_get_random() in {
             cont.sha1_git,
             cont2.sha1_git,
             cont3.sha1_git,
         }
 
     def test_directory_add(self, swh_storage, sample_data):
         content = sample_data.content
         directory = sample_data.directories[1]
         assert directory.entries[0].target == content.sha1_git
         swh_storage.content_add([content])
 
         init_missing = list(swh_storage.directory_missing([directory.id]))
         assert [directory.id] == init_missing
 
         actual_result = swh_storage.directory_add([directory])
         assert actual_result == {"directory:add": 1}
 
         assert ("directory", directory) in list(
             swh_storage.journal_writer.journal.objects
         )
 
         actual_data = list(swh_storage.directory_ls(directory.id))
         expected_data = list(transform_entries(swh_storage, directory))
 
         for data in actual_data:
             assert data in expected_data
 
         after_missing = list(swh_storage.directory_missing([directory.id]))
         assert after_missing == []
 
         swh_storage.refresh_stat_counters()
         assert swh_storage.stat_counters()["directory"] == 1
 
     def test_directory_add_twice(self, swh_storage, sample_data):
         directory = sample_data.directories[1]
 
         actual_result = swh_storage.directory_add([directory])
         assert actual_result == {"directory:add": 1}
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("directory", directory)
         ]
 
         actual_result = swh_storage.directory_add([directory])
         assert actual_result == {"directory:add": 0}
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("directory", directory)
         ]
 
     def test_directory_ls_recursive(self, swh_storage, sample_data):
         # create a consistent dataset for the directories we want to list
         content, content2 = sample_data.contents[:2]
         swh_storage.content_add([content, content2])
         dir1, dir2, dir3 = sample_data.directories[:3]
 
         dir_ids = [d.id for d in [dir1, dir2, dir3]]
         init_missing = list(swh_storage.directory_missing(dir_ids))
         assert init_missing == dir_ids
 
         actual_result = swh_storage.directory_add([dir1, dir2, dir3])
         assert actual_result == {"directory:add": 3}
 
         # List directory containing one file
         actual_data = list(swh_storage.directory_ls(dir1.id, recursive=True))
         expected_data = list(transform_entries(swh_storage, dir1))
         for data in actual_data:
             assert data in expected_data
 
         # List directory containing a file and an unknown subdirectory
         actual_data = list(swh_storage.directory_ls(dir2.id, recursive=True))
         expected_data = list(transform_entries(swh_storage, dir2))
         for data in actual_data:
             assert data in expected_data
 
         # List directory containing both a known and an unknown subdirectory;
         # entries should be both those of the directory and of the known
         # subdir (up to contents)
         actual_data = list(swh_storage.directory_ls(dir3.id, recursive=True))
         expected_data = list(
             itertools.chain(
                 transform_entries(swh_storage, dir3),
                 transform_entries(swh_storage, dir2, prefix=b"subdir/"),
             )
         )
 
         for data in actual_data:
             assert data in expected_data
 
     def test_directory_ls_non_recursive(self, swh_storage, sample_data):
         # create a consistent dataset for the directories we want to list
         content, content2 = sample_data.contents[:2]
         swh_storage.content_add([content, content2])
         dir1, dir2, dir3, _, dir5 = sample_data.directories[:5]
 
         dir_ids = [d.id for d in [dir1, dir2, dir3, dir5]]
         init_missing = list(swh_storage.directory_missing(dir_ids))
         assert init_missing == dir_ids
 
         actual_result = swh_storage.directory_add([dir1, dir2, dir3, dir5])
         assert actual_result == {"directory:add": 4}
 
         # List directory containing a file and an unknown subdirectory
         actual_data = list(swh_storage.directory_ls(dir1.id))
         expected_data = list(transform_entries(swh_storage, dir1))
         for data in actual_data:
             assert data in expected_data
 
         # List directory containing a single file
         actual_data = list(swh_storage.directory_ls(dir2.id))
         expected_data = list(transform_entries(swh_storage, dir2))
         for data in actual_data:
             assert data in expected_data
 
         # List directory containing a known subdirectory, entries should
         # only be those of the parent directory, not of the subdir
         actual_data = list(swh_storage.directory_ls(dir3.id))
         expected_data = list(transform_entries(swh_storage, dir3))
         for data in actual_data:
             assert data in expected_data
 
     def test_directory_ls_missing_content(self, swh_storage, sample_data):
         swh_storage.directory_add([sample_data.directory2])
         assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [
             {
                 "dir_id": sample_data.directory2.id,
                 "length": None,
                 "name": b"oof",
                 "perms": 33188,
                 "sha1": None,
                 "sha1_git": None,
                 "sha256": None,
                 "status": None,
                 "target": sample_data.directory2.entries[0].target,
                 "type": "file",
             },
         ]
 
+    def test_directory_ls_skipped_content(self, swh_storage, sample_data):
+        swh_storage.directory_add([sample_data.directory2])
+
+        cont = SkippedContent(
+            sha1_git=sample_data.directory2.entries[0].target,
+            sha1=b"c" * 20,
+            sha256=None,
+            blake2s256=None,
+            length=42,
+            status="absent",
+            reason="You need a premium subscription to access this content",
+        )
+        swh_storage.skipped_content_add([cont])
+
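+        # directory_ls should now expose the skipped content's hashes,
+        # length and 'absent' status for the matching entry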
+        assert list(swh_storage.directory_ls(sample_data.directory2.id)) == [
+            {
+                "dir_id": sample_data.directory2.id,
+                "length": 42,
+                "name": b"oof",
+                "perms": 33188,
+                "sha1": b"c" * 20,
+                "sha1_git": sample_data.directory2.entries[0].target,
+                "sha256": None,
+                "status": "absent",
+                "target": sample_data.directory2.entries[0].target,
+                "type": "file",
+            },
+        ]
+
     def test_directory_entry_get_by_path(self, swh_storage, sample_data):
         cont, content2 = sample_data.contents[:2]
         dir1, dir2, dir3, dir4, dir5 = sample_data.directories[:5]
 
         # given
         dir_ids = [d.id for d in [dir1, dir2, dir3, dir4, dir5]]
         init_missing = list(swh_storage.directory_missing(dir_ids))
         assert init_missing == dir_ids
 
         actual_result = swh_storage.directory_add([dir3, dir4])
         assert actual_result == {"directory:add": 2}
 
         expected_entries = [
             {
                 "dir_id": dir3.id,
                 "name": b"foo",
                 "type": "file",
                 "target": cont.sha1_git,
                 "sha1": None,
                 "sha1_git": None,
                 "sha256": None,
                 "status": None,
                 "perms": from_disk.DentryPerms.content,
                 "length": None,
             },
             {
                 "dir_id": dir3.id,
                 "name": b"subdir",
                 "type": "dir",
                 "target": dir2.id,
                 "sha1": None,
                 "sha1_git": None,
                 "sha256": None,
                 "status": None,
                 "perms": from_disk.DentryPerms.directory,
                 "length": None,
             },
             {
                 "dir_id": dir3.id,
                 "name": b"hello",
                 "type": "file",
                 "target": content2.sha1_git,
                 "sha1": None,
                 "sha1_git": None,
                 "sha256": None,
                 "status": None,
                 "perms": from_disk.DentryPerms.content,
                 "length": None,
             },
         ]
 
         # when (all must be found here)
         for entry, expected_entry in zip(dir3.entries, expected_entries):
             actual_entry = swh_storage.directory_entry_get_by_path(
                 dir3.id, [entry.name]
             )
             assert actual_entry == expected_entry
 
         # same, but deeper
         for entry, expected_entry in zip(dir3.entries, expected_entries):
             actual_entry = swh_storage.directory_entry_get_by_path(
                 dir4.id, [b"subdir1", entry.name]
             )
             expected_entry = expected_entry.copy()
             expected_entry["name"] = b"subdir1/" + expected_entry["name"]
             assert actual_entry == expected_entry
 
         # when (nothing should be found here since `dir2` is not persisted)
         for entry in dir2.entries:
             actual_entry = swh_storage.directory_entry_get_by_path(
                 dir2.id, [entry.name]
             )
             assert actual_entry is None
 
     def test_directory_get_random(self, swh_storage, sample_data):
         dir1, dir2, dir3 = sample_data.directories[:3]
         swh_storage.directory_add([dir1, dir2, dir3])
 
         assert swh_storage.directory_get_random() in {
             dir1.id,
             dir2.id,
             dir3.id,
         }
 
     def test_revision_add(self, swh_storage, sample_data):
         revision = sample_data.revision
         init_missing = swh_storage.revision_missing([revision.id])
         assert list(init_missing) == [revision.id]
 
         actual_result = swh_storage.revision_add([revision])
         assert actual_result == {"revision:add": 1}
 
         end_missing = swh_storage.revision_missing([revision.id])
         assert list(end_missing) == []
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("revision", revision)
         ]
 
         # already there so nothing added
         actual_result = swh_storage.revision_add([revision])
         assert actual_result == {"revision:add": 0}
 
         swh_storage.refresh_stat_counters()
         assert swh_storage.stat_counters()["revision"] == 1
 
     def test_revision_add_twice(self, swh_storage, sample_data):
         revision, revision2 = sample_data.revisions[:2]
 
         actual_result = swh_storage.revision_add([revision])
         assert actual_result == {"revision:add": 1}
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("revision", revision)
         ]
 
         actual_result = swh_storage.revision_add([revision, revision2])
         assert actual_result == {"revision:add": 1}
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("revision", revision),
             ("revision", revision2),
         ]
 
     def test_revision_add_name_clash(self, swh_storage, sample_data):
         revision, revision2 = sample_data.revisions[:2]
 
         revision1 = attr.evolve(
             revision,
             author=Person(
                 fullname=b"John Doe <john.doe@example.com>",
                 name=b"John Doe",
                 email=b"john.doe@example.com",
             ),
         )
         revision2 = attr.evolve(
             revision2,
             author=Person(
                 fullname=b"John Doe <john.doe@example.com>",
                 name=b"John Doe ",
                 email=b"john.doe@example.com ",
             ),
         )
         actual_result = swh_storage.revision_add([revision1, revision2])
         assert actual_result == {"revision:add": 2}
 
     def test_revision_get_order(self, swh_storage, sample_data):
         revision, revision2 = sample_data.revisions[:2]
 
         add_result = swh_storage.revision_add([revision, revision2])
         assert add_result == {"revision:add": 2}
 
         # order 1
         actual_revisions = swh_storage.revision_get([revision.id, revision2.id])
         assert actual_revisions == [revision, revision2]
 
         # order 2
         actual_revisions2 = swh_storage.revision_get([revision2.id, revision.id])
         assert actual_revisions2 == [revision2, revision]
 
     def test_revision_log(self, swh_storage, sample_data):
         revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
 
         # rev4 -is-child-of-> rev3 -> (rev1, rev2); rev2 -> rev1
         swh_storage.revision_add([revision1, revision2, revision3, revision4])
 
         # when
         results = list(swh_storage.revision_log([revision4.id]))
 
         # for comparison purposes
         actual_results = [Revision.from_dict(r) for r in results]
         assert len(actual_results) == 4  # rev4 -> rev3 -> (rev1, rev2); rev2 -> rev1
         assert actual_results == [revision4, revision3, revision1, revision2]
 
     def test_revision_log_with_limit(self, swh_storage, sample_data):
         revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
 
         # revision4 -is-child-of-> revision3
         swh_storage.revision_add([revision3, revision4])
         results = list(swh_storage.revision_log([revision4.id], 1))
 
         actual_results = [Revision.from_dict(r) for r in results]
         assert len(actual_results) == 1
         assert actual_results[0] == revision4
 
     def test_revision_log_unknown_revision(self, swh_storage, sample_data):
         revision = sample_data.revision
         rev_log = list(swh_storage.revision_log([revision.id]))
         assert rev_log == []
 
     def test_revision_shortlog(self, swh_storage, sample_data):
         revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
 
         # rev4 -is-child-of-> rev3 -> (rev1, rev2); rev2 -> rev1
         swh_storage.revision_add([revision1, revision2, revision3, revision4])
 
         results = list(swh_storage.revision_shortlog([revision4.id]))
         actual_results = [[id, tuple(parents)] for (id, parents) in results]
 
         assert len(actual_results) == 4
         assert actual_results == [
             [revision4.id, revision4.parents],
             [revision3.id, revision3.parents],
             [revision1.id, revision1.parents],
             [revision2.id, revision2.parents],
         ]
 
     def test_revision_shortlog_with_limit(self, swh_storage, sample_data):
         revision1, revision2, revision3, revision4 = sample_data.revisions[:4]
 
         # revision4 -is-child-of-> revision3
         swh_storage.revision_add([revision1, revision2, revision3, revision4])
         results = list(swh_storage.revision_shortlog([revision4.id], 1))
         actual_results = [[id, tuple(parents)] for (id, parents) in results]
 
         assert len(actual_results) == 1
         assert list(actual_results[0]) == [revision4.id, revision4.parents]
 
     def test_revision_get(self, swh_storage, sample_data):
         revision, revision2 = sample_data.revisions[:2]
 
         swh_storage.revision_add([revision])
 
         actual_revisions = swh_storage.revision_get([revision.id, revision2.id])
 
         assert len(actual_revisions) == 2
         assert actual_revisions == [revision, None]
 
     def test_revision_get_no_parents(self, swh_storage, sample_data):
         revision = sample_data.revision
         swh_storage.revision_add([revision])
 
         actual_revision = swh_storage.revision_get([revision.id])[0]
 
         assert revision.parents == ()
         assert actual_revision.parents == ()  # no parents on this one
 
     def test_revision_get_random(self, swh_storage, sample_data):
         revision1, revision2, revision3 = sample_data.revisions[:3]
 
         swh_storage.revision_add([revision1, revision2, revision3])
 
         assert swh_storage.revision_get_random() in {
             revision1.id,
             revision2.id,
             revision3.id,
         }
 
     def test_release_add(self, swh_storage, sample_data):
         release, release2 = sample_data.releases[:2]
 
         init_missing = swh_storage.release_missing([release.id, release2.id])
         assert list(init_missing) == [release.id, release2.id]
 
         actual_result = swh_storage.release_add([release, release2])
         assert actual_result == {"release:add": 2}
 
         end_missing = swh_storage.release_missing([release.id, release2.id])
         assert list(end_missing) == []
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("release", release),
             ("release", release2),
         ]
 
         # already present so nothing added
         actual_result = swh_storage.release_add([release, release2])
         assert actual_result == {"release:add": 0}
 
         swh_storage.refresh_stat_counters()
         assert swh_storage.stat_counters()["release"] == 2
 
     def test_release_add_no_author_date(self, swh_storage, sample_data):
         full_release = sample_data.release
 
         release = attr.evolve(full_release, author=None, date=None)
         actual_result = swh_storage.release_add([release])
         assert actual_result == {"release:add": 1}
 
         end_missing = swh_storage.release_missing([release.id])
         assert list(end_missing) == []
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("release", release)
         ]
 
     def test_release_add_twice(self, swh_storage, sample_data):
         release, release2 = sample_data.releases[:2]
 
         actual_result = swh_storage.release_add([release])
         assert actual_result == {"release:add": 1}
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("release", release)
         ]
 
         actual_result = swh_storage.release_add([release, release2, release, release2])
         assert actual_result == {"release:add": 1}
 
         assert set(swh_storage.journal_writer.journal.objects) == set(
             [("release", release), ("release", release2),]
         )
 
     def test_release_add_name_clash(self, swh_storage, sample_data):
         release, release2 = [
             attr.evolve(
                 c,
                 author=Person(
                     fullname=b"John Doe <john.doe@example.com>",
                     name=b"John Doe",
                     email=b"john.doe@example.com",
                 ),
             )
             for c in sample_data.releases[:2]
         ]
 
         actual_result = swh_storage.release_add([release, release2])
         assert actual_result == {"release:add": 2}
 
     def test_release_get(self, swh_storage, sample_data):
         release, release2, release3 = sample_data.releases[:3]
 
         # given
         swh_storage.release_add([release, release2])
 
         # when
         actual_releases = swh_storage.release_get([release.id, release2.id])
 
         # then
         assert actual_releases == [release, release2]
 
         unknown_releases = swh_storage.release_get([release3.id])
         assert unknown_releases[0] is None
 
     def test_release_get_order(self, swh_storage, sample_data):
         release, release2 = sample_data.releases[:2]
 
         add_result = swh_storage.release_add([release, release2])
         assert add_result == {"release:add": 2}
 
         # order 1
         actual_releases = swh_storage.release_get([release.id, release2.id])
         assert actual_releases == [release, release2]
 
         # order 2
         actual_releases2 = swh_storage.release_get([release2.id, release.id])
         assert actual_releases2 == [release2, release]
 
     def test_release_get_random(self, swh_storage, sample_data):
         release, release2, release3 = sample_data.releases[:3]
 
         swh_storage.release_add([release, release2, release3])
 
         assert swh_storage.release_get_random() in {
             release.id,
             release2.id,
             release3.id,
         }
 
     def test_origin_add(self, swh_storage, sample_data):
         origins = list(sample_data.origins[:2])
         origin_urls = [o.url for o in origins]
 
         assert swh_storage.origin_get(origin_urls) == [None, None]
 
         stats = swh_storage.origin_add(origins)
         assert stats == {"origin:add": 2}
 
         actual_origins = swh_storage.origin_get(origin_urls)
         assert actual_origins == origins
 
         assert set(swh_storage.journal_writer.journal.objects) == set(
             [("origin", origins[0]), ("origin", origins[1]),]
         )
 
         swh_storage.refresh_stat_counters()
         assert swh_storage.stat_counters()["origin"] == 2
 
     def test_origin_add_twice(self, swh_storage, sample_data):
         origin, origin2 = sample_data.origins[:2]
 
         add1 = swh_storage.origin_add([origin, origin2])
         assert set(swh_storage.journal_writer.journal.objects) == set(
             [("origin", origin), ("origin", origin2),]
         )
         assert add1 == {"origin:add": 2}
 
         add2 = swh_storage.origin_add([origin, origin2])
         assert set(swh_storage.journal_writer.journal.objects) == set(
             [("origin", origin), ("origin", origin2),]
         )
         assert add2 == {"origin:add": 0}
 
     def test_origin_get(self, swh_storage, sample_data):
         origin, origin2 = sample_data.origins[:2]
 
         assert swh_storage.origin_get([origin.url]) == [None]
         swh_storage.origin_add([origin])
 
         actual_origins = swh_storage.origin_get([origin.url])
         assert actual_origins == [origin]
 
         actual_origins = swh_storage.origin_get([origin.url, "not://exists"])
         assert actual_origins == [origin, None]
 
     def _generate_random_visits(self, nb_visits=100, start=0, end=7):
         """Generate random visits within the last 2 months (to avoid
         computations)
 
         """
         visits = []
         today = now()
         for _ in range(nb_visits):
             hours = random.randint(0, 24)
             minutes = random.randint(0, 60)
             seconds = random.randint(0, 60)
             days = random.randint(0, 28)
             weeks = random.randint(start, end)
             date_visit = today - timedelta(
                 weeks=weeks, hours=hours, minutes=minutes, seconds=seconds, days=days
             )
             visits.append(date_visit)
         return visits
 
     def test_origin_visit_get__unknown_origin(self, swh_storage):
         actual_page = swh_storage.origin_visit_get("foo")
         assert actual_page.next_page_token is None
         assert actual_page.results == []
         assert actual_page == PagedResult()
 
     def test_origin_visit_get__validation_failure(self, swh_storage, sample_data):
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         with pytest.raises(
             StorageArgumentException, match="page_token must be a string"
         ):
             swh_storage.origin_visit_get(origin.url, page_token=10)  # not a string
 
         with pytest.raises(
             StorageArgumentException, match="order must be a ListOrder value"
         ):
             swh_storage.origin_visit_get(origin.url, order="foobar")  # wrong order
 
     def test_origin_visit_get_all(self, swh_storage, sample_data):
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         ov1, ov2, ov3 = swh_storage.origin_visit_add(
             [
                 OriginVisit(
                     origin=origin.url,
                     date=sample_data.date_visit1,
                     type=sample_data.type_visit1,
                 ),
                 OriginVisit(
                     origin=origin.url,
                     date=sample_data.date_visit2,
                     type=sample_data.type_visit2,
                 ),
                 OriginVisit(
                     origin=origin.url,
                     date=sample_data.date_visit2,
                     type=sample_data.type_visit2,
                 ),
             ]
         )
 
         # order asc, no token, no limit
         actual_page = swh_storage.origin_visit_get(origin.url)
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov1, ov2, ov3]
 
         # order asc, no token, limit
         actual_page = swh_storage.origin_visit_get(origin.url, limit=2)
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ov1, ov2]
 
         # order asc, token, no limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov3]
 
         # order asc, no token, limit
         actual_page = swh_storage.origin_visit_get(origin.url, limit=1)
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ov1]
 
         # order asc, token, no limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov2, ov3]
 
         # order asc, token, limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token, limit=2
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov2, ov3]
 
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token, limit=1
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ov2]
 
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token, limit=1
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov3]
 
         # order desc, no token, no limit
         actual_page = swh_storage.origin_visit_get(origin.url, order=ListOrder.DESC)
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov3, ov2, ov1]
 
         # order desc, no token, limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, limit=2, order=ListOrder.DESC
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ov3, ov2]
 
         # order desc, token, no limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token, order=ListOrder.DESC
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov1]
 
         # order desc, no token, limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, limit=1, order=ListOrder.DESC
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ov3]
 
         # order desc, token, no limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token, order=ListOrder.DESC
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov2, ov1]
 
         # order desc, token, limit
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token, order=ListOrder.DESC, limit=1
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ov2]
 
         actual_page = swh_storage.origin_visit_get(
             origin.url, page_token=next_page_token, order=ListOrder.DESC
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ov1]
 
     def test_origin_visit_status_get__unknown_cases(self, swh_storage, sample_data):
         origin = sample_data.origin
         actual_page = swh_storage.origin_visit_status_get("foobar", 1)
         assert actual_page.next_page_token is None
         assert actual_page.results == []
 
         actual_page = swh_storage.origin_visit_status_get(origin.url, 1)
         assert actual_page.next_page_token is None
         assert actual_page.results == []
 
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         ov1 = swh_storage.origin_visit_add(
             [
                 OriginVisit(
                     origin=origin.url,
                     date=sample_data.date_visit1,
                     type=sample_data.type_visit1,
                 ),
             ]
         )[0]
         actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit + 10)
         assert actual_page.next_page_token is None
         assert actual_page.results == []
 
     def test_origin_visit_status_get_all(self, swh_storage, sample_data):
         origin = sample_data.origin
         swh_storage.origin_add([origin])
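         # round to milliseconds, so the dates still compare equal after a
         # storage round-trip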
         date_visit3 = round_to_milliseconds(now())
         date_visit1 = date_visit3 - datetime.timedelta(hours=2)
         date_visit2 = date_visit3 - datetime.timedelta(hours=1)
         assert date_visit1 < date_visit2 < date_visit3
 
         ov1 = swh_storage.origin_visit_add(
             [
                 OriginVisit(
                     origin=origin.url, date=date_visit1, type=sample_data.type_visit1,
                 ),
             ]
         )[0]
 
         ovs1 = OriginVisitStatus(
             origin=origin.url,
             visit=ov1.visit,
             date=date_visit1,
             status="created",
             snapshot=None,
         )
 
         ovs2 = OriginVisitStatus(
             origin=origin.url,
             visit=ov1.visit,
             date=date_visit2,
             status="partial",
             snapshot=None,
         )
 
         ovs3 = OriginVisitStatus(
             origin=origin.url,
             visit=ov1.visit,
             date=date_visit3,
             status="full",
             snapshot=sample_data.snapshot.id,
             metadata={},
         )
 
         swh_storage.origin_visit_status_add([ovs2, ovs3])
 
         # order asc, no token, no limit
         actual_page = swh_storage.origin_visit_status_get(origin.url, ov1.visit)
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs1, ovs2, ovs3]
 
         # order asc, no token, limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, limit=2
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ovs1, ovs2]
 
         # order asc, token, no limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, page_token=next_page_token
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs3]
 
         # order asc, no token, limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, limit=1
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ovs1]
 
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, page_token=next_page_token
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs2, ovs3]
 
         # order asc, token, limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, page_token=next_page_token, limit=2
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs2, ovs3]
 
         # order asc, no token, limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, limit=2
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ovs1, ovs2]
 
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, page_token=next_page_token, limit=1
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs3]
 
         # order desc, no token, no limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, order=ListOrder.DESC
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs3, ovs2, ovs1]
 
         # order desc, no token, limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, limit=2, order=ListOrder.DESC
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ovs3, ovs2]
 
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs1]
 
         # order desc, no token, limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, order=ListOrder.DESC, limit=1
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ovs3]
 
         # order desc, token, no limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs2, ovs1]
 
         # order desc, token, limit
         actual_page = swh_storage.origin_visit_status_get(
             origin.url,
             ov1.visit,
             page_token=next_page_token,
             order=ListOrder.DESC,
             limit=1,
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [ovs2]
 
         actual_page = swh_storage.origin_visit_status_get(
             origin.url, ov1.visit, page_token=next_page_token, order=ListOrder.DESC
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [ovs1]
 
     def test_origin_visit_status_get_random(self, swh_storage, sample_data):
         origins = sample_data.origins[:2]
         swh_storage.origin_add(origins)
 
         # Add some random visits within the selection range
         visits = self._generate_random_visits()
         visit_type = "git"
 
         # Add visits to those origins
         for origin in origins:
             for date_visit in visits:
                 visit = swh_storage.origin_visit_add(
                     [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)]
                 )[0]
                 swh_storage.origin_visit_status_add(
                     [
                         OriginVisitStatus(
                             origin=origin.url,
                             visit=visit.visit,
                             date=now(),
                             status="full",
                             snapshot=None,
                         )
                     ]
                 )
 
         swh_storage.refresh_stat_counters()
 
         stats = swh_storage.stat_counters()
         assert stats["origin"] == len(origins)
         assert stats["origin_visit"] == len(origins) * len(visits)
 
         random_ov, random_ovs = swh_storage.origin_visit_status_get_random(visit_type)
         assert random_ov and random_ovs
         assert random_ov.origin is not None
         assert random_ov.origin == random_ovs.origin
         assert random_ov.origin in [o.url for o in origins]
 
     def test_origin_visit_status_get_random_nothing_found(
         self, swh_storage, sample_data
     ):
         origins = sample_data.origins
         swh_storage.origin_add(origins)
         visit_type = "hg"
         # Add visits outside of the random selection window, so the random
         # selection finds nothing
         visits = self._generate_random_visits(nb_visits=3, start=13, end=24)
         for origin in origins:
             for date_visit in visits:
                 visit = swh_storage.origin_visit_add(
                     [OriginVisit(origin=origin.url, date=date_visit, type=visit_type,)]
                 )[0]
                 swh_storage.origin_visit_status_add(
                     [
                         OriginVisitStatus(
                             origin=origin.url,
                             visit=visit.visit,
                             date=now(),
                             status="full",
                             snapshot=None,
                         )
                     ]
                 )
 
         random_origin_visit = swh_storage.origin_visit_status_get_random(visit_type)
         assert random_origin_visit is None
 
     def test_origin_get_by_sha1(self, swh_storage, sample_data):
         origin = sample_data.origin
         assert swh_storage.origin_get([origin.url])[0] is None
         swh_storage.origin_add([origin])
 
         origins = list(swh_storage.origin_get_by_sha1([sha1(origin.url)]))
         assert len(origins) == 1
         assert origins[0]["url"] == origin.url
 
     def test_origin_get_by_sha1_not_found(self, swh_storage, sample_data):
         unknown_origin = sample_data.origin
         assert swh_storage.origin_get([unknown_origin.url])[0] is None
         origins = list(swh_storage.origin_get_by_sha1([sha1(unknown_origin.url)]))
         assert len(origins) == 1
         assert origins[0] is None
 
     def test_origin_search_single_result(self, swh_storage, sample_data):
         origin, origin2 = sample_data.origins[:2]
 
         actual_page = swh_storage.origin_search(origin.url)
         assert actual_page.next_page_token is None
         assert actual_page.results == []
 
         actual_page = swh_storage.origin_search(origin.url, regexp=True)
         assert actual_page.next_page_token is None
         assert actual_page.results == []
 
         swh_storage.origin_add([origin])
         actual_page = swh_storage.origin_search(origin.url)
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin]
 
         actual_page = swh_storage.origin_search(f".{origin.url[1:-1]}.", regexp=True)
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin]
 
         swh_storage.origin_add([origin2])
         actual_page = swh_storage.origin_search(origin2.url)
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin2]
 
         actual_page = swh_storage.origin_search(f".{origin2.url[1:-1]}.", regexp=True)
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin2]
 
     def test_origin_search_no_regexp(self, swh_storage, sample_data):
         origin, origin2 = sample_data.origins[:2]
         swh_storage.origin_add([origin, origin2])
 
         # no pagination
         actual_page = swh_storage.origin_search("/")
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin, origin2]
 
         # offset=0
         actual_page = swh_storage.origin_search("/", page_token=None, limit=1)
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [origin]
 
         # offset=1
         actual_page = swh_storage.origin_search(
             "/", page_token=next_page_token, limit=1
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin2]
 
     def test_origin_search_regexp_substring(self, swh_storage, sample_data):
         origin, origin2 = sample_data.origins[:2]
 
         swh_storage.origin_add([origin, origin2])
 
         # no pagination
         actual_page = swh_storage.origin_search("/", regexp=True)
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin, origin2]
 
         # offset=0
         actual_page = swh_storage.origin_search(
             "/", page_token=None, limit=1, regexp=True
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [origin]
 
         # offset=1
         actual_page = swh_storage.origin_search(
             "/", page_token=next_page_token, limit=1, regexp=True
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin2]
 
     def test_origin_search_regexp_fullstring(self, swh_storage, sample_data):
         origin, origin2 = sample_data.origins[:2]
 
         swh_storage.origin_add([origin, origin2])
 
         # no pagination
         actual_page = swh_storage.origin_search(".*/.*", regexp=True)
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin, origin2]
 
         # offset=0
         actual_page = swh_storage.origin_search(
             ".*/.*", page_token=None, limit=1, regexp=True
         )
         next_page_token = actual_page.next_page_token
         assert next_page_token is not None
         assert actual_page.results == [origin]
 
         # offset=1
         actual_page = swh_storage.origin_search(
             ".*/.*", page_token=next_page_token, limit=1, regexp=True
         )
         assert actual_page.next_page_token is None
         assert actual_page.results == [origin2]
 
     def test_origin_visit_add(self, swh_storage, sample_data):
         origin1 = sample_data.origins[1]
         swh_storage.origin_add([origin1])
 
         date_visit = now()
         date_visit2 = date_visit + datetime.timedelta(minutes=1)
 
         date_visit = round_to_milliseconds(date_visit)
         date_visit2 = round_to_milliseconds(date_visit2)
 
         visit1 = OriginVisit(
             origin=origin1.url, date=date_visit, type=sample_data.type_visit1,
         )
         visit2 = OriginVisit(
             origin=origin1.url, date=date_visit2, type=sample_data.type_visit2,
         )
 
         # add once
         ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2])
         # then again (will be ignored as they already exist)
         origin_visit1, origin_visit2 = swh_storage.origin_visit_add([ov1, ov2])
         assert ov1 == origin_visit1
         assert ov2 == origin_visit2
 
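        # origin_visit_add also records an initial "created" status for each
        # visit; ovs1/ovs2 below mirror those statuses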
         ovs1 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov1.visit,
             date=date_visit,
             status="created",
             snapshot=None,
         )
         ovs2 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov2.visit,
             date=date_visit2,
             status="created",
             snapshot=None,
         )
 
         actual_visits = swh_storage.origin_visit_get(origin1.url).results
         expected_visits = [ov1, ov2]
         assert len(expected_visits) == len(actual_visits)
         for visit in expected_visits:
             assert visit in actual_visits
 
         actual_objects = list(swh_storage.journal_writer.journal.objects)
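        # visits are journaled on every origin_visit_add call, even when
        # storage already knows them, hence the "* 2" below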
         expected_objects = list(
             [("origin", origin1)]
             + [("origin_visit", visit) for visit in expected_visits] * 2
             + [("origin_visit_status", ovs) for ovs in [ovs1, ovs2]]
         )
 
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_origin_visit_add_validation(self, swh_storage, sample_data):
         """Unknown origin when adding visits should raise"""
        visit = attr.evolve(sample_data.origin_visit, origin="something-unknown")
         with pytest.raises(StorageArgumentException, match="Unknown origin"):
             swh_storage.origin_visit_add([visit])
 
         objects = list(swh_storage.journal_writer.journal.objects)
         assert not objects
 
     def test_origin_visit_status_add_validation(self, swh_storage):
         """Wrong origin_visit_status input should raise storage argument error"""
         date_visit = now()
         visit_status1 = OriginVisitStatus(
             origin="unknown-origin-url",
             visit=10,
             date=date_visit,
             status="full",
             snapshot=None,
         )
         with pytest.raises(StorageArgumentException, match="Unknown origin"):
             swh_storage.origin_visit_status_add([visit_status1])
 
         objects = list(swh_storage.journal_writer.journal.objects)
         assert not objects
 
     def test_origin_visit_status_add(self, swh_storage, sample_data):
         """Correct origin visit statuses should add a new visit status
 
         """
         snapshot = sample_data.snapshot
         origin1 = sample_data.origins[1]
         origin2 = Origin(url="new-origin")
         swh_storage.origin_add([origin1, origin2])
 
         ov1, ov2 = swh_storage.origin_visit_add(
             [
                 OriginVisit(
                     origin=origin1.url,
                     date=sample_data.date_visit1,
                     type=sample_data.type_visit1,
                 ),
                 OriginVisit(
                     origin=origin2.url,
                     date=sample_data.date_visit2,
                     type=sample_data.type_visit2,
                 ),
             ]
         )
 
         ovs1 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov1.visit,
             date=sample_data.date_visit1,
             status="created",
             snapshot=None,
         )
         ovs2 = OriginVisitStatus(
             origin=origin2.url,
             visit=ov2.visit,
             date=sample_data.date_visit2,
             status="created",
             snapshot=None,
         )
 
         date_visit_now = round_to_milliseconds(now())
         visit_status1 = OriginVisitStatus(
             origin=ov1.origin,
             visit=ov1.visit,
             date=date_visit_now,
             status="full",
             snapshot=snapshot.id,
         )
 
         date_visit_now = round_to_milliseconds(now())
         visit_status2 = OriginVisitStatus(
             origin=ov2.origin,
             visit=ov2.visit,
             date=date_visit_now,
             status="ongoing",
             snapshot=None,
             metadata={"intrinsic": "something"},
         )
         swh_storage.origin_visit_status_add([visit_status1, visit_status2])
 
         visit = swh_storage.origin_visit_get_latest(origin1.url, require_snapshot=True)
         visit_status = swh_storage.origin_visit_status_get_latest(
             origin1.url, visit.visit, require_snapshot=True
         )
         assert visit_status == visit_status1
 
         visit = swh_storage.origin_visit_get_latest(origin2.url, require_snapshot=False)
         visit_status = swh_storage.origin_visit_status_get_latest(
             origin2.url, visit.visit, require_snapshot=False
         )
         assert origin2.url != origin1.url
         assert visit_status == visit_status2
 
         actual_objects = list(swh_storage.journal_writer.journal.objects)
 
         expected_origins = [origin1, origin2]
         expected_visits = [ov1, ov2]
         expected_visit_statuses = [ovs1, ovs2, visit_status1, visit_status2]
 
         expected_objects = (
             [("origin", o) for o in expected_origins]
             + [("origin_visit", v) for v in expected_visits]
             + [("origin_visit_status", ovs) for ovs in expected_visit_statuses]
         )
 
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_origin_visit_status_add_twice(self, swh_storage, sample_data):
         """Correct origin visit statuses should add a new visit status
 
         """
         snapshot = sample_data.snapshot
         origin1 = sample_data.origins[1]
         swh_storage.origin_add([origin1])
         ov1 = swh_storage.origin_visit_add(
             [
                 OriginVisit(
                     origin=origin1.url,
                     date=sample_data.date_visit1,
                     type=sample_data.type_visit1,
                 ),
             ]
         )[0]
 
         ovs1 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov1.visit,
             date=sample_data.date_visit1,
             status="created",
             snapshot=None,
         )
         date_visit_now = round_to_milliseconds(now())
         visit_status1 = OriginVisitStatus(
             origin=ov1.origin,
             visit=ov1.visit,
             date=date_visit_now,
             status="full",
             snapshot=snapshot.id,
         )
 
         swh_storage.origin_visit_status_add([visit_status1])
        # the second call is ignored by storage, but still written to the journal
         swh_storage.origin_visit_status_add([visit_status1])
 
         visit_status = swh_storage.origin_visit_status_get_latest(ov1.origin, ov1.visit)
         assert visit_status == visit_status1
 
         actual_objects = list(swh_storage.journal_writer.journal.objects)
 
         expected_origins = [origin1]
         expected_visits = [ov1]
         expected_visit_statuses = [ovs1, visit_status1, visit_status1]
 
         # write twice in the journal
         expected_objects = (
             [("origin", o) for o in expected_origins]
             + [("origin_visit", v) for v in expected_visits]
             + [("origin_visit_status", ovs) for ovs in expected_visit_statuses]
         )
 
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_origin_visit_find_by_date(self, swh_storage, sample_data):
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         visit1 = OriginVisit(
             origin=origin.url,
             date=sample_data.date_visit2,
             type=sample_data.type_visit1,
         )
         visit2 = OriginVisit(
             origin=origin.url,
             date=sample_data.date_visit3,
             type=sample_data.type_visit2,
         )
         visit3 = OriginVisit(
             origin=origin.url,
             date=sample_data.date_visit2,
             type=sample_data.type_visit3,
         )
         ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
 
         ovs1 = OriginVisitStatus(
             origin=origin.url,
             visit=ov1.visit,
             date=sample_data.date_visit2,
             status="ongoing",
             snapshot=None,
         )
         ovs2 = OriginVisitStatus(
             origin=origin.url,
             visit=ov2.visit,
             date=sample_data.date_visit3,
             status="ongoing",
             snapshot=None,
         )
         ovs3 = OriginVisitStatus(
             origin=origin.url,
             visit=ov3.visit,
             date=sample_data.date_visit2,
             status="ongoing",
             snapshot=None,
         )
         swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3])
 
         # Simple case
         actual_visit = swh_storage.origin_visit_find_by_date(
             origin.url, sample_data.date_visit3
         )
         assert actual_visit == ov2
 
        # Two visits share the same date; the one with the highest visit id wins
         actual_visit = swh_storage.origin_visit_find_by_date(
             origin.url, sample_data.date_visit2
         )
         assert actual_visit == ov3
 
     def test_origin_visit_find_by_date__unknown_origin(self, swh_storage, sample_data):
         actual_visit = swh_storage.origin_visit_find_by_date(
             "foo", sample_data.date_visit2
         )
         assert actual_visit is None
 
     def test_origin_visit_get_by(self, swh_storage, sample_data):
         snapshot = sample_data.snapshot
         origins = sample_data.origins[:2]
         swh_storage.origin_add(origins)
         origin_url, origin_url2 = [o.url for o in origins]
 
         visit = OriginVisit(
             origin=origin_url,
             date=sample_data.date_visit2,
             type=sample_data.type_visit2,
         )
         origin_visit1 = swh_storage.origin_visit_add([visit])[0]
 
         swh_storage.snapshot_add([snapshot])
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin_url,
                     visit=origin_visit1.visit,
                     date=now(),
                     status="ongoing",
                     snapshot=snapshot.id,
                 )
             ]
         )
 
         # Add some other {origin, visit} entries
         visit2 = OriginVisit(
             origin=origin_url,
             date=sample_data.date_visit3,
             type=sample_data.type_visit3,
         )
         visit3 = OriginVisit(
             origin=origin_url2,
             date=sample_data.date_visit3,
             type=sample_data.type_visit3,
         )
         swh_storage.origin_visit_add([visit2, visit3])
 
         # when
         visit1_metadata = {
             "contents": 42,
             "directories": 22,
         }
 
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin_url,
                     visit=origin_visit1.visit,
                     date=now(),
                     status="full",
                     snapshot=snapshot.id,
                     metadata=visit1_metadata,
                 )
             ]
         )
 
         actual_visit = swh_storage.origin_visit_get_by(origin_url, origin_visit1.visit)
         assert actual_visit == origin_visit1
 
     def test_origin_visit_get_by__no_result(self, swh_storage, sample_data):
         actual_visit = swh_storage.origin_visit_get_by("unknown", 10)  # unknown origin
         assert actual_visit is None
 
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         actual_visit = swh_storage.origin_visit_get_by(origin.url, 999)  # unknown visit
         assert actual_visit is None
 
     def test_origin_visit_get_latest_edge_cases(self, swh_storage, sample_data):
         # unknown origin so no result
         assert swh_storage.origin_visit_get_latest("unknown-origin") is None
 
         # unknown type so no result
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         assert swh_storage.origin_visit_get_latest(origin.url, type="unknown") is None
 
         # unknown allowed statuses should raise
         with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"):
             swh_storage.origin_visit_get_latest(
                 origin.url, allowed_statuses=["unknown"]
             )
 
     def test_origin_visit_get_latest_filter_type(self, swh_storage, sample_data):
         """Filtering origin visit get latest with filter type should be ok
 
         """
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         visit1 = OriginVisit(
             origin=origin.url, date=sample_data.date_visit1, type="git",
         )
         visit2 = OriginVisit(
             origin=origin.url, date=sample_data.date_visit2, type="hg",
         )
         date_now = round_to_milliseconds(now())
         visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",)
         assert sample_data.date_visit1 < sample_data.date_visit2
         assert sample_data.date_visit2 < date_now
 
         ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
 
         # Check type filter is ok
         actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="git")
         assert actual_visit == ov1
         actual_visit = swh_storage.origin_visit_get_latest(origin.url, type="hg")
         assert actual_visit == ov3
         actual_visit_unknown_type = swh_storage.origin_visit_get_latest(
             origin.url, type="npm",  # no visit matching that type
         )
         assert actual_visit_unknown_type is None
 
     def test_origin_visit_get_latest(self, swh_storage, sample_data):
         empty_snapshot, complete_snapshot = sample_data.snapshots[1:3]
         origin = sample_data.origin
 
         swh_storage.origin_add([origin])
         visit1 = OriginVisit(
             origin=origin.url, date=sample_data.date_visit1, type="git",
         )
         visit2 = OriginVisit(
             origin=origin.url, date=sample_data.date_visit2, type="hg",
         )
         date_now = round_to_milliseconds(now())
         visit3 = OriginVisit(origin=origin.url, date=date_now, type="hg",)
         assert visit1.date < visit2.date
         assert visit2.date < visit3.date
 
         ov1, ov2, ov3 = swh_storage.origin_visit_add([visit1, visit2, visit3])
 
        # no filters: the latest visit is the one with the most recent date
         actual_visit = swh_storage.origin_visit_get_latest(origin.url)
         assert actual_visit == ov3
 
        # 3 visits, none has a snapshot, so nothing is returned
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, require_snapshot=True
         )
         assert actual_visit is None
 
        # visits are created with "created" status, so nothing is returned
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["partial"]
         )
         assert actual_visit is None
 
        # visits are created with "created" status, so the most recent one again
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["created"]
         )
         assert actual_visit == ov3
 
         # Add snapshot to visit1; require_snapshot=True makes it return first visit
         swh_storage.snapshot_add([complete_snapshot])
         visit_status_with_snapshot = OriginVisitStatus(
             origin=origin.url,
             visit=ov1.visit,
             date=round_to_milliseconds(now()),
             status="ongoing",
             snapshot=complete_snapshot.id,
         )
         swh_storage.origin_visit_status_add([visit_status_with_snapshot])
         # only the first visit has a snapshot now
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, require_snapshot=True
         )
         assert actual_visit == ov1
 
         # only the first visit has a status ongoing now
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["ongoing"]
         )
         assert actual_visit == ov1
 
         actual_visit_status = swh_storage.origin_visit_status_get_latest(
             origin.url, ov1.visit, require_snapshot=True
         )
         assert actual_visit_status == visit_status_with_snapshot
 
        # ... and require_snapshot=False (the default) still returns the 3rd visit
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, require_snapshot=False
         )
         assert actual_visit == ov3
         # no specific filter, this returns as before the latest visit
         actual_visit = swh_storage.origin_visit_get_latest(origin.url)
         assert actual_visit == ov3
 
        # Status filter: no visit has a "full" status yet, so nothing is
        # returned
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["full"]
         )
         assert actual_visit is None
 
         visit_status1_full = OriginVisitStatus(
             origin=origin.url,
             visit=ov1.visit,
             date=round_to_milliseconds(now()),
             status="full",
             snapshot=complete_snapshot.id,
         )
         # Mark the first visit as completed and check status filter again
         swh_storage.origin_visit_status_add([visit_status1_full])
 
         # only the first visit has the full status
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["full"]
         )
         assert actual_visit == ov1
 
         actual_visit_status = swh_storage.origin_visit_status_get_latest(
             origin.url, ov1.visit, allowed_statuses=["full"]
         )
         assert actual_visit_status == visit_status1_full
 
         # no specific filter, this returns as before the latest visit
         actual_visit = swh_storage.origin_visit_get_latest(origin.url)
         assert actual_visit == ov3
 
         # Add snapshot to visit2 and check that the new snapshot is returned
         swh_storage.snapshot_add([empty_snapshot])
 
         visit_status2_full = OriginVisitStatus(
             origin=origin.url,
             visit=ov2.visit,
             date=round_to_milliseconds(now()),
             status="ongoing",
             snapshot=empty_snapshot.id,
         )
         swh_storage.origin_visit_status_add([visit_status2_full])
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, require_snapshot=True
         )
         # 2nd visit is most recent with a snapshot
         assert actual_visit == ov2
         actual_visit_status = swh_storage.origin_visit_status_get_latest(
             origin.url, ov2.visit, require_snapshot=True
         )
         assert actual_visit_status == visit_status2_full
 
        # no specific filter, this returns as before the latest visit, the 3rd one
        actual_visit = swh_storage.origin_visit_get_latest(origin.url)
        assert actual_visit == ov3
 
         # full status is still the first visit
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["full"]
         )
         assert actual_visit == ov1
 
        # Add a snapshot to visit3 (status date similar to visit2's)
         visit_status3_with_snapshot = OriginVisitStatus(
             origin=origin.url,
             visit=ov3.visit,
             date=round_to_milliseconds(now()),
             status="ongoing",
             snapshot=complete_snapshot.id,
         )
         swh_storage.origin_visit_status_add([visit_status3_with_snapshot])
 
         # full status is still the first visit
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["full"], require_snapshot=True,
         )
         assert actual_visit == ov1
 
         actual_visit_status = swh_storage.origin_visit_status_get_latest(
             origin.url,
             visit=actual_visit.visit,
             allowed_statuses=["full"],
             require_snapshot=True,
         )
         assert actual_visit_status == visit_status1_full
 
         # most recent is still the 3rd visit
         actual_visit = swh_storage.origin_visit_get_latest(origin.url)
         assert actual_visit == ov3
 
         # 3rd visit has a snapshot now, so it's elected
         actual_visit = swh_storage.origin_visit_get_latest(
             origin.url, require_snapshot=True
         )
         assert actual_visit == ov3
 
         actual_visit_status = swh_storage.origin_visit_status_get_latest(
             origin.url, ov3.visit, require_snapshot=True
         )
         assert actual_visit_status == visit_status3_with_snapshot
 
     def test_origin_visit_get_latest__same_date(self, swh_storage, sample_data):
         empty_snapshot, complete_snapshot = sample_data.snapshots[1:3]
         origin = sample_data.origin
 
         swh_storage.origin_add([origin])
         visit1 = OriginVisit(
             origin=origin.url, date=sample_data.date_visit1, type="git",
         )
         visit2 = OriginVisit(
             origin=origin.url, date=sample_data.date_visit1, type="hg",
         )
 
         ov1, ov2 = swh_storage.origin_visit_add([visit1, visit2])
 
         # ties should be broken by using the visit id
         actual_visit = swh_storage.origin_visit_get_latest(origin.url)
         assert actual_visit == ov2
 
     def test_origin_visit_get_latest__not_last(self, swh_storage, sample_data):
         origin = sample_data.origin
         swh_storage.origin_add([origin])
 
         visit1, visit2 = sample_data.origin_visits[:2]
         assert visit1.origin == origin.url
 
         swh_storage.origin_visit_add([visit1])
         ov1 = swh_storage.origin_visit_get_latest(origin.url)
 
        # Add a snapshot; visit1 will reference it below, making it the latest one
         complete_snapshot = sample_data.snapshots[2]
         swh_storage.snapshot_add([complete_snapshot])
 
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin.url,
                     visit=ov1.visit,
                     date=visit2.date,
                     status="partial",
                     snapshot=None,
                 )
             ]
         )
         assert visit1.date < visit2.date
 
        # no snapshot associated with the visit, so None
         visit = swh_storage.origin_visit_get_latest(
             origin.url, allowed_statuses=["partial"], require_snapshot=True,
         )
         assert visit is None
 
         date_now = now()
         assert visit2.date < date_now
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin.url,
                     visit=ov1.visit,
                     date=date_now,
                     status="full",
                     snapshot=complete_snapshot.id,
                 )
             ]
         )
 
         swh_storage.origin_visit_add(
             [OriginVisit(origin=origin.url, date=now(), type=visit1.type,)]
         )
 
         visit = swh_storage.origin_visit_get_latest(origin.url, require_snapshot=True)
         assert visit is not None
 
     def test_origin_visit_status_get_latest__validation(self, swh_storage, sample_data):
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         visit1 = OriginVisit(
             origin=origin.url, date=sample_data.date_visit1, type="git",
         )
 
         # unknown allowed statuses should raise
         with pytest.raises(StorageArgumentException, match="Unknown allowed statuses"):
             swh_storage.origin_visit_status_get_latest(
                 origin.url, visit1.visit, allowed_statuses=["unknown"]
             )
 
     def test_origin_visit_status_get_latest(self, swh_storage, sample_data):
         snapshot = sample_data.snapshots[2]
         origin1 = sample_data.origin
         swh_storage.origin_add([origin1])
 
        # add some reference visits
         ov1, ov2 = swh_storage.origin_visit_add(
             [
                 OriginVisit(
                     origin=origin1.url,
                     date=sample_data.date_visit1,
                     type=sample_data.type_visit1,
                 ),
                 OriginVisit(
                     origin=origin1.url,
                     date=sample_data.date_visit2,
                     type=sample_data.type_visit2,
                 ),
             ]
         )
         swh_storage.snapshot_add([snapshot])
 
         date_now = round_to_milliseconds(now())
         assert sample_data.date_visit1 < sample_data.date_visit2
         assert sample_data.date_visit2 < date_now
 
         ovs1 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov1.visit,
             date=sample_data.date_visit1,
             status="partial",
             snapshot=None,
         )
         ovs2 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov1.visit,
             date=sample_data.date_visit2,
             status="ongoing",
             snapshot=None,
         )
         ovs3 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov2.visit,
             date=sample_data.date_visit2
            + datetime.timedelta(minutes=1),  # later than the initial "created" status
             status="ongoing",
             snapshot=None,
         )
         ovs4 = OriginVisitStatus(
             origin=origin1.url,
             visit=ov2.visit,
             date=date_now,
             status="full",
             snapshot=snapshot.id,
             metadata={"something": "wicked"},
         )
 
         swh_storage.origin_visit_status_add([ovs1, ovs2, ovs3, ovs4])
 
         # unknown origin so no result
         actual_origin_visit = swh_storage.origin_visit_status_get_latest(
             "unknown-origin", ov1.visit
         )
         assert actual_origin_visit is None
 
         # unknown visit so no result
         actual_origin_visit = swh_storage.origin_visit_status_get_latest(
             ov1.origin, ov1.visit + 10
         )
         assert actual_origin_visit is None
 
        # Two statuses for this visit, neither with a snapshot; take the most recent
         actual_origin_visit2 = swh_storage.origin_visit_status_get_latest(
             origin1.url, ov1.visit
         )
         assert isinstance(actual_origin_visit2, OriginVisitStatus)
         assert actual_origin_visit2 == ovs2
         assert ovs2.origin == origin1.url
         assert ovs2.visit == ov1.visit
 
         actual_origin_visit = swh_storage.origin_visit_status_get_latest(
             origin1.url, ov1.visit, require_snapshot=True
         )
        # there is no status with a snapshot yet for that visit
         assert actual_origin_visit is None
 
         actual_origin_visit2 = swh_storage.origin_visit_status_get_latest(
             origin1.url, ov1.visit, allowed_statuses=["partial", "ongoing"]
         )
        # the most recent status among the allowed ones is elected ("ongoing")
         assert actual_origin_visit2 == ovs2
         assert actual_origin_visit2.status == "ongoing"
 
         actual_origin_visit4 = swh_storage.origin_visit_status_get_latest(
             origin1.url, ov2.visit, require_snapshot=True
         )
         assert actual_origin_visit4 == ovs4
         assert actual_origin_visit4.snapshot == snapshot.id
 
         actual_origin_visit = swh_storage.origin_visit_status_get_latest(
             origin1.url, ov2.visit, require_snapshot=True, allowed_statuses=["ongoing"]
         )
        # no "ongoing" status has a snapshot, so nothing matches
        assert actual_origin_visit is None
 
         actual_origin_visit3 = swh_storage.origin_visit_status_get_latest(
             origin1.url, ov2.visit, allowed_statuses=["ongoing"]
         )
         assert actual_origin_visit3 == ovs3
 
     def test_person_fullname_unicity(self, swh_storage, sample_data):
         revision, rev2 = sample_data.revisions[0:2]
        # create a revision with same committer fullname but without name and email
         revision2 = attr.evolve(
             rev2,
             committer=Person(
                 fullname=revision.committer.fullname, name=None, email=None
             ),
         )
 
         swh_storage.revision_add([revision, revision2])
 
         # when getting added revisions
         revisions = swh_storage.revision_get([revision.id, revision2.id])
 
         # then check committers are the same
         assert revisions[0].committer == revisions[1].committer
 
     def test_snapshot_add_get_empty(self, swh_storage, sample_data):
         empty_snapshot = sample_data.snapshots[1]
         empty_snapshot_dict = empty_snapshot.to_dict()
 
         origin = sample_data.origin
         swh_storage.origin_add([origin])
         ov1 = swh_storage.origin_visit_add(
             [
                 OriginVisit(
                     origin=origin.url,
                     date=sample_data.date_visit1,
                     type=sample_data.type_visit1,
                 )
             ]
         )[0]
 
         actual_result = swh_storage.snapshot_add([empty_snapshot])
         assert actual_result == {"snapshot:add": 1}
 
         date_now = now()
 
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin.url,
                     visit=ov1.visit,
                     date=date_now,
                     status="full",
                     snapshot=empty_snapshot.id,
                 )
             ]
         )
 
         by_id = swh_storage.snapshot_get(empty_snapshot.id)
         assert by_id == {**empty_snapshot_dict, "next_branch": None}
 
         ovs1 = OriginVisitStatus.from_dict(
             {
                 "origin": origin.url,
                 "date": sample_data.date_visit1,
                 "visit": ov1.visit,
                 "status": "created",
                 "snapshot": None,
                 "metadata": None,
             }
         )
         ovs2 = OriginVisitStatus.from_dict(
             {
                 "origin": origin.url,
                 "date": date_now,
                 "visit": ov1.visit,
                 "status": "full",
                 "metadata": None,
                 "snapshot": empty_snapshot.id,
             }
         )
         actual_objects = list(swh_storage.journal_writer.journal.objects)
 
         expected_objects = [
             ("origin", origin),
             ("origin_visit", ov1),
             ("origin_visit_status", ovs1,),
             ("snapshot", empty_snapshot),
             ("origin_visit_status", ovs2,),
         ]
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_snapshot_add_get_complete(self, swh_storage, sample_data):
         complete_snapshot = sample_data.snapshots[2]
         complete_snapshot_dict = complete_snapshot.to_dict()
         origin = sample_data.origin
 
         swh_storage.origin_add([origin])
         visit = OriginVisit(
             origin=origin.url,
             date=sample_data.date_visit1,
             type=sample_data.type_visit1,
         )
         origin_visit1 = swh_storage.origin_visit_add([visit])[0]
 
         actual_result = swh_storage.snapshot_add([complete_snapshot])
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin.url,
                     visit=origin_visit1.visit,
                     date=now(),
                     status="ongoing",
                     snapshot=complete_snapshot.id,
                 )
             ]
         )
         assert actual_result == {"snapshot:add": 1}
 
         by_id = swh_storage.snapshot_get(complete_snapshot.id)
         assert by_id == {**complete_snapshot_dict, "next_branch": None}
 
     def test_snapshot_add_many(self, swh_storage, sample_data):
         snapshot, _, complete_snapshot = sample_data.snapshots[:3]
 
         actual_result = swh_storage.snapshot_add([snapshot, complete_snapshot])
         assert actual_result == {"snapshot:add": 2}
 
         assert swh_storage.snapshot_get(complete_snapshot.id) == {
             **complete_snapshot.to_dict(),
             "next_branch": None,
         }
 
         assert swh_storage.snapshot_get(snapshot.id) == {
             **snapshot.to_dict(),
             "next_branch": None,
         }
 
         swh_storage.refresh_stat_counters()
         assert swh_storage.stat_counters()["snapshot"] == 2
 
     def test_snapshot_add_many_incremental(self, swh_storage, sample_data):
         snapshot, _, complete_snapshot = sample_data.snapshots[:3]
 
         actual_result = swh_storage.snapshot_add([complete_snapshot])
         assert actual_result == {"snapshot:add": 1}
 
         actual_result2 = swh_storage.snapshot_add([snapshot, complete_snapshot])
         assert actual_result2 == {"snapshot:add": 1}
 
         assert swh_storage.snapshot_get(complete_snapshot.id) == {
             **complete_snapshot.to_dict(),
             "next_branch": None,
         }
 
         assert swh_storage.snapshot_get(snapshot.id) == {
             **snapshot.to_dict(),
             "next_branch": None,
         }
 
     def test_snapshot_add_twice(self, swh_storage, sample_data):
         snapshot, empty_snapshot = sample_data.snapshots[:2]
 
         actual_result = swh_storage.snapshot_add([empty_snapshot])
         assert actual_result == {"snapshot:add": 1}
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("snapshot", empty_snapshot)
         ]
 
         actual_result = swh_storage.snapshot_add([snapshot])
         assert actual_result == {"snapshot:add": 1}
 
         assert list(swh_storage.journal_writer.journal.objects) == [
             ("snapshot", empty_snapshot),
             ("snapshot", snapshot),
         ]
 
     def test_snapshot_add_count_branches(self, swh_storage, sample_data):
         complete_snapshot = sample_data.snapshots[2]
 
         actual_result = swh_storage.snapshot_add([complete_snapshot])
         assert actual_result == {"snapshot:add": 1}
 
         snp_size = swh_storage.snapshot_count_branches(complete_snapshot.id)
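        # counts are keyed by target type; None counts branches with no target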
 
         expected_snp_size = {
             "alias": 1,
             "content": 1,
             "directory": 2,
             "release": 1,
             "revision": 1,
             "snapshot": 1,
             None: 1,
         }
         assert snp_size == expected_snp_size
 
     def test_snapshot_add_get_paginated(self, swh_storage, sample_data):
         complete_snapshot = sample_data.snapshots[2]
 
         swh_storage.snapshot_add([complete_snapshot])
 
         snp_id = complete_snapshot.id
         branches = complete_snapshot.branches
         branch_names = list(sorted(branches))
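        # branch pagination follows the lexicographic order of branch names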
 
        # Test branches_from
         snapshot = swh_storage.snapshot_get_branches(snp_id, branches_from=b"release")
 
         rel_idx = branch_names.index(b"release")
         expected_snapshot = {
             "id": snp_id,
             "branches": {name: branches[name] for name in branch_names[rel_idx:]},
             "next_branch": None,
         }
 
         assert snapshot == expected_snapshot
 
         # Test branches_count
         snapshot = swh_storage.snapshot_get_branches(snp_id, branches_count=1)
 
         expected_snapshot = {
             "id": snp_id,
             "branches": {branch_names[0]: branches[branch_names[0]],},
             "next_branch": b"content",
         }
         assert snapshot == expected_snapshot
 
        # Test branches_from + branches_count
 
         snapshot = swh_storage.snapshot_get_branches(
             snp_id, branches_from=b"directory", branches_count=3
         )
 
         dir_idx = branch_names.index(b"directory")
         expected_snapshot = {
             "id": snp_id,
             "branches": {
                 name: branches[name] for name in branch_names[dir_idx : dir_idx + 3]
             },
             "next_branch": branch_names[dir_idx + 3],
         }
 
         assert snapshot == expected_snapshot
 
     def test_snapshot_add_get_filtered(self, swh_storage, sample_data):
         origin = sample_data.origin
         complete_snapshot = sample_data.snapshots[2]
 
         swh_storage.origin_add([origin])
         visit = OriginVisit(
             origin=origin.url,
             date=sample_data.date_visit1,
             type=sample_data.type_visit1,
         )
         origin_visit1 = swh_storage.origin_visit_add([visit])[0]
 
         swh_storage.snapshot_add([complete_snapshot])
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin.url,
                     visit=origin_visit1.visit,
                     date=now(),
                     status="ongoing",
                     snapshot=complete_snapshot.id,
                 )
             ]
         )
 
         snp_id = complete_snapshot.id
         branches = complete_snapshot.branches
 
         snapshot = swh_storage.snapshot_get_branches(
             snp_id, target_types=["release", "revision"]
         )
 
         expected_snapshot = {
             "id": snp_id,
             "branches": {
                 name: tgt
                 for name, tgt in branches.items()
                 if tgt and tgt.target_type in [TargetType.RELEASE, TargetType.REVISION]
             },
             "next_branch": None,
         }
 
         assert snapshot == expected_snapshot
 
         snapshot = swh_storage.snapshot_get_branches(snp_id, target_types=["alias"])
 
         expected_snapshot = {
             "id": snp_id,
             "branches": {
                 name: tgt
                 for name, tgt in branches.items()
                 if tgt and tgt.target_type == TargetType.ALIAS
             },
             "next_branch": None,
         }
 
         assert snapshot == expected_snapshot
 
     def test_snapshot_add_get_filtered_and_paginated(self, swh_storage, sample_data):
         complete_snapshot = sample_data.snapshots[2]
 
         swh_storage.snapshot_add([complete_snapshot])
 
         snp_id = complete_snapshot.id
         branches = complete_snapshot.branches
         branch_names = list(sorted(branches))
 
        # Test branches_from
 
         snapshot = swh_storage.snapshot_get_branches(
             snp_id, target_types=["directory", "release"], branches_from=b"directory2"
         )
 
         expected_snapshot = {
             "id": snp_id,
             "branches": {name: branches[name] for name in (b"directory2", b"release")},
             "next_branch": None,
         }
 
         assert snapshot == expected_snapshot
 
         # Test branches_count
 
         snapshot = swh_storage.snapshot_get_branches(
             snp_id, target_types=["directory", "release"], branches_count=1
         )
 
         expected_snapshot = {
             "id": snp_id,
             "branches": {b"directory": branches[b"directory"]},
             "next_branch": b"directory2",
         }
         assert snapshot == expected_snapshot
 
        # Test branches_count with a larger page
 
         snapshot = swh_storage.snapshot_get_branches(
             snp_id, target_types=["directory", "release"], branches_count=2
         )
 
         expected_snapshot = {
             "id": snp_id,
             "branches": {
                 name: branches[name] for name in (b"directory", b"directory2")
             },
             "next_branch": b"release",
         }
         assert snapshot == expected_snapshot
 
        # Test branches_from + branches_count
 
         snapshot = swh_storage.snapshot_get_branches(
             snp_id,
             target_types=["directory", "release"],
             branches_from=b"directory2",
             branches_count=1,
         )
 
         dir_idx = branch_names.index(b"directory2")
         expected_snapshot = {
             "id": snp_id,
             "branches": {branch_names[dir_idx]: branches[branch_names[dir_idx]],},
             "next_branch": b"release",
         }
 
         assert snapshot == expected_snapshot
 
     def test_snapshot_add_get_branch_by_type(self, swh_storage, sample_data):
         complete_snapshot = sample_data.snapshots[2]
         snapshot = complete_snapshot.to_dict()
 
         alias1 = b"alias1"
         alias2 = b"alias2"
         target1 = random.choice(list(snapshot["branches"].keys()))
         target2 = random.choice(list(snapshot["branches"].keys()))
 
         snapshot["branches"][alias2] = {
             "target": target2,
             "target_type": "alias",
         }
 
         snapshot["branches"][alias1] = {
             "target": target1,
             "target_type": "alias",
         }
 
         new_snapshot = Snapshot.from_dict(snapshot)
         swh_storage.snapshot_add([new_snapshot])
 
         branches = swh_storage.snapshot_get_branches(
             new_snapshot.id,
             target_types=["alias"],
             branches_from=alias1,
             branches_count=1,
         )["branches"]
 
         assert len(branches) == 1
         assert alias1 in branches
 
     def test_snapshot_add_get(self, swh_storage, sample_data):
         snapshot = sample_data.snapshot
         origin = sample_data.origin
 
         swh_storage.origin_add([origin])
         visit = OriginVisit(
             origin=origin.url,
             date=sample_data.date_visit1,
             type=sample_data.type_visit1,
         )
         ov1 = swh_storage.origin_visit_add([visit])[0]
 
         swh_storage.snapshot_add([snapshot])
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin.url,
                     visit=ov1.visit,
                     date=now(),
                     status="ongoing",
                     snapshot=snapshot.id,
                 )
             ]
         )
 
         expected_snapshot = {**snapshot.to_dict(), "next_branch": None}
 
         by_id = swh_storage.snapshot_get(snapshot.id)
         assert by_id == expected_snapshot
 
         actual_visit = swh_storage.origin_visit_get_by(origin.url, ov1.visit)
         assert actual_visit == ov1
 
         visit_status = swh_storage.origin_visit_status_get_latest(
             origin.url, ov1.visit, require_snapshot=True
         )
         assert visit_status.snapshot == snapshot.id
 
     def test_snapshot_get_random(self, swh_storage, sample_data):
         snapshot, empty_snapshot, complete_snapshot = sample_data.snapshots[:3]
         swh_storage.snapshot_add([snapshot, empty_snapshot, complete_snapshot])
 
         assert swh_storage.snapshot_get_random() in {
             snapshot.id,
             empty_snapshot.id,
             complete_snapshot.id,
         }
 
     def test_snapshot_missing(self, swh_storage, sample_data):
         snapshot, missing_snapshot = sample_data.snapshots[:2]
         snapshots = [snapshot.id, missing_snapshot.id]
         swh_storage.snapshot_add([snapshot])
 
         missing_snapshots = swh_storage.snapshot_missing(snapshots)
 
         assert list(missing_snapshots) == [missing_snapshot.id]
 
     def test_stat_counters(self, swh_storage, sample_data):
         origin = sample_data.origin
         snapshot = sample_data.snapshot
         revision = sample_data.revision
         release = sample_data.release
         directory = sample_data.directory
         content = sample_data.content
 
         expected_keys = ["content", "directory", "origin", "revision"]
 
         # Initially, all counters are 0
 
         swh_storage.refresh_stat_counters()
         counters = swh_storage.stat_counters()
         assert set(expected_keys) <= set(counters)
         for key in expected_keys:
             assert counters[key] == 0
 
         # Add a content. Only the content counter should increase.
 
         swh_storage.content_add([content])
 
         swh_storage.refresh_stat_counters()
         counters = swh_storage.stat_counters()
 
         assert set(expected_keys) <= set(counters)
         for key in expected_keys:
             if key != "content":
                 assert counters[key] == 0
         assert counters["content"] == 1
 
        # Add other objects. Check their counters increased as well.
 
         swh_storage.origin_add([origin])
         visit = OriginVisit(
             origin=origin.url,
             date=sample_data.date_visit2,
             type=sample_data.type_visit2,
         )
         origin_visit1 = swh_storage.origin_visit_add([visit])[0]
 
         swh_storage.snapshot_add([snapshot])
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin.url,
                     visit=origin_visit1.visit,
                     date=now(),
                     status="ongoing",
                     snapshot=snapshot.id,
                 )
             ]
         )
         swh_storage.directory_add([directory])
         swh_storage.revision_add([revision])
         swh_storage.release_add([release])
 
         swh_storage.refresh_stat_counters()
         counters = swh_storage.stat_counters()
         assert counters["content"] == 1
         assert counters["directory"] == 1
         assert counters["snapshot"] == 1
         assert counters["origin"] == 1
         assert counters["origin_visit"] == 1
         assert counters["revision"] == 1
         assert counters["release"] == 1
         assert counters["snapshot"] == 1
         if "person" in counters:
             assert counters["person"] == 3
 
     def test_content_find_ctime(self, swh_storage, sample_data):
         origin_content = sample_data.content
         ctime = round_to_milliseconds(now())
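        # round ctime: backends may only keep millisecond precision for timestamps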
         content = attr.evolve(origin_content, data=None, ctime=ctime)
         swh_storage.content_add_metadata([content])
 
         actually_present = swh_storage.content_find({"sha1": content.sha1})
         assert actually_present[0] == content
         assert actually_present[0].ctime is not None
         assert actually_present[0].ctime.tzinfo is not None
 
     def test_content_find_with_present_content(self, swh_storage, sample_data):
         content = sample_data.content
         expected_content = attr.evolve(content, data=None)
 
        # 1. with a known sha1
         swh_storage.content_add([content])
 
         actually_present = swh_storage.content_find({"sha1": content.sha1})
         assert 1 == len(actually_present)
         assert actually_present[0] == expected_content
 
        # 2. with a known sha1_git
         actually_present = swh_storage.content_find({"sha1_git": content.sha1_git})
         assert 1 == len(actually_present)
         assert actually_present[0] == expected_content
 
        # 3. with a known sha256
         actually_present = swh_storage.content_find({"sha256": content.sha256})
         assert 1 == len(actually_present)
         assert actually_present[0] == expected_content
 
        # 4. with all known hashes at once
         actually_present = swh_storage.content_find(content.hashes())
         assert 1 == len(actually_present)
         assert actually_present[0] == expected_content
 
     def test_content_find_with_non_present_content(self, swh_storage, sample_data):
         missing_content = sample_data.skipped_content
        # 1. with a sha1 that does not exist
         actually_present = swh_storage.content_find({"sha1": missing_content.sha1})
         assert actually_present == []
 
        # 2. with a sha1_git that does not exist
         actually_present = swh_storage.content_find(
             {"sha1_git": missing_content.sha1_git}
         )
         assert actually_present == []
 
        # 3. with a sha256 that does not exist
         actually_present = swh_storage.content_find({"sha256": missing_content.sha256})
         assert actually_present == []
 
     def test_content_find_with_duplicate_input(self, swh_storage, sample_data):
         content = sample_data.content
 
         # Create fake data with colliding sha256 and blake2s256
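        # (bumping the first byte assumes it is not 0xff in the sample hashes)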
         sha1_array = bytearray(content.sha1)
         sha1_array[0] += 1
         sha1git_array = bytearray(content.sha1_git)
         sha1git_array[0] += 1
         duplicated_content = attr.evolve(
             content, sha1=bytes(sha1_array), sha1_git=bytes(sha1git_array)
         )
 
         # Inject the data
         swh_storage.content_add([content, duplicated_content])
 
         actual_result = swh_storage.content_find(
             {
                 "blake2s256": duplicated_content.blake2s256,
                 "sha256": duplicated_content.sha256,
             }
         )
 
         expected_content = attr.evolve(content, data=None)
         expected_duplicated_content = attr.evolve(duplicated_content, data=None)
 
         for result in actual_result:
             assert result in [expected_content, expected_duplicated_content]
 
     def test_content_find_with_duplicate_sha256(self, swh_storage, sample_data):
         content = sample_data.content
 
         hashes = {}
         # Create fake data with colliding sha256
         for hashalgo in ("sha1", "sha1_git", "blake2s256"):
             value = bytearray(getattr(content, hashalgo))
             value[0] += 1
             hashes[hashalgo] = bytes(value)
 
         duplicated_content = attr.evolve(
             content,
             sha1=hashes["sha1"],
             sha1_git=hashes["sha1_git"],
             blake2s256=hashes["blake2s256"],
         )
         swh_storage.content_add([content, duplicated_content])
 
         actual_result = swh_storage.content_find({"sha256": duplicated_content.sha256})
         assert len(actual_result) == 2
 
         expected_content = attr.evolve(content, data=None)
         expected_duplicated_content = attr.evolve(duplicated_content, data=None)
 
         for result in actual_result:
             assert result in [expected_content, expected_duplicated_content]
 
         # Find with both sha256 and blake2s256
         actual_result = swh_storage.content_find(
             {
                 "sha256": duplicated_content.sha256,
                 "blake2s256": duplicated_content.blake2s256,
             }
         )
 
         assert len(actual_result) == 1
         assert actual_result == [expected_duplicated_content]
 
     def test_content_find_with_duplicate_blake2s256(self, swh_storage, sample_data):
         content = sample_data.content
 
        # Create fake data colliding only on blake2s256
         sha1_array = bytearray(content.sha1)
         sha1_array[0] += 1
         sha1git_array = bytearray(content.sha1_git)
         sha1git_array[0] += 1
         sha256_array = bytearray(content.sha256)
         sha256_array[0] += 1
 
         duplicated_content = attr.evolve(
             content,
             sha1=bytes(sha1_array),
             sha1_git=bytes(sha1git_array),
             sha256=bytes(sha256_array),
         )
 
         swh_storage.content_add([content, duplicated_content])
 
         actual_result = swh_storage.content_find(
             {"blake2s256": duplicated_content.blake2s256}
         )
 
         expected_content = attr.evolve(content, data=None)
         expected_duplicated_content = attr.evolve(duplicated_content, data=None)
 
         for result in actual_result:
             assert result in [expected_content, expected_duplicated_content]
 
         # Find with both sha256 and blake2s256
         actual_result = swh_storage.content_find(
             {
                 "sha256": duplicated_content.sha256,
                 "blake2s256": duplicated_content.blake2s256,
             }
         )
 
         assert actual_result == [expected_duplicated_content]
 
     def test_content_find_bad_input(self, swh_storage):
         # 1. with no hash to lookup
         with pytest.raises(StorageArgumentException):
             swh_storage.content_find({})  # need at least one hash
 
         # 2. with bad hash
         with pytest.raises(StorageArgumentException):
             swh_storage.content_find({"unknown-sha1": "something"})  # not the right key
 
     def test_object_find_by_sha1_git(self, swh_storage, sample_data):
         content = sample_data.content
         directory = sample_data.directory
         revision = sample_data.revision
         release = sample_data.release
 
         sha1_gits = [b"00000000000000000000"]
         expected = {
             b"00000000000000000000": [],
         }
 
         swh_storage.content_add([content])
         sha1_gits.append(content.sha1_git)
 
         expected[content.sha1_git] = [
             {"sha1_git": content.sha1_git, "type": "content",}
         ]
 
         swh_storage.directory_add([directory])
         sha1_gits.append(directory.id)
         expected[directory.id] = [{"sha1_git": directory.id, "type": "directory",}]
 
         swh_storage.revision_add([revision])
         sha1_gits.append(revision.id)
         expected[revision.id] = [{"sha1_git": revision.id, "type": "revision",}]
 
         swh_storage.release_add([release])
         sha1_gits.append(release.id)
         expected[release.id] = [{"sha1_git": release.id, "type": "release",}]
 
         ret = swh_storage.object_find_by_sha1_git(sha1_gits)
 
         assert expected == ret
 
     def test_metadata_fetcher_add_get(self, swh_storage, sample_data):
         fetcher = sample_data.metadata_fetcher
         actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
         assert actual_fetcher is None  # does not exist
 
         swh_storage.metadata_fetcher_add([fetcher])
 
         res = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
         assert res == fetcher
 
         actual_objects = list(swh_storage.journal_writer.journal.objects)
         expected_objects = [
             ("metadata_fetcher", fetcher),
         ]
 
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_metadata_fetcher_add_zero(self, swh_storage, sample_data):
         fetcher = sample_data.metadata_fetcher
         actual_fetcher = swh_storage.metadata_fetcher_get(fetcher.name, fetcher.version)
         assert actual_fetcher is None  # does not exist
 
         swh_storage.metadata_fetcher_add([])
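        # adding an empty list must be a no-op and not fail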
 
     def test_metadata_authority_add_get(self, swh_storage, sample_data):
         authority = sample_data.metadata_authority
 
         actual_authority = swh_storage.metadata_authority_get(
             authority.type, authority.url
         )
         assert actual_authority is None  # does not exist
 
         swh_storage.metadata_authority_add([authority])
 
         res = swh_storage.metadata_authority_get(authority.type, authority.url)
         assert res == authority
 
         actual_objects = list(swh_storage.journal_writer.journal.objects)
         expected_objects = [
             ("metadata_authority", authority),
         ]
 
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_metadata_authority_add_zero(self, swh_storage, sample_data):
         authority = sample_data.metadata_authority
 
         actual_authority = swh_storage.metadata_authority_get(
             authority.type, authority.url
         )
         assert actual_authority is None  # does not exist
 
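        # adding an empty list of authorities is a no-op and must not raise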
         swh_storage.metadata_authority_add([])
 
     def test_content_metadata_add(self, swh_storage, sample_data):
         content = sample_data.content
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         content_metadata = sample_data.content_metadata[:2]
 
        content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
 
         swh_storage.raw_extrinsic_metadata_add(content_metadata)
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content_swhid, authority
         )
         assert result.next_page_token is None
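        # results may come back in any backend-specific order; sort by
        # discovery_date before comparing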
         assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == list(
             content_metadata
         )
 
         actual_objects = list(swh_storage.journal_writer.journal.objects)
         expected_objects = [
             ("metadata_authority", authority),
             ("metadata_fetcher", fetcher),
         ] + [("raw_extrinsic_metadata", item) for item in content_metadata]
 
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_content_metadata_add_duplicate(self, swh_storage, sample_data):
         """Duplicates should be silently updated."""
         content = sample_data.content
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         content_metadata, content_metadata2 = sample_data.content_metadata[:2]
        content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
 
         new_content_metadata2 = attr.evolve(
             content_metadata2, format="new-format", metadata=b"new-metadata",
         )
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
 
         swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
         swh_storage.raw_extrinsic_metadata_add([new_content_metadata2])
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content_swhid, authority
         )
         assert result.next_page_token is None
 
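        # which of the two behaviors happens is backend-specific.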
         expected_results1 = (content_metadata, new_content_metadata2)
         expected_results2 = (content_metadata, content_metadata2)
 
         assert tuple(sorted(result.results, key=lambda x: x.discovery_date,)) in (
             expected_results1,  # cassandra
             expected_results2,  # postgresql
         )
 
     def test_content_metadata_get(self, swh_storage, sample_data):
         content, content2 = sample_data.contents[:2]
         fetcher, fetcher2 = sample_data.fetchers[:2]
         authority, authority2 = sample_data.authorities[:2]
         (
             content1_metadata1,
             content1_metadata2,
             content1_metadata3,
         ) = sample_data.content_metadata[:3]
 
         content1_swhid = SWHID(object_type="content", object_id=content.sha1_git)
         content2_swhid = SWHID(object_type="content", object_id=content2.sha1_git)
         content2_metadata = attr.evolve(content1_metadata2, id=content2_swhid)
 
         swh_storage.metadata_authority_add([authority, authority2])
         swh_storage.metadata_fetcher_add([fetcher, fetcher2])
 
         swh_storage.raw_extrinsic_metadata_add(
             [
                 content1_metadata1,
                 content1_metadata2,
                 content1_metadata3,
                 content2_metadata,
             ]
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content1_swhid, authority
         )
         assert result.next_page_token is None
         assert [content1_metadata1, content1_metadata2] == list(
             sorted(result.results, key=lambda x: x.discovery_date,)
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content1_swhid, authority2
         )
         assert result.next_page_token is None
         assert [content1_metadata3] == list(
             sorted(result.results, key=lambda x: x.discovery_date,)
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content2_swhid, authority
         )
         assert result.next_page_token is None
         assert [content2_metadata] == list(result.results,)
 
     def test_content_metadata_get_after(self, swh_storage, sample_data):
         content = sample_data.content
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         content_metadata, content_metadata2 = sample_data.content_metadata[:2]
 
         content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
 
         swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT,
             content_swhid,
             authority,
             after=content_metadata.discovery_date - timedelta(seconds=1),
         )
         assert result.next_page_token is None
         assert [content_metadata, content_metadata2] == list(
             sorted(result.results, key=lambda x: x.discovery_date,)
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT,
             content_swhid,
             authority,
             after=content_metadata.discovery_date,
         )
         assert result.next_page_token is None
         assert result.results == [content_metadata2]
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT,
             content_swhid,
             authority,
             after=content_metadata2.discovery_date,
         )
         assert result.next_page_token is None
         assert result.results == []
 
     def test_content_metadata_get_paginate(self, swh_storage, sample_data):
         content = sample_data.content
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         content_metadata, content_metadata2 = sample_data.content_metadata[:2]
 
         content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
         swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
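        # plain call without pagination (result unused), before exercising
        # limit/page_token below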
         swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content_swhid, authority
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content_swhid, authority, limit=1
         )
         assert result.next_page_token is not None
         assert result.results == [content_metadata]
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT,
             content_swhid,
             authority,
             limit=1,
             page_token=result.next_page_token,
         )
         assert result.next_page_token is None
         assert result.results == [content_metadata2]
 
     def test_content_metadata_get_paginate_same_date(self, swh_storage, sample_data):
         content = sample_data.content
         fetcher1, fetcher2 = sample_data.fetchers[:2]
         authority = sample_data.metadata_authority
         content_metadata, content_metadata2 = sample_data.content_metadata[:2]
 
         content_swhid = SWHID(object_type="content", object_id=content.sha1_git)
 
         swh_storage.metadata_fetcher_add([fetcher1, fetcher2])
         swh_storage.metadata_authority_add([authority])
 
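        # same discovery_date as content_metadata2, but a different fetcher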
         new_content_metadata2 = attr.evolve(
             content_metadata2,
             discovery_date=content_metadata2.discovery_date,
             fetcher=attr.evolve(fetcher2, metadata=None),
         )
 
         swh_storage.raw_extrinsic_metadata_add(
             [content_metadata, new_content_metadata2]
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT, content_swhid, authority, limit=1
         )
         assert result.next_page_token is not None
         assert result.results == [content_metadata]
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.CONTENT,
             content_swhid,
             authority,
             limit=1,
             page_token=result.next_page_token,
         )
         assert result.next_page_token is None
         assert result.results == [new_content_metadata2]
 
     def test_content_metadata_get__invalid_id(self, swh_storage, sample_data):
         origin = sample_data.origin
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         content_metadata, content_metadata2 = sample_data.content_metadata[:2]
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
         swh_storage.raw_extrinsic_metadata_add([content_metadata, content_metadata2])
 
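        # an origin URL is not a valid SWHID for a content target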
         with pytest.raises(StorageArgumentException, match="SWHID"):
             swh_storage.raw_extrinsic_metadata_get(
                 MetadataTargetType.CONTENT, origin.url, authority
             )
 
     def test_origin_metadata_add(self, swh_storage, sample_data):
         origin = sample_data.origin
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
 
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
 
         swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin.url, authority
         )
         assert result.next_page_token is None
         assert list(sorted(result.results, key=lambda x: x.discovery_date)) == [
             origin_metadata,
             origin_metadata2,
         ]
 
         actual_objects = list(swh_storage.journal_writer.journal.objects)
         expected_objects = [
             ("metadata_authority", authority),
             ("metadata_fetcher", fetcher),
             ("raw_extrinsic_metadata", origin_metadata),
             ("raw_extrinsic_metadata", origin_metadata2),
         ]
 
         for obj in expected_objects:
             assert obj in actual_objects
 
     def test_origin_metadata_add_duplicate(self, swh_storage, sample_data):
         """Duplicates should be silently updated."""
         origin = sample_data.origin
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         new_origin_metadata2 = attr.evolve(
             origin_metadata2, format="new-format", metadata=b"new-metadata",
         )
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
 
         swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
         swh_storage.raw_extrinsic_metadata_add([new_origin_metadata2])
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin.url, authority
         )
         assert result.next_page_token is None
 
        # which of the two behaviors happens is backend-specific.
         expected_results1 = (origin_metadata, new_origin_metadata2)
         expected_results2 = (origin_metadata, origin_metadata2)
 
         assert tuple(sorted(result.results, key=lambda x: x.discovery_date,)) in (
             expected_results1,  # cassandra
             expected_results2,  # postgresql
         )
 
     def test_origin_metadata_get(self, swh_storage, sample_data):
         origin, origin2 = sample_data.origins[:2]
         fetcher, fetcher2 = sample_data.fetchers[:2]
         authority, authority2 = sample_data.authorities[:2]
         (
             origin1_metadata1,
             origin1_metadata2,
             origin1_metadata3,
         ) = sample_data.origin_metadata[:3]
 
         assert swh_storage.origin_add([origin, origin2]) == {"origin:add": 2}
 
         origin2_metadata = attr.evolve(origin1_metadata2, id=origin2.url)
 
         swh_storage.metadata_authority_add([authority, authority2])
         swh_storage.metadata_fetcher_add([fetcher, fetcher2])
 
         swh_storage.raw_extrinsic_metadata_add(
             [origin1_metadata1, origin1_metadata2, origin1_metadata3, origin2_metadata]
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin.url, authority
         )
         assert result.next_page_token is None
         assert [origin1_metadata1, origin1_metadata2] == list(
             sorted(result.results, key=lambda x: x.discovery_date,)
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin.url, authority2
         )
         assert result.next_page_token is None
         assert [origin1_metadata3] == list(
             sorted(result.results, key=lambda x: x.discovery_date,)
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin2.url, authority
         )
         assert result.next_page_token is None
         assert [origin2_metadata] == list(result.results,)
 
     def test_origin_metadata_get_after(self, swh_storage, sample_data):
         origin = sample_data.origin
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
 
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
         swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN,
             origin.url,
             authority,
             after=origin_metadata.discovery_date - timedelta(seconds=1),
         )
         assert result.next_page_token is None
         assert list(sorted(result.results, key=lambda x: x.discovery_date,)) == [
             origin_metadata,
             origin_metadata2,
         ]
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN,
             origin.url,
             authority,
             after=origin_metadata.discovery_date,
         )
         assert result.next_page_token is None
         assert result.results == [origin_metadata2]
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN,
             origin.url,
             authority,
             after=origin_metadata2.discovery_date,
         )
         assert result.next_page_token is None
         assert result.results == []
 
     def test_origin_metadata_get_paginate(self, swh_storage, sample_data):
         origin = sample_data.origin
         fetcher = sample_data.metadata_fetcher
         authority = sample_data.metadata_authority
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
 
         swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
 
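        # plain call without pagination (result unused), before exercising
        # limit/page_token below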
         swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin.url, authority
         )
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin.url, authority, limit=1
         )
         assert result.next_page_token is not None
         assert result.results == [origin_metadata]
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN,
             origin.url,
             authority,
             limit=1,
             page_token=result.next_page_token,
         )
         assert result.next_page_token is None
         assert result.results == [origin_metadata2]
 
     def test_origin_metadata_get_paginate_same_date(self, swh_storage, sample_data):
         origin = sample_data.origin
         fetcher1, fetcher2 = sample_data.fetchers[:2]
         authority = sample_data.metadata_authority
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         swh_storage.metadata_fetcher_add([fetcher1, fetcher2])
         swh_storage.metadata_authority_add([authority])
 
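        # same discovery_date as origin_metadata2, but a different fetcher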
         new_origin_metadata2 = attr.evolve(
             origin_metadata2,
             discovery_date=origin_metadata2.discovery_date,
             fetcher=attr.evolve(fetcher2, metadata=None),
         )
 
         swh_storage.raw_extrinsic_metadata_add([origin_metadata, new_origin_metadata2])
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN, origin.url, authority, limit=1
         )
         assert result.next_page_token is not None
         assert result.results == [origin_metadata]
 
         result = swh_storage.raw_extrinsic_metadata_get(
             MetadataTargetType.ORIGIN,
             origin.url,
             authority,
             limit=1,
             page_token=result.next_page_token,
         )
         assert result.next_page_token is None
         assert result.results == [new_origin_metadata2]
 
     def test_origin_metadata_add_missing_authority(self, swh_storage, sample_data):
         origin = sample_data.origin
         fetcher = sample_data.metadata_fetcher
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         swh_storage.metadata_fetcher_add([fetcher])
 
         with pytest.raises(StorageArgumentException, match="authority"):
             swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
 
     def test_origin_metadata_add_missing_fetcher(self, swh_storage, sample_data):
         origin = sample_data.origin
         authority = sample_data.metadata_authority
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         swh_storage.metadata_authority_add([authority])
 
         with pytest.raises(StorageArgumentException, match="fetcher"):
             swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
 
     def test_origin_metadata_get__invalid_id_type(self, swh_storage, sample_data):
         origin = sample_data.origin
         authority = sample_data.metadata_authority
         fetcher = sample_data.metadata_fetcher
         origin_metadata, origin_metadata2 = sample_data.origin_metadata[:2]
         content_metadata = sample_data.content_metadata[0]
         assert swh_storage.origin_add([origin]) == {"origin:add": 1}
 
         swh_storage.metadata_fetcher_add([fetcher])
         swh_storage.metadata_authority_add([authority])
 
         swh_storage.raw_extrinsic_metadata_add([origin_metadata, origin_metadata2])
 
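        # a content SWHID is not a valid identifier for an origin target,
        # which expects a URL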
         with pytest.raises(StorageArgumentException, match="SWHID"):
             swh_storage.raw_extrinsic_metadata_get(
                 MetadataTargetType.ORIGIN, content_metadata.id, authority,
             )
 
 
 class TestStorageGeneratedData:
     def test_generate_content_get_data(self, swh_storage, swh_contents):
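        # contents with status "absent" are skipped contents and carry no data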
         contents_with_data = [c for c in swh_contents if c.status != "absent"]
 
         # retrieve contents
         for content in contents_with_data:
             actual_content_data = swh_storage.content_get_data(content.sha1)
             assert actual_content_data is not None
             assert actual_content_data == content.data
 
     def test_generate_content_get(self, swh_storage, swh_contents):
         expected_contents = [
             attr.evolve(c, data=None) for c in swh_contents if c.status != "absent"
         ]
 
         actual_contents = swh_storage.content_get([c.sha1 for c in expected_contents])
 
         assert len(actual_contents) == len(expected_contents)
         assert actual_contents == expected_contents
 
     @pytest.mark.parametrize("limit", [1, 7, 10, 100, 1000])
     def test_origin_list(self, swh_storage, swh_origins, limit):
         returned_origins = []
 
         page_token = None
         i = 0
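        # page through all origins until next_page_token is exhausted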
         while True:
             actual_page = swh_storage.origin_list(page_token=page_token, limit=limit)
             assert len(actual_page.results) <= limit
 
             returned_origins.extend(actual_page.results)
 
             i += 1
             page_token = actual_page.next_page_token
 
             if page_token is None:
                 assert i * limit >= len(swh_origins)
                 break
             else:
                 assert len(actual_page.results) == limit
 
         assert sorted(returned_origins) == sorted(swh_origins)
 
     def test_origin_count(self, swh_storage, sample_data):
         swh_storage.origin_add(sample_data.origins)
 
         assert swh_storage.origin_count("github") == 3
         assert swh_storage.origin_count("gitlab") == 2
         assert swh_storage.origin_count(".*user.*", regexp=True) == 5
         assert swh_storage.origin_count(".*user.*", regexp=False) == 0
         assert swh_storage.origin_count(".*user1.*", regexp=True) == 2
         assert swh_storage.origin_count(".*user1.*", regexp=False) == 0
 
     def test_origin_count_with_visit_no_visits(self, swh_storage, sample_data):
         swh_storage.origin_add(sample_data.origins)
 
         # none of them have visits, so with_visit=True => 0
         assert swh_storage.origin_count("github", with_visit=True) == 0
         assert swh_storage.origin_count("gitlab", with_visit=True) == 0
         assert swh_storage.origin_count(".*user.*", regexp=True, with_visit=True) == 0
         assert swh_storage.origin_count(".*user.*", regexp=False, with_visit=True) == 0
         assert swh_storage.origin_count(".*user1.*", regexp=True, with_visit=True) == 0
         assert swh_storage.origin_count(".*user1.*", regexp=False, with_visit=True) == 0
 
     def test_origin_count_with_visit_with_visits_no_snapshot(
         self, swh_storage, sample_data
     ):
         swh_storage.origin_add(sample_data.origins)
 
         origin_url = "https://github.com/user1/repo1"
         visit = OriginVisit(origin=origin_url, date=now(), type="git",)
         swh_storage.origin_visit_add([visit])
 
         assert swh_storage.origin_count("github", with_visit=False) == 3
         # it has a visit, but no snapshot, so with_visit=True => 0
         assert swh_storage.origin_count("github", with_visit=True) == 0
 
         assert swh_storage.origin_count("gitlab", with_visit=False) == 2
         # these gitlab origins have no visit
         assert swh_storage.origin_count("gitlab", with_visit=True) == 0
 
         assert (
             swh_storage.origin_count("github.*user1", regexp=True, with_visit=False)
             == 1
         )
         assert (
             swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 0
         )
         assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 0
 
     def test_origin_count_with_visit_with_visits_and_snapshot(
         self, swh_storage, sample_data
     ):
         snapshot = sample_data.snapshot
         swh_storage.origin_add(sample_data.origins)
 
         swh_storage.snapshot_add([snapshot])
         origin_url = "https://github.com/user1/repo1"
         visit = OriginVisit(origin=origin_url, date=now(), type="git",)
         visit = swh_storage.origin_visit_add([visit])[0]
         swh_storage.origin_visit_status_add(
             [
                 OriginVisitStatus(
                     origin=origin_url,
                     visit=visit.visit,
                     date=now(),
                     status="ongoing",
                     snapshot=snapshot.id,
                 )
             ]
         )
 
         assert swh_storage.origin_count("github", with_visit=False) == 3
         # github/user1 has a visit and a snapshot, so with_visit=True => 1
         assert swh_storage.origin_count("github", with_visit=True) == 1
 
         assert (
             swh_storage.origin_count("github.*user1", regexp=True, with_visit=False)
             == 1
         )
         assert (
             swh_storage.origin_count("github.*user1", regexp=True, with_visit=True) == 1
         )
         assert swh_storage.origin_count("github", regexp=True, with_visit=True) == 1
 
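    # property-based test: adding arbitrary model objects must not crash,
    # even when hash collisions occur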
     @settings(suppress_health_check=[HealthCheck.too_slow])
     @given(strategies.lists(objects(split_content=True), max_size=2))
     def test_add_arbitrary(self, swh_storage, objects):
         for (obj_type, obj) in objects:
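            # origin visits need their origin to exist first, and are added
            # via the dedicated origin_visit_add endpoint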
             if obj.object_type == "origin_visit":
                 swh_storage.origin_add([Origin(url=obj.origin)])
                 visit = OriginVisit(origin=obj.origin, date=obj.date, type=obj.type,)
                 swh_storage.origin_visit_add([visit])
             else:
                 method = getattr(swh_storage, obj_type + "_add")
                 try:
                     method([obj])
                 except HashCollision:
                     pass